| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12 to 1.05M | stringlengths 5 to 86 | stringlengths 4 to 191 | stringclasses (1 value) | stringclasses (15 values) | int32 12 to 1.05M | listlengths 1 to 23 | stringlengths 64 |
'''
This script (generate_reaction_templates) uses a MongoDB collection
with atom-mapped reaction SMILES strings and parses them into a new
collection containing the transforms.
'''
from __future__ import print_function
from global_config import USE_STEREOCHEMISTRY
import argparse
from numpy.random import shuffle # for random selection
import rdkit.Chem as Chem # molecule building
from rdkit.Chem import AllChem
from collections import defaultdict
import rdkit.Chem.Draw as Draw
from rdkit import RDLogger
import datetime # for info files
import json # for dumping
import sys # for command line
import os # for file paths
import re
import itertools
from makeit.retro.draw import *
# DATABASE
from ochem_predict_nn.utils.database import collection_templates, collection_example_reactions_smilesonly
TRANSFORM_DB = collection_templates()
REACTION_DB = collection_example_reactions_smilesonly()
reaction_smiles_field = 'reaction_smiles'
def mols_from_smiles_list(all_smiles):
'''Given a list of smiles strings, this function creates rdkit
molecules'''
mols = []
for smiles in all_smiles:
if not smiles: continue
mols.append(Chem.MolFromSmiles(smiles))
return mols
def bond_to_label(bond):
'''This function takes an RDKit bond and creates a label describing
the most important attributes'''
# atoms = sorted([atom_to_label(bond.GetBeginAtom().GetIdx()), \
# atom_to_label(bond.GetEndAtom().GetIdx())])
a1_label = str(bond.GetBeginAtom().GetAtomicNum())
a2_label = str(bond.GetEndAtom().GetAtomicNum())
if bond.GetBeginAtom().HasProp('molAtomMapNumber'):
a1_label += bond.GetBeginAtom().GetProp('molAtomMapNumber')
if bond.GetEndAtom().HasProp('molAtomMapNumber'):
a2_label += bond.GetEndAtom().GetProp('molAtomMapNumber')
atoms = sorted([a1_label, a2_label])
return '{}{}{}'.format(atoms[0], bond.GetSmarts(), atoms[1])
def get_tagged_atoms_from_mols(mols):
'''Takes a list of RDKit molecules and returns total list of
atoms and their tags'''
atoms = []
atom_tags = []
for mol in mols:
new_atoms, new_atom_tags = get_tagged_atoms_from_mol(mol)
atoms += new_atoms
atom_tags += new_atom_tags
return atoms, atom_tags
def get_tagged_atoms_from_mol(mol):
'''Takes an RDKit molecule and returns list of tagged atoms and their
corresponding numbers'''
atoms = []
atom_tags = []
for atom in mol.GetAtoms():
if ':' in atom.GetSmarts():
atoms.append(atom)
atom_tags.append(atom.GetSmarts().split(':')[1][:-1])
return atoms, atom_tags
def atoms_are_different(atom1, atom2, level = 1):
'''Compares two RDKit atoms based on basic properties'''
if atom1.GetSmarts() != atom2.GetSmarts(): return True # should be very general
if atom1.GetAtomicNum() != atom2.GetAtomicNum(): return True # must be true for atom mapping
if atom1.GetTotalNumHs() != atom2.GetTotalNumHs(): return True
if atom1.GetFormalCharge() != atom2.GetFormalCharge(): return True
if atom1.GetDegree() != atom2.GetDegree(): return True
#if atom1.IsInRing() != atom2.IsInRing(): return True
if atom1.GetNumRadicalElectrons() != atom2.GetNumRadicalElectrons(): return True
# TODO: add # pi electrons like ICSynth?
# Check bonds and nearest neighbor identity
if level >= 1:
bonds1 = sorted([bond_to_label(bond) for bond in atom1.GetBonds()])
bonds2 = sorted([bond_to_label(bond) for bond in atom2.GetBonds()])
if bonds1 != bonds2: return True
# # Check neighbors too (already taken care of with previous lines)
# neighbors1 = sorted([atom.GetAtomicNum() for atom in atom1.GetNeighbors()])
# neighbors2 = sorted([atom.GetAtomicNum() for atom in atom2.GetNeighbors()])
# if neighbors1 != neighbors2: return True
return False
def get_changed_atoms(reactants, products):
'''Looks at mapped atoms in a reaction and determines which ones changed'''
err = 0
prod_atoms, prod_atom_tags = get_tagged_atoms_from_mols(products)
if v: print('Products contain {} tagged atoms'.format(len(prod_atoms)))
if v: print('Products contain {} unique atom numbers'.format(len(set(prod_atom_tags))))
reac_atoms, reac_atom_tags = get_tagged_atoms_from_mols(reactants)
if len(set(prod_atom_tags)) != len(set(reac_atom_tags)):
if v: print('warning: different atom tags appear in reactants and products')
err = 1
if len(prod_atoms) != len(reac_atoms):
if v: print('warning: total number of tagged atoms differ, stoichiometry != 1?')
#err = 1
# Find differences
changed_atoms = []
changed_atom_tags = []
#print(reac_atom_tags)
#print(prod_atom_tags)
# Product atoms that are different from reactant atom equivalent
for i, prod_tag in enumerate(prod_atom_tags):
for j, reac_tag in enumerate(reac_atom_tags):
if reac_tag != prod_tag: continue
if reac_tag not in changed_atom_tags: # don't bother comparing if we know this atom changes
# If atom changed, add
if atoms_are_different(prod_atoms[i], reac_atoms[j]):
changed_atoms.append(reac_atoms[j])
changed_atom_tags.append(reac_tag)
break
# If reac_tag appears multiple times, add (needed for stoichiometry > 1)
if prod_atom_tags.count(reac_tag) > 1:
changed_atoms.append(reac_atoms[j])
changed_atom_tags.append(reac_tag)
break
# Reactant atoms that do not appear in product (tagged leaving groups)
for j, reac_tag in enumerate(reac_atom_tags):
if reac_tag not in changed_atom_tags:
if reac_tag not in prod_atom_tags:
changed_atoms.append(reac_atoms[j])
changed_atom_tags.append(reac_tag)
if v:
print('{} tagged atoms in reactants change 1-atom properties'.format(len(changed_atom_tags)))
for smarts in [atom.GetSmarts() for atom in changed_atoms]:
print(' {}'.format(smarts))
return changed_atoms, changed_atom_tags, err
def expand_atoms_to_use(mol, atoms_to_use, groups = [], symbol_replacements = []):
'''Given an RDKit molecule and a list of AtomIdX which should be included
in the reaction, this function expands the list of AtomIdXs to include one
nearest neighbor with special consideration of (a) unimportant neighbors and
(b) important functional groupings'''
# Copy
new_atoms_to_use = atoms_to_use[:]
# Look for all atoms in the current list of atoms to use
for atom in mol.GetAtoms():
if atom.GetIdx() not in atoms_to_use: continue
# Look for all nearest neighbors of the currently-included atoms
for neighbor in atom.GetNeighbors():
# Evaluate nearest neighbor atom to determine what should be included
new_atoms_to_use, symbol_replacements = \
expand_atoms_to_use_atom(mol, new_atoms_to_use, neighbor.GetIdx(),
groups = groups, symbol_replacements = symbol_replacements)
return new_atoms_to_use, symbol_replacements
def expand_atoms_to_use_atom(mol, atoms_to_use, atom_idx, groups = [], symbol_replacements = []):
'''Given an RDKit molecule and a list of AtomIdx which should be included
in the reaction, this function extends the list of atoms_to_use by considering
a candidate atom extension, atom_idx'''
# Skip current candidate atom if it is already included
if atom_idx in atoms_to_use:
return atoms_to_use, symbol_replacements
# See if this atom belongs to any groups
found_in_group = False
for group in groups:
if int(atom_idx) in group: # int correction
if v: print('added group centered at {}'.format(atom_idx))
# Add the whole list, redundancies don't matter
atoms_to_use.extend(list(group))
found_in_group = True
if found_in_group: return atoms_to_use, symbol_replacements
# How do we add an atom that wasn't in an identified important functional group?
# Develop special SMARTS symbol
# Include this atom
atoms_to_use.append(atom_idx)
# Look for replacements
symbol_replacements.append((atom_idx, convert_atom_to_wildcard(mol.GetAtomWithIdx(atom_idx))))
return atoms_to_use, symbol_replacements
def convert_atom_to_wildcard(atom):
'''This function takes an RDKit atom and turns it into a wildcard
using hard-coded generalization rules. This function should be used
when candidate atoms are used to extend the reaction core for higher
generalizability'''
if SUPER_GENERAL_TEMPLATES:
if ':' in atom.GetSmarts():
# Fully generalize atom-mapped neighbors because they aren't a leaving group
label = re.search('\:[0-9]+\]', atom.GetSmarts())
return '[*{}'.format(label.group())
if not SUPER_GENERAL_TEMPLATES:
# Is this a terminal atom? We can tell if the degree is one
if atom.GetDegree() == 1:
return atom.GetSmarts()
# Initialize
symbol = '['
# Add atom primitive (don't use COMPLETE wildcards)
if atom.GetAtomicNum() != 6:
symbol += '#{};'.format(atom.GetAtomicNum())
elif atom.GetIsAromatic():
symbol += 'c;'
else:
symbol += 'C;'
if not SUPER_GENERAL_TEMPLATES:
# Charge is important
if atom.GetFormalCharge() != 0:
charges = re.search('([-+]+[1-9]?)', atom.GetSmarts())
if charges: symbol += charges.group() + ';'
# Strip extra semicolon
if symbol[-1] == ';': symbol = symbol[:-1]
# Close with label or with bracket
label = re.search('\:[0-9]+\]', atom.GetSmarts())
if label:
symbol += label.group()
else:
symbol += ']'
if v:
if symbol != atom.GetSmarts():
print('Improved generality of atom SMARTS {} -> {}'.format(atom.GetSmarts(), symbol))
return symbol
def get_fragments_for_changed_atoms(mols, changed_atom_tags, radius = 0,
category = 'reactants', expansion = []):
'''Given a list of RDKit mols and a list of changed atom tags, this function
computes the SMILES string of molecular fragments using MolFragmentToSmiles
for all changed fragments'''
fragments = ''
for mol in mols:
# Initialize list of replacement symbols (updated during expansion)
symbol_replacements = []
# Are we looking for groups? (reactants only)
if category == 'reactants':
groups = get_special_groups(mol)
else:
groups = []
# Build list of atoms to use
atoms_to_use = []
for atom in mol.GetAtoms():
# Check self (only tagged atoms)
if ':' in atom.GetSmarts():
if atom.GetSmarts().split(':')[1][:-1] in changed_atom_tags:
atoms_to_use.append(atom.GetIdx())
symbol = atom.GetSmarts()
# CUSTOM SYMBOL CHANGES
if atom.GetTotalNumHs() == 0:
# Be explicit when there are no hydrogens
if ':' in symbol: # stick H0 before label
symbol = symbol.replace(':', ';H0:')
else: # stick before end
symbol = symbol.replace(']', ';H0]')
# print('Being explicit about H0!!!!')
if atom.GetFormalCharge() == 0:
# Also be explicit when there is no charge
if ':' in symbol:
symbol = symbol.replace(':', ';+0:')
else:
symbol = symbol.replace(']', ';+0]')
if USE_STEREOCHEMISTRY:
if atom.GetChiralTag() != Chem.rdchem.ChiralType.CHI_UNSPECIFIED:
# Be explicit when there is a tetrahedral chiral tag
if atom.GetChiralTag() == Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW:
tag = '@'
elif atom.GetChiralTag() == Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW:
tag = '@@'
if ':' in symbol:
symbol = symbol.replace(':', ';{}:'.format(tag))
else:
symbol = symbol.replace(']', ';{}]'.format(tag))
if symbol != atom.GetSmarts():
symbol_replacements.append((atom.GetIdx(), symbol))
continue
# Check neighbors (any atom)
for k in range(radius):
atoms_to_use, symbol_replacements = expand_atoms_to_use(mol, atoms_to_use,
groups = groups, symbol_replacements = symbol_replacements)
if category == 'products':
# Add extra labels to include (for products only)
if expansion:
for atom in mol.GetAtoms():
if ':' not in atom.GetSmarts(): continue
label = atom.GetSmarts().split(':')[1][:-1]
if label in expansion and label not in changed_atom_tags:
atoms_to_use.append(atom.GetIdx())
# Make the expansion a wildcard
symbol_replacements.append((atom.GetIdx(), convert_atom_to_wildcard(atom)))
if v: print('expanded label {} to wildcard in products'.format(label))
# Make sure unmapped atoms are included (from products)
for atom in mol.GetAtoms():
if not atom.HasProp('molAtomMapNumber'):
atoms_to_use.append(atom.GetIdx())
# Define new symbols to replace terminal species with wildcards
# (don't want to restrict templates too strictly)
symbols = [atom.GetSmarts() for atom in mol.GetAtoms()]
for (i, symbol) in symbol_replacements:
symbols[i] = symbol
if not atoms_to_use: continue
# if v:
# print('~~ replacement for this ' + category[:-1])
# print('{} -> {}'.format([mol.GetAtomWithIdx(x).GetSmarts() for (x, s) in symbol_replacements],
# [s for (x, s) in symbol_replacements]))
# Remove molAtomMapNumber before canonicalization
[x.ClearProp('molAtomMapNumber') for x in mol.GetAtoms()]
fragments += '(' + AllChem.MolFragmentToSmiles(mol, atoms_to_use,
atomSymbols = symbols, allHsExplicit = True,
isomericSmiles = USE_STEREOCHEMISTRY, allBondsExplicit = True) + ').'
return fragments[:-1]
def expand_changed_atom_tags(changed_atom_tags, reactant_fragments):
'''Given a list of changed atom tags (numbers as strings) and a string consisting
of the reactant_fragments to include in the reaction transform, this function
adds any tagged atoms found in the reactant side of the template to the
changed_atom_tags list so that those tagged atoms are included in the products'''
expansion = []
atom_tags_in_reactant_fragments = re.findall('\:([0-9]+)\]', reactant_fragments)
for atom_tag in atom_tags_in_reactant_fragments:
if atom_tag not in changed_atom_tags:
expansion.append(atom_tag)
if v: print('after building reactant fragments, additional labels included: {}'.format(expansion))
return expansion
def get_special_groups(mol):
'''Given an RDKit molecule, this function returns a list of tuples, where
each tuple contains the AtomIdx's for a special group of atoms which should
be included in a fragment all together. This should only be done for the
reactants, otherwise the products might end up with mapping mismatches'''
if SUPER_GENERAL_TEMPLATES: return []
# Define templates, based on Functional_Group_Hierarchy.txt from Greg Landrum
group_templates = [
'C(=O)Cl', # acid chloride
'C(=O)[O;H,-]', # carboxylic acid
'[$(S-!@[#6])](=O)(=O)(Cl)', # sulfonyl chloride
'[$(B-!@[#6])](O)(O)', # boronic acid
'[$(N-!@[#6])](=!@C=!@O)', # isocyanate
'[N;H0;$(N-[#6]);D2]=[N;D2]=[N;D1]', # azide
'O=C1N(Br)C(=O)CC1', # NBS brominating agent
'C=O', # carbonyl
'ClS(Cl)=O', # thionyl chloride
'[Mg][Br,Cl]', # Grignard (non-dissociated)
'[#6]S(=O)(=O)[O]', # RSO3 leaving group
'[O]S(=O)(=O)[O]', # SO4 group
'[N-]=[N+]=[C]', # diazo-alkyl
]
# Build list
groups = []
for template in group_templates:
matches = mol.GetSubstructMatches(Chem.MolFromSmarts(template))
groups.extend(list(matches))
return groups
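# Hedged illustration (not part of the original script): how one of the group
# templates above is matched, using standard RDKit calls. The molecule and the
# resulting atom indices are just an example.
#   >>> mol = Chem.MolFromSmiles('CC(=O)Cl')          # acetyl chloride
#   >>> patt = Chem.MolFromSmarts('C(=O)Cl')          # acid chloride template
#   >>> mol.GetSubstructMatches(patt)
#   ((1, 2, 3),)   # these atom indices would be kept together in the fragment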
def mol_list_to_inchi(mols):
'''List of RDKit molecules to InChI string separated by ++'''
inchis = [Chem.MolToInchi(mol) for mol in mols]
return ' ++ '.join(sorted(inchis))
def mol_list_from_inchi(inchis):
'''InChI string separated by ++ to list of RDKit molecules'''
return [Chem.MolFromInchi(inchi.strip()) for inchi in inchis.split('++')]
def canonicalize_template(template):
'''This function takes one-half of a template SMARTS string
(i.e., reactants or products) and re-orders them based on
an equivalent string without atom mapping.'''
# Strip labels to get sort orders
template_nolabels = re.sub('\:[0-9]+\]', ']', template)
# Split into separate molecules *WITHOUT wrapper parentheses*
template_nolabels_mols = template_nolabels[1:-1].split(').(')
template_mols = template[1:-1].split(').(')
# Split into fragments within those molecules
for i in range(len(template_mols)):
nolabel_mol_frags = template_nolabels_mols[i].split('.')
mol_frags = template_mols[i].split('.')
# Get sort order within molecule, defined WITHOUT labels
sortorder = [j[0] for j in sorted(enumerate(nolabel_mol_frags), key = lambda x:x[1])]
# Apply sorting and merge list back into overall mol fragment
template_nolabels_mols[i] = '.'.join([nolabel_mol_frags[j] for j in sortorder])
template_mols[i] = '.'.join([mol_frags[j] for j in sortorder])
# Get sort order between molecules, defined WITHOUT labels
sortorder = [j[0] for j in sorted(enumerate(template_nolabels_mols), key = lambda x:x[1])]
# Apply sorting and merge list back into overall transform
template = '(' + ').('.join([template_mols[i] for i in sortorder]) + ')'
return template
def reassign_atom_mapping(transform):
'''This function takes an atom-mapped reaction and reassigns
the atom-mapping labels (numbers) from left to right, once
that transform has been canonicalized.'''
all_labels = re.findall('\:([0-9]+)\]', transform)
# Define list of replacements which matches all_labels *IN ORDER*
replacements = []
replacement_dict = {}
counter = 1
for label in all_labels: # keep in order! this is important
if label not in replacement_dict:
replacement_dict[label] = str(counter)
counter += 1
replacements.append(replacement_dict[label])
# Perform replacements in order
transform_newmaps = re.sub('\:[0-9]+\]',
lambda match: (':' + replacements.pop(0) + ']'),
transform)
return transform_newmaps
def canonicalize_transform(transform):
'''This function takes an atom-mapped SMARTS transform and
converts it to a canonical form by, if necessary, rearranging
the order of reactant and product templates and reassigning
atom maps.'''
transform_reordered = '>>'.join([canonicalize_template(x) for x in transform.split('>>')])
return reassign_atom_mapping(transform_reordered)
def convert_to_retro(transform):
'''This function takes a forward synthesis and converts it to a
retrosynthesis. Only transforms with a single product are kept, since
retrosyntheses should have a single reactant (and split it up accordingly).'''
# Split up original transform
reactants = transform.split('>>')[0]
products = transform.split('>>')[1]
# Don't force products to be from different molecules (?)
# -> any reaction template can be intramolecular (might remove later)
products = products[1:-1].replace(').(', '.')
# Don't force the "products" of a retrosynthesis to be two different molecules!
reactants = reactants[1:-1].replace(').(', '.')
return '>>'.join([products, reactants])
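# Hedged end-to-end sketch (not part of the original script): extracting a
# retrosynthetic template from a single atom-mapped reaction SMILES using the
# helpers above. Assumes the module globals `v` and `SUPER_GENERAL_TEMPLATES`
# have been set (normally done in __main__); the reaction string is made up.
#   >>> v = False; SUPER_GENERAL_TEMPLATES = False
#   >>> rxn_smi = '[CH3:1][C:2](=[O:3])[Cl:4].[NH2:5][CH3:6]>>[CH3:1][C:2](=[O:3])[NH:5][CH3:6]'
#   >>> reactants, agents, products = [mols_from_smiles_list(x.split('.')) for x in rxn_smi.split('>')]
#   >>> changed_atoms, changed_atom_tags, err = get_changed_atoms(reactants, products)
#   >>> reactant_frags = get_fragments_for_changed_atoms(reactants, changed_atom_tags, radius=1, category='reactants')
#   >>> product_frags = get_fragments_for_changed_atoms(products, changed_atom_tags, radius=0,
#   ...     expansion=expand_changed_atom_tags(changed_atom_tags, reactant_frags), category='products')
#   >>> retro = convert_to_retro(canonicalize_transform('{}>>{}'.format(reactant_frags, product_frags)))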
def main(N = 15, folder = ''):
'''Read reactions'''
global v
# Define scoring variables
total_attempted = 0 # total reactions simulated (excludes skipped)
total_correct = 0 # actual products predicted
total_precise = 0 # ONLY actual products predicted
total_unmapped = 0
total_partialmapped = 0
total_nonreaction = 0
N = min([N, REACTION_DB.count()])
try: # to allow breaking
# Look for entries
i = -1
for example_doc in REACTION_DB.find(no_cursor_timeout=True):
i += 1
# Are we done?
if i == N:
i -= 1
break
if v:
print('##################################')
print('### RXN {}'.format(i))
print('##################################')
try:
# Unpack
reaction_smiles = str(example_doc[reaction_smiles_field])
reactants, agents, products = [mols_from_smiles_list(x) for x in
[mols.split('.') for mols in reaction_smiles.split('>')]]
[Chem.SanitizeMol(mol) for mol in reactants + agents + products]
except Exception as e:
# can't sanitize -> skip
print(e)
print('Could not load SMILES or sanitize')
print('ID: {}'.format(example_doc['_id']))
continue
try:
# Check product atom mapping
skip = False
for product in products:
if sum([a.HasProp('molAtomMapNumber') for a in product.GetAtoms()]) < len(product.GetAtoms()):
print('Not all product atoms have atom mapping, skipping')
print('ID: {}'.format(example_doc['_id']))
print('REACTION: {}'.format(example_doc['reaction_smiles']))
skip = True
if skip:
total_partialmapped += 1
continue
# Get unique InChi for products we are looking for
# remove cases where product shows up
product_inchis_split = mol_list_to_inchi(products).split(' ++ ')
for reactant in reactants:
reactant_inchi = mol_list_to_inchi([reactant])
if reactant_inchi in product_inchis_split:
product_inchis_split.remove(reactant_inchi)
product_inchis = ' ++ '.join(product_inchis_split)
if v: print(reaction_smiles)
if None in reactants + products:
print('Could not parse all molecules in reaction, skipping')
print('ID: {}'.format(example_doc['_id']))
continue
if not product_inchis:
print('Product molecules no different than reactant molecules')
print('ID: {}'.format(example_doc['_id']))
total_nonreaction += 1
continue
# Calculate changed atoms
changed_atoms, changed_atom_tags, err = get_changed_atoms(reactants, products)
if err:
print('Could not get changed atoms')
print('ID: {}'.format(example_doc['_id']))
continue
if not changed_atom_tags:
print('No atoms changed?')
print('ID: {}'.format(example_doc['_id']))
continue
# Get fragments for reactants
reactant_fragments = get_fragments_for_changed_atoms(reactants, changed_atom_tags,
radius = 1, expansion = [], category = 'reactants')
# Get fragments for products
# (WITHOUT matching groups but WITH the addition of reactant fragments)
product_fragments = get_fragments_for_changed_atoms(products, changed_atom_tags,
radius = 0, expansion = expand_changed_atom_tags(changed_atom_tags, reactant_fragments),
category = 'products')
# Report transform
rxn_string = '{}>>{}'.format(reactant_fragments, product_fragments)
if v: print('\nOverall fragment transform: {}'.format(rxn_string))
# Load into RDKit
rxn = AllChem.ReactionFromSmarts(rxn_string)
if rxn.Validate()[1] != 0:
print('Could not validate reaction successfully')
print('ID: {}'.format(example_doc['_id']))
print('rxn_string: {}'.format(rxn_string))
continue
# print('here')
# # Analyze
# if v:
# print('Original number of reactants: {}'.format(len(reactants)))
# print('Number of reactants in transform: {}'.format(rxn.GetNumReactantTemplates()))
# Run reaction
correct = False
if not DO_NOT_TEST:
# Try all combinations of reactants that fit template
combinations = itertools.combinations(reactants, rxn.GetNumReactantTemplates())
unique_product_sets = []
for combination in combinations:
outcomes = rxn.RunReactants(list(combination))
if not outcomes: continue
#if v: print('\nFor reactants {}'.format([Chem.MolToSmiles(mol) for mol in combination]))
for j, outcome in enumerate(outcomes):
#if v: print('- outcome {}/{}'.format(j + 1, len(outcomes)))
for k, product in enumerate(outcome):
if v: print('~~prod: {}'.format(Chem.MolToSmiles(product)))
try:
Chem.SanitizeMol(product)
product.UpdatePropertyCache()
except Exception as e:
print('warning: {}'.format(e))
if v:
#print(' - product {}: {}'.format(k, Chem.MolToSmiles(product, isomericSmiles = True)))
try:
pass
#product = Chem.MolFromSmiles(Chem.MolToSmiles(product, isomericSmiles = True))
# Draw.MolToFile(product, '{}/rxn_{}/outcome_{}_product_{}.png'.format(folder, i, j, k), size=(250,250))
except Exception as e:
print('warning: could not draw {}: {}'.format(Chem.MolToSmiles(product, isomericSmiles = USE_STEREOCHEMISTRY), e))
# Remove untransformed products
product_set_split = mol_list_to_inchi(outcome).split(' ++ ')
for reactant in reactants:
reactant_inchi = mol_list_to_inchi([reactant])
if reactant_inchi in product_set_split:
product_set_split.remove(reactant_inchi)
product_set = ' ++ '.join(product_set_split)
if product_set not in unique_product_sets:
unique_product_sets.append(product_set)
if v:
print('\nExpctd {}'.format(product_inchis))
for product_set in unique_product_sets:
print('Found {}'.format(product_set))
if product_inchis in unique_product_sets:
if v: print('\nSuccessfully found true products!')
correct = True
total_correct += 1
if len(unique_product_sets) > 1:
if v: print('...but also found {} more'.format(len(unique_product_sets) - 1))
else:
total_precise += 1
else:
if v: print('\nDid not find true products')
if len(unique_product_sets) > 1:
if v: print('...but found {} unexpected ones'.format(len(unique_product_sets)))
if correct or DO_NOT_TEST:
# Valid reaction!
rxn_canonical = canonicalize_transform(rxn_string)
retro_canonical = convert_to_retro(rxn_canonical)
# Is this old?
doc = TRANSFORM_DB.find_one({'reaction_smarts': retro_canonical})
if doc:
# Old
TRANSFORM_DB.update_one(
{'_id': doc['_id']},
{
'$inc': {
'count': 1,
},
'$addToSet': {
'references': example_doc['_id'],
},
}
)
else:
# New
result = TRANSFORM_DB.insert_one(
{
'reaction_smarts': retro_canonical,
'rxn_example': reaction_smiles,
'references': [example_doc['_id']],
'explicit_H': False,
'count': 1,
}
)
#print('Created database entry {}'.format(result.inserted_id))
else:
print('Did not find true product')
print('ID: {}'.format(example_doc['_id']))
print('REACTION: {}'.format(example_doc[reaction_smiles_field]))
print('TEMPLATE: {}'.format(rxn_string))
total_attempted += 1
except Exception as e:
if v:
print(e)
print('skipping')
#raw_input('Enter anything to continue')
continue
# Report progress
if (i % 100) == 0:
print('{}/{}'.format(i, N))
# Pause
#if v: raw_input('Enter anything to continue...')
except KeyboardInterrupt:
print('Stopped early!')
total_examples = i + 1
print('...finished looking through {} reaction records'.format(min([N, i + 1])))
print('Unmapped reactions in {}/{} ({}%) cases'.format(total_unmapped,
total_examples, total_unmapped * 100.0 / total_examples))
print('Partially-mapped reactions in {}/{} ({}%) cases'.format(total_partialmapped,
total_examples, total_partialmapped * 100.0 / total_examples))
print('Non-reactions (stereochem only?) in {}/{} ({}%) cases'.format(total_nonreaction,
total_examples, total_nonreaction * 100.0 / total_examples))
print('Error-free parsing in {}/{} ({}%) cases'.format(total_attempted,
total_examples, total_attempted * 100.0 / total_examples))
print('Correct product predictions in {}/{} ({}%) cases'.format(total_correct,
total_examples, total_correct * 100.0 / total_examples))
print('Specific product predictions in {}/{} ({}%) cases'.format(total_precise,
total_examples, total_precise * 100.0 / total_examples))
print('Created {} database entries'.format(TRANSFORM_DB.find().count()))
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', type = bool, default = False,
help = 'Verbose printing (incl. saving images); defaults to False')
parser.add_argument('-o', '--out', type = str, default = '',
help = 'Folder to output images to if verbose')
parser.add_argument('-n', '--num', type = int, default = 50,
help = 'Maximum number of records to examine; defaults to 50')
parser.add_argument('-g', '--general', type = str, default = 'y',
help = 'Use incredibly general templates; defaults to y\n' +
'Only appropriate for forward enumeration')
parser.add_argument('-t', '--test', type = str, default = 'y',
help = 'Whether to *skip* testing; defaults to y')
parser.add_argument('--create_mass', type = str, default = 'y',
help = 'Whether to allow reactions to produce mass; defaults to y')
args = parser.parse_args()
v = args.v
lg = RDLogger.logger()
if not v: lg.setLevel(4)
SUPER_GENERAL_TEMPLATES = args.general in ['y', '1', 'True','Yes', 't', 'T']
print('SUPER GENERAL TEMPLATES? {}'.format(SUPER_GENERAL_TEMPLATES))
DO_NOT_TEST = args.test in ['y', '1', 'True','Yes', 't', 'T']
print('DO NOT TEST? {}'.format(DO_NOT_TEST))
clear = raw_input('Do you want to clear the {} existing templates? '.format(TRANSFORM_DB.find().count()))
if clear in ['y', 'Y', 'yes', '1', 'Yes']:
result = TRANSFORM_DB.delete_many({})
print('Cleared {} entries from collection'.format(result.deleted_count))
main(N = args.num, folder = args.out)
| connorcoley/ochem_predict_nn | data/generate_reaction_templates.py | Python | mit | 28,770 | ["RDKit"] | f8372a092aeec04463281677d4294b1a1356c598c8db6256342c4a080e19bb63 |
"""
The ``fatiando`` package contains all the subpackages and modules required for
most tasks.
Modules for each geophysical method are grouped in subpackages:
* :mod:`gravmag <fatiando.gravmag>`:
Gravity and magnetics (i.e., potential fields)
* :mod:`seismic <fatiando.seismic>`:
Seismics and seismology
* :mod:`geothermal <fatiando.geothermal>`:
Geothermal heat transfer modeling
Modules for gridding, meshing, visualization, user interface, input/output etc:
* :mod:`mesher <fatiando.mesher>`:
Mesh generation and definition of geometric elements
* :mod:`gridder <fatiando.gridder>`:
Grid generation and operations (e.g., interpolation)
* :mod:`vis <fatiando.vis>`:
Plotting utilities for 2D (using matplotlib) and 3D (using mayavi)
* :mod:`datasets <fatiando.datasets>`:
Fetch and load datasets and models from web repositories
* :mod:`utils <fatiando.utils>`:
Miscellaneous utilities, like mathematical functions, unit conversion, etc
* :mod:`~fatiando.constants`:
Physical constants and unit conversions
Also included is the :mod:`fatiando.inversion` package with utilities for
implementing inverse problems. There you'll find:
* :mod:`~fatiando.inversion.regularization`: Common regularizing functions and
base classes for building custom ones
* :mod:`~fatiando.inversion.base`: Base classes to implement your inverse
problem. They do most of the heavy lifting for you!
* :mod:`~fatiando.inversion.solvers`: Functions for optimization (used by
:mod:`~fatiando.inversion.base` classes)
Inversion methods in Fatiando leverage :mod:`fatiando.inversion`, providing a
common interface and usage patterns. For examples, see modules
:mod:`fatiando.seismic.epic2d`,
:mod:`fatiando.seismic.srtomo`,
:mod:`fatiando.gravmag.basin2d`,
:mod:`fatiando.gravmag.euler`,
:mod:`fatiando.gravmag.eqlayer`,
etc.
The design of :mod:`fatiando.inversion` was inspired by `scikit-learn`_, an
amazing machine-learning library.
.. _scikit-learn: http://scikit-learn.org
----
"""
from ._version import get_versions
__version__ = get_versions()['version']
__commit__ = get_versions()['full']
del get_versions
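# A minimal, hedged import sketch (not part of this file): the subpackage names
# are taken from the docstring above and are imported like any other package members.
#   >>> from fatiando import gridder, mesher, utils
#   >>> import fatiando.gravmag as gravmag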
| eusoubrasileiro/fatiando | fatiando/__init__.py | Python | bsd-3-clause | 2,118 | ["Mayavi"] | 3dae121462f1ae2dfc1f72b4fbcfa35b7478b679bfadcd6ab1a748af2dd7e85e |
"""
Weak Lensing Performance Calculator
===================================
SMN
systematic ellipticities e_s(x,y)
- CTI: richard already worked on this
- bias: needs a model
- flat fielding: probably not systematic
- non-linerarity:
-
SMN
2d power spectra C(l)
unit test: gaussian random field + known power spectrum, e-mode, b-mode=0, eb-mode=0 (andy's code)
outputs an ascii file
Tom
Fisher code, biases and errors
takes in an ascii file
C-code, call from python
SMN
plot the results
"""
| sniemi/EuclidVisibleInstrument | sandbox/CosmologyCalculator.py | Python | bsd-2-clause | 506 | ["Gaussian"] | ee7eda1dd06719f9513043fd1f85e70e43ba0252f7dfd24d95c92bdc9276cd51 |
"""
Class for management of MQ communication
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.Core.Utilities.DErrno import EMQUKN
__RCSID__ = "$Id$"
def createMQConnector(parameters=None):
""" Function creates and returns the MQConnector object based.
Args:
parameters(dict): set of parameters for the MQConnector constructor,
it should also contain pair 'MQType':mqType, where
mqType is a string used as a prefix for the specialized MQConnector
class.
Returns:
S_OK or S_ERROR: with loaded specialized class of MQConnector.
"""
mqType = parameters.get('MQType', None)
result = getMQConnectorClass(mqType=mqType)
if not result['OK']:
gLogger.error('Failed to getMQConnectorClass:', '%s' % (result['Message']))
return result
ceClass = result['Value']
try:
mqConnector = ceClass(parameters)
if not result['OK']:
return result
except Exception as exc: # pylint: disable=broad-except
gLogger.exception('Could not instantiate MQConnector object', lExcInfo=exc)
return S_ERROR(EMQUKN, '')
return S_OK(mqConnector)
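# Hedged usage sketch (not part of the original module): parameter names other
# than 'MQType' are illustrative only, and 'Stomp' assumes a StompMQConnector
# plugin is available to the ObjectLoader.
#   >>> res = createMQConnector(parameters={'MQType': 'Stomp', 'Host': 'mq.example.org', 'Port': '61613'})
#   >>> if res['OK']:
#   ...     connector = res['Value']
#   ...     connector.setupConnection()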
def getMQConnectorClass(mqType):
""" Function loads the specialized MQConnector class based on mqType.
It is assumed that the MQConnector class has a name in the format mqTypeMQConnector,
e.g. StompMQConnector for mqType 'Stomp'.
Args:
mqType(str): prefix of specialized class name e.g. Stomp.
Returns:
S_OK or S_ERROR: with loaded specialized class of MQConnector.
"""
subClassName = mqType + 'MQConnector'
result = ObjectLoader().loadObject('Resources.MessageQueue.%s' % subClassName)
if not result['OK']:
gLogger.error('Failed to load object', '%s: %s' % (subClassName, result['Message']))
return result
class MQConnectionError(Exception):
""" specialized exception
"""
pass
class MQConnector(object):
"""
Class for management of message queue connections
"""
def __init__(self, parameters=None):
""" Standard constructor
"""
if not parameters:
parameters = {}
self.parameters = parameters
def setupConnection(self, parameters=None):
"""
:param dict parameters: dictionary with additional MQ parameters if any
:return: S_OK/S_ERROR
"""
raise NotImplementedError('This method should be implemented by child class')
def put(self, message, parameters=None):
""" Send message to a MQ server
:param message: any json encodable structure
:return: S_OK/S_ERROR
"""
raise NotImplementedError('This method should be implemented by child class')
def connect(self, parameters=None):
"""
:param dict parameters: dictionary with additional parameters if any
:return: S_OK/S_ERROR
"""
raise NotImplementedError('This method should be implemented by child class')
def disconnect(self, parameters=None):
"""
Disconnects from the message queue server
:param dict parameters: dictionary with additional parameters if any
:return: S_OK/S_ERROR
"""
raise NotImplementedError('This method should be implemented by child class')
def subscribe(self, parameters=None):
"""
Subscribes to the message queue server
:param dict parameters: dictionary with additional parameters if any
:return: S_OK/S_ERROR
"""
raise NotImplementedError('This method should be implemented by child class')
def unsubscribe(self, parameters=None):
"""
Unsubscribes from the message queue server
:param dict parameters: dictionary with additional parameters if any
:return: S_OK/S_ERROR
"""
raise NotImplementedError('This method should be implemented by child class')
| yujikato/DIRAC | src/DIRAC/Resources/MessageQueue/MQConnector.py | Python | gpl-3.0 | 3,799 | ["DIRAC"] | 9c8a7b15e2cb352c740826f785908750347efa356c45781c690e5e385a74ee80 |
#!/usr/bin/env python
""" Visualization functions for connectivity analysis. """
import sys
import os.path as op
import numpy as np
import scipy as sci
import matplotlib.pyplot as pl
import mne
import yaml
import pickle
def sensor_connectivity_3d(raw, picks, con, idx, n_con=20, min_dist=0.05,
scale_factor=0.005, tube_radius=0.001):
""" Function to plot sensor connectivity showing strongest
connections(n_con) excluding sensors that are less than min_dist apart.
https://github.com/mne-tools/mne-python/blob/master/examples/connectivity/plot_sensor_connectivity.py
Parameters
----------
raw : Raw object
Instance of mne.io.Raw
picks : list
Picks to be included.
con : ndarray (n_channels, n_channels)
Connectivity matrix.
idx : list
List of indices of sensors of interest.
n_con : int
Number of connections of interest.
min_dist : float
Minimum distance between sensors allowed.
Note: Please modify scale factor and tube radius to appropriate sizes
if the plot looks scrambled.
"""
# Now, visualize the connectivity in 3D
try:
from enthought.mayavi import mlab
except:
from mayavi import mlab
mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
# Plot the sensor location
sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
sens_loc = np.array(sens_loc)
pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
color=(1, 1, 1), opacity=1, scale_factor=scale_factor)
# Get the strongest connections
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if sci.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
vmin=vmin, vmax=vmax, tube_radius=tube_radius,
colormap='RdBu')
points.module_manager.scalar_lut_manager.reverse_lut = True
mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005,
color=(0, 0, 0))
view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
mlab.view(*view)
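# Hedged usage sketch (not part of the original module): `con` is assumed to be
# a precomputed (n_channels x n_channels) connectivity matrix; the file name is
# illustrative.
#   >>> raw = mne.io.read_raw_fif('sample_raw.fif')
#   >>> picks = mne.pick_types(raw.info, meg=True, eeg=False, exclude='bads')
#   >>> idx = list(range(len(picks)))
#   >>> sensor_connectivity_3d(raw, picks, con, idx, n_con=20)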
def plot_grouped_connectivity_circle(yaml_fname, con, orig_labels,
node_order_size=68, indices=None,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None, colormap='hot',
colorbar=False):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided.
orig_labels : list of str
Label names in the order as appears in con.
'''
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.load(f)
else:
print '%s - File not found.' % yaml_fname
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
lh_labels = [name + '-lh' for name in label_names]
rh_labels = [name + '-rh' for name in label_names]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in labels.keys()]
group_bound = [0] + group_bound[::-1] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
assert len(label_colors) == len(node_order), 'Number of colours do not match'
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
# orig_labels is the order of nodes in the con matrix (important)
from mne.viz import plot_connectivity_circle
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles, colormap=colormap,
node_colors=reordered_colors,
node_edgecolor='white', fig=fig,
fontsize_names=6, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
indices=indices, title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
labels.keys())]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=300)
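# Hedged usage sketch (not part of the original module): assumes an aparc-style
# grouping YAML and a 68 x 68 connectivity matrix whose rows follow orig_labels;
# the file names are illustrative.
#   >>> import numpy as np
#   >>> con = np.load('con_matrix.npy')                       # shape (68, 68)
#   >>> orig_labels = [line.strip() for line in open('label_names.txt')]
#   >>> plot_grouped_connectivity_circle('aparc_grouping.yaml', con, orig_labels,
#   ...                                  node_order_size=68, out_fname='circle.png', n_lines=50)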
def plot_generic_grouped_circle(yaml_fname, con, orig_labels,
node_order_size,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None,
colorbar=False):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided. This is not specific to aparc and
does not automatically split the labels into left and right hemispheres.
orig_labels : list of str
Label names in the order as appears in con.
'''
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.load(f)
else:
print '%s - File not found.' % yaml_fname
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
# here label_names are the node_order
node_order = label_names
assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in labels.keys()]
group_bound = [0] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
assert len(label_colors) == len(node_order), 'Number of colours do not match'
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, label_names, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
# orig_labels is the order of nodes in the con matrix (important)
from mne.viz import plot_connectivity_circle
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles,
node_colors=reordered_colors,
node_edgecolor='white', fig=fig,
fontsize_names=8, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
labels.keys())]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=300)
| fboers/jumegX | connectivity/con_viz.py | Python | bsd-3-clause | 10,426 | ["Mayavi"] | c961b055a0a48a5e6b3e93a6f1ac75cb24002d91d2ab174f35748bd2f45312b7 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import (
Electrons,
ElectronsAlgorithm,
KSampling,
PPModel,
RelaxationMethod,
Smearing,
SpinMode,
lattice_from_abivars,
species_by_znucl,
structure_to_abivars,
)
from pymatgen.util.testing import PymatgenTest
class LatticeFromAbivarsTest(PymatgenTest):
def test_rprim_acell(self):
l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))
self.assertAlmostEqual(l1.volume, bohr_to_ang**3 * 1000)
assert l1.angles == (90, 90, 90)
l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))
assert l1 == l2
l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))
abi_rprimd = (
np.reshape(
[
4.6188022,
0.0000000,
6.5319726,
-2.3094011,
4.0000000,
6.5319726,
-2.3094011,
-4.0000000,
6.5319726,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
abi_rprimd = (
np.reshape(
[
3.0000000,
0.0000000,
0.0000000,
3.8567257,
4.5962667,
0.0000000,
6.8944000,
4.3895544,
3.7681642,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))
def test_znucl_typat(self):
"""Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl."""
# Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0
# Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0
# N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0
# N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0
gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit", "gan.cif"))
# By default, znucl is filled using the first new type found in sites.
def_vars = structure_to_abivars(gan)
def_znucl = def_vars["znucl"]
self.assertArrayEqual(def_znucl, [31, 7])
def_typat = def_vars["typat"]
self.assertArrayEqual(def_typat, [1, 1, 2, 2])
# But it's possible to enforce a particular value of typat and znucl.
enforce_znucl = [7, 31]
enforce_typat = [2, 2, 1, 1]
enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)
self.assertArrayEqual(enf_vars["znucl"], enforce_znucl)
self.assertArrayEqual(enf_vars["typat"], enforce_typat)
self.assertArrayEqual(def_vars["xred"], enf_vars["xred"])
assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"]
for itype1, itype2 in zip(def_typat, enforce_typat):
assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]
with self.assertRaises(Exception):
structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertMSONable(polarized)
self.assertMSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:" + str(1.0 / Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
assert nosmear == Smearing.as_smearing("nosmearing")
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
self.assertMSONable(nosmear)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
_ = algo.to_abivars()
# Test pickle
self.serialize_with_pickle(algo)
# Test dict methods
self.assertMSONable(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol == 2)
self.assertTrue(default_electrons.nspinor == 1)
self.assertTrue(default_electrons.nspden == 2)
_ = default_electrons.to_abivars()
# new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
custom_electrons = Electrons(
spin_mode="unpolarized",
smearing="marzari4:0.2 eV",
algorithm=ElectronsAlgorithm(nstep=70),
nband=10,
charge=1.0,
comment="Test comment",
)
# Test dict methods
self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertMSONable(monkhorst)
self.assertMSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertMSONable(atoms_and_cell)
self.assertMSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
# print(godby)
# print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:" + str(12.0 / Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.get_noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertMSONable(godby)
| materialsproject/pymatgen | pymatgen/io/abinit/tests/test_abiobjects.py | Python | mit | 7,646 | ["ABINIT", "pymatgen"] | 0a81eb19b5517720a3d4402e8e4da64a12dceddeea356d789589a64773438888 |
from Firefly import logging
from Firefly.const import SERVICE_CONFIG_FILE
from Firefly.helpers.service import Service
import configparser
from forecastiopy import ForecastIO, FIOCurrently, FIOAlerts, FIODaily
from Firefly import scheduler
TITLE = 'Dark Sky Service for Firefly'
AUTHOR = 'Zachary Priddy me@zpriddy.com'
SERVICE_ID = 'service_darksky'
COMMANDS = ['refresh']
REQUESTS = ['current']
SECTION = 'DARKSKY'
# TODO: Setup function should get the config from the service config file. If the
# required params are not in the config file then it should log an error message
# and abort install
# TODO: push this data to location weather info.. this could be useful
def Setup(firefly, package, **kwargs):
config = configparser.ConfigParser()
config.read(SERVICE_CONFIG_FILE)
enable = config.getboolean(SECTION, 'enable', fallback=False)
if enable is False:
return False
api_key = config.get(SECTION,'api_key',fallback=None)
refresh = config.getint(SECTION,'refresh',fallback=30)
if api_key is None:
logging.error(code='FF.DAR.SET.001') # darksky api key missing
return False
darksky = Darksky(firefly, package, api_key=api_key, refresh=refresh)
firefly.install_component(darksky)
return True
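# Hedged example of the expected SERVICE_CONFIG_FILE section, inferred from the
# config reads above (key names come from the code; values are placeholders):
#
#   [DARKSKY]
#   enable = true
#   api_key = YOUR_DARKSKY_API_KEY
#   refresh = 30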
class Darksky(Service):
def __init__(self, firefly, package, **kwargs):
super().__init__(firefly, SERVICE_ID, package, TITLE, AUTHOR, COMMANDS, REQUESTS)
self._api_key = kwargs.get('api_key')
self._long = firefly.location.longitude
self._lat = firefly.location.latitude
self._refresh_time = kwargs.get('refresh')
self._darksky = ForecastIO.ForecastIO(self._api_key,
units=ForecastIO.ForecastIO.UNITS_SI,
lang=ForecastIO.ForecastIO.LANG_ENGLISH,
latitude = self._lat,
longitude= self._long
)
self._currently = None
self._alerts = None
self.add_command('refresh', self.refresh)
self.add_request('current', self.current)
scheduler.runEveryM(self._refresh_time, self.refresh)
self.refresh()
def refresh(self):
if self._darksky.has_currently() is True:
currently = FIOCurrently.FIOCurrently(self._darksky)
alerts = FIOAlerts.FIOAlerts(self._darksky)
#TODO: Fix this in FIODaily.has_daily()
daily = FIODaily.FIODaily(self._darksky)
print('Currently')
for item in currently.get().keys():
print(item + ' : ' + str(currently.get()[item]))
# Or access attributes directly
#print(currently.temperature)
#print(currently.humidity)
if self._darksky.has_alerts() is True:
for a in alerts.alerts:
print(a)
self._currently = currently.currently
self._alerts = alerts.alerts
else:
print('No Alert data')
if self._darksky.has_daily():
for d in range(0,daily.days()):
print(daily.get(d))
else:
print('No Currently data')
def current(self, command, refresh=False, **kwargs):
if refresh:
self.refresh()
return self._currently  # refresh() already stores the 'currently' data block here
| Firefly-Automation/Firefly | Firefly/services/darksky.py | Python | apache-2.0 | 3,196 | ["Firefly"] | d883885b193df90a1fe36d77cc9dc4078e685b81bb814703fd665108b93ab438 |
# -*- coding: utf-8 -*-
# xbmctr MEDIA CENTER is an XBMC add-on that sorts and displays
# video content from several websites to the XBMC user.
#
# Copyright (C) 2011, Dr Ayhan Colak
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# for more info please visit http://xbmctr.com
'''
Author: drascom
Date: 13/04/2012
'''
import urllib, urllib2, re, sys, cookielib
import xbmc, xbmcaddon, xbmcgui,xbmcplugin
import araclar
import mechanize
import urlresolver
import time,os,base64
import simplejson as json
#
# Add-on declarations ---------------------------------------------------------
addon_id = 'script.module.xbmctr'
__ayarlar__ = xbmcaddon.Addon(id=addon_id)
path = __ayarlar__.getAddonInfo('path')
IMAGES_PATH = xbmc.translatePath(os.path.join(path, 'resoures','image'))
addon_icon = __ayarlar__.getAddonInfo('icon')
USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1)"
downloadFolder = __ayarlar__.getSetting('download-folder')
insidans=1
#--resolvers--#
ktk='w4dhbMSxxZ9rYW4gw5x5ZQ=='
df='aHR0cDovL2ZvcnVtLnhibWN0ci5jb20vbWVtYmVyLnBocA=='
A='QWRtaW5pc3RyYXRvcg=='
f='JnBhc3N3b3JkPQ=='
k='dXNlcm5hbWU9'
b='JnN1Ym1pdD1Mb2dpbiZhY3Rpb249ZG9fbG9naW4mdXJsPWh0dHA6Ly9mb3J1bS54Ym1jdHIuY29tLw=='
r='aHR0cDovL2ZvcnVtLnhibWN0ci5jb20vdXllLQ=='
c='VklQ'
ss='TW9kZXJhdG9y'
tr='U3VwZXIgTW9kZXJhdG9y'
qq='Lmh0bWw='
yl='VXNlcm5hbWU='
kl='cGFzc3dvcmQ='
pl='TW9kIEFEQVlJ'
ttt='WWVuaSDDnHll'
#--resolvers--#
__settings__ = xbmcaddon.Addon(id='script.module.xbmctr')
__language__ = __settings__.getLocalizedString
FILENAME = "cozucu"
'''Constants'''
xbmcPlayer = xbmc.Player()
playList = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
vk=[]
value=[]
'''
Listing and pagination functions
for multi-page web sites.
'''
def unique(l):
s = set(); n = 0
for x in l:
if x not in s: s.add(x); l[n] = x; n += 1
del l[n:]
def dacka(yol):
files = os.listdir(yol)
global imps
imps = []
for i in range(len(files)):
py_name = files[i].split('.')
if len(py_name) > 1:
if py_name[1] == 'py' and py_name[0] != '__init__':
py_name = py_name[0]
imps.append(py_name)
file = open(yol+'/__init__.py','w')
toWrite = '__all__ = '+str(imps)
file.write(toWrite)
file.close()
return imps
def addDir(fileName,name, mode, url="", thumbnail=""):
u = sys.argv[0]+"?fileName="+urllib.quote_plus(fileName)+"&name="+urllib.quote_plus(name)+"&mode="+urllib.quote_plus(mode)+"&url="+urllib.quote_plus(url)
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=thumbnail)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
def fullfilm_gizli_player(url):
## print 'GIZLI PLaYER url ',url
link=araclar.get_url(url)
link=link.replace('%3A',":").replace('%3F',"?").replace('%3D',"=")
match=re.compile('v=(.*?)&').findall(link)
## print 'GIZLI PLaYER',match
url=match[0]
return url
def liste_olustur(Url,match):
## print 'Paging started'
urlList=''
for pageUrl in match:
#web page list function
urlList=urlList+pageUrl #add page to list
urlList=urlList+':;' #add seperator
total=Url+':;'+urlList #add first url
match = total.split(':;') #split links
del match [-1] #delete first seperator
#print match
info='Film '+str(len(match)-1)+' part.'
match = filter(bool,match)
return match
def prepare_face_links(videoTitle,match):
i=0
for pageLink in match:
link=araclar.get_url(pageLink)
match=re.compile('<embed src=\'.*?file=(.*?)&a').findall(link)
for videoLink in match:
i+=1
araclar.addVideoLink(videoTitle+' Part '+str(i),videoLink,'')
playList.add(videoLink)
def Youtube_Player(url):
## safe='aHR0cDovL3hibWN0ci50di8='
## link=araclar.get_url(base64.b64decode(safe))
## match1=re.compile('besir>>(.*?)<<besir').findall(link)
## for kkk in match1:
## print kkk
code=re.match(r"http://www.youtube.com/embed/(.*?)$", url).group(1)
print '[code]'+str(code)
url='plugin://plugin.video.youtube/?action=play_video&videoid=' + str(code)
return url
def uploadc_Player(url):
uploadcurl=url
link=araclar.get_url(uploadcurl)
match=re.compile('\'file\',\'(.*?)Enter.*?.mp4\'').findall(link)
for uploadcgelen in match:
print str(uploadcgelen)
url=str(uploadcgelen)+'Enter%20your%20zip%20code%20here.mp4'
return url
##def mailru_Player(url):
## referer='http://img.mail.ru/r/video2/uvpv3.swf?3'
## cookie='VID=2SlVa309oFH4; mrcu=EE18510E964723319742F901060A; p=IxQAAMr+IQAA; video_key=203516; s='
## UA='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
## m = re.search('movieSrc=(?P<url>.+?)$', url, re.IGNORECASE | re.DOTALL)
## if m:
## items = []
## headers = { "User-Agent":UA, "Referer":referer, "Cookie":cookie }
## # header "Cookie" with parameters need to be set for your download/playback
## quality = "???"
## vurl = m.group('url')
## vurl = re.sub('\&[^$]*','',vurl)
## data = araclar.get_url_headers('http://api.video.mail.ru/videos/' + vurl + '.json', headers)
## item = json.loads(data)
## for qual in item[u'videos']:
## if qual == 'sd':
## quality = "480p"
## elif qual == "hd":
## quality = "720p"
## else:
## quality = "???"
## link = item[u'videos'][qual]
## items.append({'quality':quality, 'url':link+'|Cookie='+cookie+'|Referer='+referer})
##
## # best quality
## videobul = sorted(items,key=lambda i:i['quality'])
## #resolved.reverse()
## return videobul[-1][u'url']
def Divxshare_Player(url):
value=''
xbmc.executebuiltin('Notification("Dream Clup",DIVXSTAGE Deneniyor.)')
link=araclar.get_url(url)
match=re.compile('domain="(.*?)";\n\t\t\tflashvars.file="(.*?)";\n\t\t\tflashvars\.filekey="(.*?)"').findall(link)
if not match:
xbmc.executebuiltin('Notification("Sitede HATA ",ilk Linki yok.)')
for domain,dosya,key in match:
transfer =domain+"/api/player.api.php?file="+dosya+"&codes=undefined&user=undefined&key="+key+"&pass=undefined"
link=araclar.get_url(transfer)
match=re.compile('url=(.*?)&').findall(link)
if not match:
xbmc.executebuiltin('Notification("Serverda HATA ",Ikinci Linki yok.)')
for url in match:
if url.endswith('flv'):
value=url
else:
pass
print value
return value
def stagevu_player(code):
xbmc.executebuiltin('Notification("Dream Clup",STAGEVU Deneniyor.)')
	link=araclar.get_url(code)
sv1 = re.compile('src="(.+?)" border="0">').findall(link)
link=araclar.get_url(sv1[0])
sv3 = re.compile('src="(.+?)" border="0"').findall(link)
link=araclar.get_url(sv3[0])
StageVu=re.compile('<param name="src" value="(.+?)" />').findall(link)
print "stagevu",StageVu
return StageVu[0]
def wfih_player(code):
return code
def Yesload_Player(code):
value=''
xbmc.executebuiltin('Notification("Dream Clup",YESLOAD Deneniyor.)')
link=araclar.get_url(code)
yesloadurlbul1=re.compile('<a target="_blank" href="http://yesload.net/(.*?)">').findall(link)
for x in yesloadurlbul1:
code='http://yesload.net/player_api/info?token='+x
link=araclar.get_url(code)
yesloadurlbul2=re.compile('premium.token=(.*?)"').findall(link)
for x in yesloadurlbul2:
code='http://yesload.net/player_api/info?token='+x
link=araclar.get_url(code)
yesloadplayer=re.compile('url=(.*?).flv&').findall(link)
for url in yesloadplayer:
value=url
return value
def Streamcloud_Player(url):
print "streamcoud gelen url",url
value=''
try:
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
cookie = response.info().getheader('Set-Cookie')
response.close()
match=re.compile('<input type="hidden" name="op" value="(.*?)">\n\t\t\t\t\t\t<input type="hidden" name="usr_login" value="(.*?)">\n\t\t\t\t\t\t<input type="hidden" name="id" value="(.*?)">\n\t\t\t\t\t\t<input type="hidden" name="fname" value="(.*?)">\n\t\t\t\t\t\t<input type="hidden" name="referer" value="(.*?)">\n\t\t\t\t\t\t<input type="hidden" name="hash" value="(.*?)">\n\t\t\t\t\t\t<input type="submit" name="imhuman" .*? value="(.*?)">').findall(link)
gelen_veri=re.compile('file: "(.+?)"').findall(link)
if len(gelen_veri)>0:
value=gelen_veri[0]
return value
else:
for op,usr_login,id,fname,referer,hash,imhuman in match:
import time
time.sleep(11)
gidecekveriler=urllib.urlencode({'op' : op, 'usr_login' : usr_login, 'id' : id, 'fname' : fname, 'referer' : referer, 'hash' : hash, 'imhuman' : imhuman.replace(" ","+")})
req=urllib2.Request(url)
req.add_header('Referer', url)
req.add_data(gidecekveriler)
req.add_header('Cookie',cookie)
post = urllib2.urlopen(req)
gelen_veri=post.read()
#print gelen_veri
gelen_veri=re.compile('file: "(.+?)"').findall(gelen_veri)
for final in gelen_veri:
print final
value=final
except:
pass
print "cozucu stream value",value
return value
def stream2k_Player(code):
value=''
xbmc.executebuiltin('Notification("Dream Clup",STREAM2K Deneniyor.)')
link=araclar.get_url(code)
match=re.compile('<iframe.*?src="(.*?)"><\/iframe><BR><\/div>\W+<div id="underplayer">').findall(link)
link=araclar.get_url(match[0])
match1=re.compile("file: .(.*)'").findall(link)
for url in match1:
value=url
return value
def VideoAz_Player(url):
xbmc.executebuiltin('Notification("Dream Clup",VIDEOAZ Deneniyor.)')
link=araclar.get_url(url)
print url
playerbul=re.compile('flashvars="(.*?)"').findall(link)
print "playerbul",playerbul
for a in playerbul:
url1=('http://www.video.az/assets/player/player.swf?'+a)
url=url1
return url
def Dailymotion(code):
## safe='aHR0cDovL3hibWN0ci50di8='
## link=araclar.get_url(base64.b64decode(safe))
## match1=re.compile('besir>>(.*?)<<besir').findall(link)
## for kkk in match1:
## print kkk
value=[]
count=[]
url="http://www.dailymotion.com/embed/video/"+code
xbmc.executebuiltin('Notification("Dream Clup",DAILYMOTION Deneniyor.)')
link=araclar.get_url(url)
link = urllib.unquote(link).decode('utf8').replace('\\/', '/')
dm_high = re.compile('"stream_h264_url":"(.+?)"').findall(link)
dm_low = re.compile('"stream_h264_ld_url":"(.+?)"').findall(link)
if dm_high:
count.append('Dailymotion 360kb/s HD')
if dm_low:
count.append('Dailymotion 180kb/s SD ')
else:
pass
dialog = xbmcgui.Dialog()
ret = dialog.select(__language__(30008),count)
if ret == 0:
value.append(('Dailymotion 384 p',dm_high[0]))
if ret == 1:
value.append(('Dailymotion 240 p',dm_low[0]))
return value
def Flashx_Player(url):
## safe='aHR0cDovL3hibWN0ci50di8='
## link=araclar.get_url(base64.b64decode(safe))
## match1=re.compile('besir>>(.*?)<<besir').findall(link)
## for kkk in match1:
## print kkk
xbmc.executebuiltin('Notification("Dream Clup",FLASHX Deneniyor.)')
link=araclar.get_url(url)
print url
playerbul=re.compile('href="http://flashx.tv/video\/(.*?)\/.*?">').findall(link)
print "playerbul",playerbul
for a in playerbul:
url1=('http://play.flashx.tv/nuevo/player/cst.php?hash='+a)
url=url1
link=araclar.get_url(url)
match=re.compile('<file>(.*?).flv</file>').findall(link)
print "playerbul2",match
for a in match:
url=a+'.flv'
print "playerbul3",url
return url
def Putlocker_Player(url):
## safe='aHR0cDovL3hibWN0ci50di8='
## link=araclar.get_url(base64.b64decode(safe))
## match1=re.compile('besir>>(.*?)<<besir').findall(link)
## for kkk in match1:
## print kkk
value=''
xbmc.executebuiltin('Notification("Dream Clup",PUTLOCKER Deneniyor.)')
link=araclar.get_url(url)
match=re.compile('<input type="hidden" value="(.*?)" name="(.*?)">').findall(link)
print "ilk match",match
for a,b in match:
bilgiler=urllib.urlencode({b : a,'confirm': 'Close Ad and Watch as Free User'})
adres=urllib.urlopen(url,bilgiler)
		#read the response
		adres=adres.read()
		#extract the URL address from the response
adres=re.compile('playlist: \'(.*?)\'').findall(adres)
for son_url in adres:
url='http://www.putlocker.com'+son_url
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
link=link.replace('amp;',"").replace('\xd6',"O").replace('\xfc',"u").replace('\xdd',"I").replace('\xfd',"i").replace('\xe7',"c").replace('\xde',"s").replace('\xfe',"s").replace('\xc7',"c").replace('\xf0',"g").replace('#038;',"")
response.close()
match=re.compile('content url="(.*?)"').findall(link)
print "putlocker son",match
if len(match) >1:
				del match[0]#important: this must stay (drops the first entry)
for value in match:
return value
if not value:
		return False
def Sockshare_Player(url):
## safe='aHR0cDovL3hibWN0ci50di8='
## link=araclar.get_url(base64.b64decode(safe))
## match1=re.compile('besir>>(.*?)<<besir').findall(link)
## for kkk in match1:
## print kkk
value=''
xbmc.executebuiltin('Notification("Dream Clup",SOCKSHARE Deneniyor.)')
link=araclar.get_url(url)
match=re.compile('<input type="hidden" value="(.*?)" name="(.*?)">').findall(link)
print "ilk match",match
for a,b in match:
bilgiler=urllib.urlencode({b : a,'confirm': 'Close Ad and Watch as Free User'})
adres=urllib.urlopen(url,bilgiler)
		#read the response
		adres=adres.read()
		#extract the URL address from the response
adres=re.compile('playlist: \'(.*?)\'').findall(adres)
for son_url in adres:
url='http://www.sockshare.com'+son_url
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
link=link.replace('amp;',"").replace('\xd6',"O").replace('\xfc',"u").replace('\xdd',"I").replace('\xfd',"i").replace('\xe7',"c").replace('\xde',"s").replace('\xfe',"s").replace('\xc7',"c").replace('\xf0',"g").replace('#038;',"")
response.close()
match=re.compile('content url="(.*?)"').findall(link)
print "sockshare son",match
if len(match) >1:
				del match[0]#important: this must stay (drops the first entry)
for value in match:
return value
if not value:
		return False
def vk_Player(vk_list):
## safe='aHR0cDovL3hibWN0ci50di8='
## link=araclar.get_url(base64.b64decode(safe))
## match1=re.compile('besir>>(.*?)<<besir').findall(link)
## for kkk in match1:
## print kkk
value=[]
count=[]
fixed=''
gecis=0
resolutions = ["240", "360", "480", "720", "1080"]
for url in vk_list if not isinstance(vk_list, basestring) else [vk_list]:
try:
link=araclar.get_url(url)
host=re.compile("host=(.*?)&").findall(link)
uid=re.compile("uid=(.*?)&").findall(link)
vtag=re.compile("vtag=(.*?)&").findall(link)
hd = re.compile("hd_def=(.*?)&").findall(link)
if not hd or hd[0]=="-1":
hd = re.compile("max_hd = '(.*?)';").findall(link)
flv = re.compile("no_flv=(.*?)&").findall(link)
#http://cs514110.userapi.com/u175995076/video/ac8f196d08.xxx.mp4
start_url=host[0]+'u'+uid[0]+'/videos/' + vtag[0]
print "vk start url",start_url
print "hd:",hd
x=(int(hd[0])+1)
if hd >0 or flv == 1:
for i in range (x):
i=resolutions[i]+' p'
count.append(i)
if gecis==0:
dialog = xbmcgui.Dialog()
ret = dialog.select(__language__(30014),count)
for i in range (x):
if ret == i:
url=start_url+'.'+str(resolutions[i])+'.mp4'
fixed=str(resolutions[i])
gecis+=1
else:
							print 'VK SECIM YOK'
else:
url=start_url+'.'+fixed+'.mp4'
print ('SECIM :'+fixed)
gecis+=1
value.append(url)
else:
print 'HD FLV YANLIS'
except:
print 'LINK TARAMA FAILED'
pass
return value
def xml_scanner(videoTitle,url):
value=[]
xmlScan=araclar.get_url(url)
dizihd=re.compile('git=(.*?)"').findall(xmlScan)
	face_1=re.compile('http://dizihd.com/dizihdd.php(.+?)"').findall(xmlScan)#face link inside the xml
	youtube_1=re.compile('v=(.*?)"').findall(xmlScan)#youtube link inside the xml
	dizimag=re.compile('url="(.*?)"').findall(xmlScan) #dizimag links inside the xml
music=re.compile('<file>(.*?)</file>').findall(xmlScan)
if len(youtube_1)> 0 :
for i, url in enumerate(youtube_1):
Url='plugin://plugin.video.youtube/?action=play_video&videoid='+str(youtube_1[0])
value.append(Url)
if len(face_1)> 0 :
for i, url in enumerate(face_1):
Url='http://dizihd.com/dizihdd.php'+str(url)
value.append(Url)
if len(dizimag)> 0:
for i, url in enumerate(dizimag):
value.append(url)
if len(music)> 0 :
for i, url in enumerate(music):
value.append(url)
print 'XML DONUS DEGERI',value
return value
xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
def videobul(urlList):
value=[]
try:
print "Bizim Sistem Deneniyor"
value=denetle(urlList)
print "Bizim sistem sonuc:",value
if not value:
print "urlresolver Sistem Deneniyor"
value=UrlResolverPlayer(urlList)
print "********* resolver sonuc ***********",value
except:
value=[('server bulunamadi','http://www.youtube.com/watch?v=tMqzgaKWJoQ')]
finally:
return value
def denetle(urlList):
vk=[]
value=[]
print "URL listesi",urlList
for url in urlList if not isinstance(urlList, basestring) else [urlList]:
print "cozulecek url",url
if 'video.ak.fbcdn.net' in url:
value.append(("F Server",url))
if 'vk.com' in url:
print 'VK BULUNDU'
vk.append(url)
if "divxstage" in url:
value.append(("DivxStage",Divxshare_Player(url)))
if "novamov" in url:
value.append(("NovaMov",Divxshare_Player(url)))
if "flashx" in url:
value.append(("Flashx",Flashx_Player(url)))
if 'youtube' in url:
print 'YOUTUBE BULUNDU'
value.append(("YT Server",Youtube_Player(url)))
if 'uploadc' in url:
print 'UPLOADC BULUNDU'
value.append(("UPLOADC Server",uploadc_Player(url)))
if 'mail.ru' in url:
print 'MAILRU BULUNDU'
value.append(("MR Server",mailru_Player(url)))
if 'http://www.videoslasher.com' in url:
value.append(("VS Server",Videoslasher_Player(url)))
if "movshare" in url:
value.append(("MovShare",Divxshare_Player(url)))
if "vimple" in url:
xbmc.executebuiltin('Notification("Beklenen Hata",vimple henuz cozulmedi.)')
if "stream2k.com" in url:
value.append(("Stream2k",stream2k_Player(url)))
if "video.az" in url:
value.append(("VideoAz",VideoAz_Player(url)))
if "stagevu" in url:
value.append(("StageVu",stagevu_Player(url)))
if "streamcloud" in url:
code=Streamcloud_Player(url)
if code:
value.append(("Streamcloud",code))
if "nowvideo" in url:
value.append(("NowVideo",Divxshare_Player(url)))
if "yesload" in url:
value.append(("Yesload",Yesload_Player(url)))
if "putlocker" in url:
value.append(("Putlocker",Putlocker_Player(url)))
if "sockshare" in url:
value.append(("Sockshare",Sockshare_Player(url)))
if vk:
print 'VK ILK OKUMA:'
vk_sonuc=vk_Player(vk)
for url in vk_sonuc:
value.append(("VK Server",url))
if value:
return value
else:
return UrlResolverPlayer(urlList)
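# Note (added; descriptive): denetle returns a list of (label, resolved_url) tuples, e.g.
# [("YT Server", "plugin://plugin.video.youtube/?action=play_video&videoid=XYZ")] where
# the video id is hypothetical, or it falls back to UrlResolverPlayer when none of the
# known hosts matched. videobul() above relies on this shape.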
####
def magix_player(name,url):
if "mail.ru" in url:
MailRu_Player(url)
if "ok.ru" in url:
ok_ru(url)
else:
UrlResolverPlayer = url
playList.clear()
media = urlresolver.HostedMediaFile(UrlResolverPlayer)
source = media
if source:
url = source.resolve()
araclar.addLink(name,url,'')
araclar.playlist_yap(playList,name,url)
xbmcPlayer.play(playList)
####
def UrlResolverPlayer(urlList):
value=[]
print "URL listesi",urlList
for url in urlList if not isinstance(urlList, basestring) else [urlList]:
print "URLRESOLVER url",url
		if url in url:  # note: this condition is always true; every url is handed to urlresolver
print 'URLRESOLVER LINK BULUNDU'
UrlResolverPlayer = url
media = urlresolver.HostedMediaFile(UrlResolverPlayer)
source = media
if source:
xbmc.executebuiltin('Notification(Dream-Clup!,URLRESOLVER Deneniyor)')
url = source.resolve()
value.append(("URLRESOLVER LINK",url))
if value:
return value
else:
return False
#
def skk():
req = urllib2.Request((base64.b64decode(df)));
req.add_header('User-Agent',"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/2013000101 Firefox/10.0.1")
login= __ayarlar__.getSetting((base64.b64decode(yl)))
password= __ayarlar__.getSetting((base64.b64decode(kl)))
data=urllib2.urlopen(req,(base64.b64decode(k))+login+(base64.b64decode(f))+password+(base64.b64decode(b))).read()
response = urllib2.urlopen((base64.b64decode(r))+login+(base64.b64decode(qq))).read()
match=re.compile('\((.*?)\)<br').findall(response)
if (base64.b64decode(A)) in match[0] or (base64.b64decode(pl)) in match[0] or (base64.b64decode(c)) in match[0] or (base64.b64decode(ktk)) in match[0] or (base64.b64decode(tr)) in match[0] or (base64.b64decode(ss)) in match[0] or (base64.b64decode(ttt)) in match[0]:
__ayarlar__.setSetting('var',"True")
return True
else:
d = xbmcgui.Dialog()
d.ok(__ayarlar__.getLocalizedString(30001),__ayarlar__.getLocalizedString(30002),__ayarlar__.getLocalizedString(30003))
d.ok(__ayarlar__.getLocalizedString(30004),__ayarlar__.getLocalizedString(30005),__ayarlar__.getLocalizedString(30006))
__ayarlar__.setSetting('var',"False")
return False
EXIT()
def rebeka():
d = xbmcgui.Dialog()
login=__ayarlar__.getSetting("var")
if not login or login=="False":
global insidans
if insidans==1:
cevap=d.yesno(__ayarlar__.getLocalizedString(30007),__ayarlar__.getLocalizedString(30008),__ayarlar__.getLocalizedString(30009),__ayarlar__.getLocalizedString(30010))
if cevap:
__ayarlar__.openSettings()
check=skk()
else:
__ayarlar__.setSetting('var',"False")
EXIT()
insidans+=1
else:
pass
if __ayarlar__.getSetting("var")=="True":
pass
else:
__ayarlar__.setSetting('var',"False")
EXIT()
def sirala(IMAGES_PATH):
# rebeka()
for fileName in imps:
thumbnail= os.path.join(IMAGES_PATH, fileName+".png")
addDir(fileName, '[COLOR lightgreen][B]'+fileName+'[/B][/COLOR]' ,"main()", "",thumbnail)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def kalala(IMAGES_PATH,url):
dacka(url)
sirala(IMAGES_PATH)
def hata():
d = xbmcgui.Dialog()
d.ok(__ayarlar__.getLocalizedString(30011),__ayarlar__.getLocalizedString(30012),__ayarlar__.getLocalizedString(30013))
def EXIT():
xbmc.executebuiltin("XBMC.Container.Refresh(path,replace)")
xbmc.executebuiltin("XBMC.ActivateWindow(Home)")
###########################MAIL-RU-PLAYER######################################
##referer='http://img.mail.ru/r/video2/uvpv3.swf?3'
##cookie='VID=2SlVa309oFH4; mrcu=EE18510E964723319742F901060A; p=IxQAAMr+IQAA; video_key=203516; s='
##UA='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
##
def MailRu_Player(url):
req = urllib2.Request(url)
resp = urllib2.urlopen(req)
html = resp.read()
cookie_string = resp.headers.getheader('Set-Cookie').split(';')[0]
print resp.headers.getheader('Set-Cookie')
headers = {
'Cookie': cookie_string
}
metadata_url_start = html.find('metadataUrl') + len('metadataUrl":"')
metadata_url_end = html.find('"', metadata_url_start)
metadata_url = html[metadata_url_start:metadata_url_end]
metadata_response = urllib2.urlopen(metadata_url)
metadata = json.loads(metadata_response.read())
#---------------------------------#
xbmc_cookies = '|Cookie=' + urllib.quote(cookie_string)
streams = [(v['key'], v['url'] + xbmc_cookies) for v in metadata['videos']]
for name,url in streams:
araclar.addLink('[COLOR lightblue][B]M_R >> [/B][/COLOR]'+name,url,'')
#---------------------------------#
#### safe='aHR0cDovL3hibWN0ci50di8='
#### link=araclar.get_url(base64.b64decode(safe))
#### match1=re.compile('besir>>(.*?)<<besir').findall(link)
#### for kkk in match1:
#### print kkk
##
## name='a'
## m = _regex(url)
## if m:
## items = []
## headers = { "User-Agent":UA, "Referer":referer, "Cookie":cookie }
## quality = "???"
## vurl = m.group('url')
## vurl = re.sub('\&[^$]*','',vurl)
## data = araclar.get_url_headers('http://videoapi.my.mail.ru/videos/' + vurl + '.json', headers)
## item = json.loads(data)
## for qual in item[u'videos']:
## if qual == 'hd':
## quality = "480p"
## else:
## quality = "???"
## link = 'http://videoapi.my.mail.ru/file/video/v'+vurl
## items.append({'quality':quality, 'url':link+'|Cookie='+cookie+'|Referer='+referer})
## resolved = sorted(items,key=lambda i:i['quality'])
## return resolved[-1][u'url']
##
##
##
##def _regex(url):
## return re.search('movieSrc=(?P<url>.+?)$', url, re.IGNORECASE | re.DOTALL)
##
##
##
##def _regex(url):
## return re.search('movieSrc=(?P<url>.+?)$', url, re.IGNORECASE | re.DOTALL)
####################################
#---------------------------------#
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'
ACCEPT = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
def ok_ru(url):
fileName ="cozucu"
sources = []
if(re.search(r'ok.ru', url)):
# try:
id = re.search('\d+', url).group(0)
jsonUrl = 'http://ok.ru/dk?cmd=videoPlayerMetadata&mid=' + id
jsonSource = json.loads(http_req(jsonUrl))
for source in jsonSource['videos']:
name = '%s %s' % ('', source['name'])
link = '%s|User-Agent=%s&Accept=%s&Referer=%s' % (source['url'], USER_AGENT, ACCEPT, urllib.quote_plus(url))
#item = {'name': name, 'url': link}
#sources.append(item)
url = link
print url
araclar.addDir(fileName,'[COLOR beige][B][COLOR blue]>>[/COLOR]'+name+'[/B][/COLOR]', "yeni4(name,url)",url,"")
# except: pass
#return sources
def http_req(url, getCookie=False):
req = urllib2.Request(url)
req.add_header('User-Agent', USER_AGENT)
req.add_header('Accept', ACCEPT)
req.add_header('Cache-Control', 'no-transform')
response = urllib2.urlopen(req)
source = response.read()
response.close()
if getCookie:
cookie = response.headers.get('Set-Cookie')
return {'source': source, 'cookie': cookie}
return source
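# Usage sketch (added; illustrative): http_req(some_url) returns the page body as a string,
# while http_req(some_url, getCookie=True) returns {'source': ..., 'cookie': ...} so the
# caller can forward the Set-Cookie header to a follow-up request.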
def yeni4(name,url):
xbmcPlayer = xbmc.Player()
playList = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playList.clear()
araclar.addLink(name,url,'')
listitem = xbmcgui.ListItem(name)
playList.add(url, listitem)
xbmcPlayer.play(playList)
|
noba3/KoTos
|
addons/script.module.xbmctr/lib/cozucu.py
|
Python
|
gpl-2.0
| 33,035
|
[
"VisIt"
] |
58f1182163adfd5c551da935f19641394e0878b9af202dc4d7b8112ae84a6b64
|
# -*- coding: utf-8 -*-
"""
chemreac.core
=============
In ``chemreac.core`` you will find :py:class:`ReactionDiffusion` which
is the class describing the system of ODEs.
"""
from __future__ import (absolute_import, division, print_function)
from collections import defaultdict, OrderedDict
from functools import reduce
import inspect
from itertools import chain
from operator import add
import os
import warnings
import numpy as np
from .chemistry import mk_sn_dict_from_names, ReactionSystem
from .util.pyutil import monotonic
from .units import get_derived_unit, to_unitless, linspace
from .constants import get_unitless_constant
from ._chemreac import PyReactionDiffusion
Geom_names = {'f': 'Flat', 'c': 'Cylindrical', 's': 'Spherical'}
class ReactionDiffusionBase(object):
def to_ReactionSystem(self, substance_names=None):
substance_names = substance_names or self.substance_names
rxns = []
for ri in range(self.nr):
rxn = self.to_Reaction(ri, substance_names)
rxns.append(rxn)
kw = {}
if self.substance_latex_names:
kw['latex_name'] = self.substance_latex_names
return ReactionSystem(rxns, mk_sn_dict_from_names(substance_names, **kw))
@classmethod
def from_ReactionSystem(cls, rsys, variables=None, fields=None, **kwargs):
"""
Make a :class:`ReactionDiffusion` from a :class:`chempy.ReactionSystem`.
Parameters
----------
rsys : chempy.ReactionSystem
variables : dict
fields: optional
unit_registry : dict, optional
\\*\\*kwargs :
Keyword arguments passed on to :class:`ReactionDiffusion`
"""
if fields is not None and variables is not None:
for k in variables:
if k.startswith('doserate') or k == 'density':
warnings.warn("value for %s in variables in from_ReactionSystem possibly overriden by field" % k)
from chempy.kinetics.rates import RadiolyticBase
from chempy.units import is_unitless
mass_action_rxns = []
radiolytic_rxns = []
for rxn in rsys.rxns:
for key in chain(rxn.reac, rxn.prod, rxn.inact_reac):
if key not in rsys.substances:
raise ValueError("Unkown substance name: %s" % key)
if isinstance(rxn.param, RadiolyticBase):
radiolytic_rxns.append(rxn)
else:
mass_action_rxns.append(rxn)
# Handle radiolytic yields
if kwargs.get('unit_registry', None) is not None:
yield_unit = get_unit(kwargs['unit_registry'], 'radyield')
else:
yield_unit = 1
yields = OrderedDict()
if isinstance(fields, dict):
for k in fields.keys():
yields[k] = defaultdict(lambda: 0*yield_unit)
fields = list(fields.values())
for rxn in radiolytic_rxns:
for doserate_name, g_val in rxn.param.g_values(variables).items():
if doserate_name.startswith('doserate'):
if doserate_name.startswith('doserate_'):
doserate_name = doserate_name[9:]
else:
raise NotImplementedError("Expected doserate_name to start with 'doserate'")
if doserate_name not in yields:
yields[doserate_name] = defaultdict(lambda: 0*yield_unit)
for k in rxn.keys():
n, = rxn.net_stoich([k])
if k not in yields[doserate_name]:
yields[doserate_name][k] = n*g_val
else:
yields[doserate_name][k] += n*g_val
g_values = [rsys.as_per_substance_array(v, unit=yield_unit, raise_on_unk=True)
for v in yields.values()]
g_value_parents = []
for k in yields:
parent = None
for r in radiolytic_rxns:
if parent is None:
parent = r.reac
else:
if parent != r.reac:
raise ValueError("Mixed parents for %s" % k)
if parent == {}:
g_value_parents.append(-1)
else:
raise NotImplementedError("Concentration dependent radiolysis not supported.")
if fields is None:
# Each doserate_name gets its own field:
fields = [[variables['density']*variables[
dname if dname == 'doserate' else ('doserate_'+dname)
]] * kwargs.get('N', 1) for dname in yields]
if fields == [[]]:
fields = None
def _kwargs_updater(key, attr):
if attr in kwargs:
return
try:
kwargs[attr] = [getattr(rsys.substances[sn], key) for sn in
rsys.substances]
except AttributeError:
try:
kwargs[attr] = [rsys.substances[sn].data[key]
for sn in rsys.substances]
except KeyError:
pass
for key, attr in zip(
['D', 'mobility', 'name', 'latex_name'],
['D', 'mobility', 'substance_names',
'substance_latex_names']):
_kwargs_updater(key, attr)
rate_coeffs = [rxn.rate_expr().rate_coeff(variables) for rxn in mass_action_rxns]
if kwargs.get('unit_registry', None) is None:
assert all(is_unitless(arg) for arg in [fields, rate_coeffs, g_values] + (
[kwargs['D']] if 'D' in kwargs else []
            ) + ([kwargs['mobility']] if 'mobility' in kwargs else []))
cb = ReactionDiffusion
else:
cb = ReactionDiffusion.nondimensionalisation
return cb(
rsys.ns,
[reduce(add, [[i]*rxn.reac.get(k, 0) for i, k
in enumerate(rsys.substances)]) for rxn in mass_action_rxns],
[reduce(add, [[i]*rxn.prod.get(k, 0) for i, k
in enumerate(rsys.substances)]) for rxn in mass_action_rxns],
rate_coeffs,
stoich_inact=[reduce(add, [
[i]*(0 if rxn.inact_reac is None else
rxn.inact_reac.get(k, 0)) for i, k in enumerate(rsys.substances)
]) for rxn in mass_action_rxns],
g_values=g_values,
g_value_parents=g_value_parents,
fields=fields,
**kwargs)
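    # Usage sketch (added; illustrative, hedged). Assuming a chempy ReactionSystem parsed
    # from reaction strings, one might build the ODE system roughly like:
    #
    #     from chempy import ReactionSystem
    #     rsys = ReactionSystem.from_string("2 OH -> H2O2; 3.6e9")
    #     rd = ReactionDiffusion.from_ReactionSystem(rsys, variables={})
    #
    # The rate constant 3.6e9 is a made-up illustrative value; real usage depends on the
    # rate expressions attached to the reactions in rsys.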
def _as_odesys(self, **kwargs):
from ._odesys import ODESys
return ODESys(self, **kwargs)
def to_Reaction(self, ri, substance_names=None):
"""
Convenience method for making a Reaction instance
for reaction index ri
"""
from .chemistry import Reaction
if substance_names is None:
substance_names = self.substance_names
return Reaction(
{substance_names[i]: self.stoich_active[ri].count(i) for
i in range(self.n)},
{substance_names[i]: self.stoich_prod[ri].count(i) for
i in range(self.n)},
param=self.k[ri],
inact_reac={
substance_names[i]: self.stoich_inact[ri].count(i) for
i in range(self.n)})
def alloc_fout(self):
return np.zeros(self.n*self.N)
def alloc_jout(self, banded=None, order='F', pad=None):
if banded is None:
banded = self.N > 1
if pad is None:
pad = banded
if pad is True:
pad = self.n*self.n_jac_diags
elif pad is False:
pad = 0
if order == 'C':
rpad, cpad = 0, pad
elif order == 'F':
rpad, cpad = pad, 0
else:
raise ValueError("Order must be 'C' or 'F'")
if banded:
nr = 2*(self.n*self.n_jac_diags) + 1 + rpad
nc = self.n*self.N + cpad
return np.zeros((nr, nc), order=order)
else:
return np.zeros((self.n*self.N + rpad, self.n*self.N + cpad), order=order)
def alloc_jout_compressed(self, nsat=0):
from block_diag_ilu import alloc_compressed
return alloc_compressed(self.N, self.n, self.n_jac_diags, nsat)
@property
def ny(self):
return self.N*self.n
def get_unit(unit_registry, key):
try:
return get_derived_unit(unit_registry, key)
except KeyError:
return dict(
radyield=unit_registry['amount']/get_unit(unit_registry, 'energy'),
field=get_unit(unit_registry, 'energy')/(get_unit(
unit_registry, 'time') * get_unit(unit_registry, 'length')**3)
)[key]
def g_units(unit_registry, g_value_parents):
""" Forms the unit of radiolytic yield
Parameters
----------
g_value_parents: iterable of integers
parent substance indices (-1 indicates no parent)
"""
g_units = []
for parent in g_value_parents:
if parent == -1:
g_units.append(get_unit(unit_registry, 'radyield'))
else:
g_units.append(get_unit(unit_registry, 'radyield') /
get_unit(unit_registry, 'concentration'))
return g_units
def k_units(unit_registry, reaction_orders):
return [get_unit(unit_registry, 'concentration')**(1-order)/get_unit(unit_registry, 'time')
for order in reaction_orders]
class ReactionDiffusion(PyReactionDiffusion, ReactionDiffusionBase):
"""
Object representing the numerical model, with callbacks for evaluating
derivatives and jacobian.
Instances provide the methods:
- :meth:`f`
- :meth:`dense_jac_rmaj`
- ``f(t, y, fout)``
- ``dense_jac_rmaj(t, y, jout)``
- ``dense_jac_cmaj(t, y, jout)``
- ``banded_jac_cmaj(t, y, jout)``
- ``banded_packed_jac_cmaj(t, y, jout)``
some of which are used by e.g. :func:`~chemreac.integrate.integrate_scipy`
Additional convenience attributes (not used by underlying C++ class):
- :py:attr:`substance_names`
- :py:attr:`substance_latex_names`
.. note::
        only the first four arguments (up to k) are considered positional
Parameters
----------
n: integer
Number of species.
stoich_active: list of lists of integer indices
Reactant index lists per reaction.
stoich_prod: list of lists of integer indices
Product index lists per reaction.
k: 1-dimensional array
Reaction rate coefficients.
N: integer
Number of compartments (default: 1 if ``x==None`` else ``len(x)-1``).
D: sequence of floats
Diffusion coefficients (of length n).
z_chg: sequence of integers
1-dimensional array of ion charges.
mobility: sequence of floats
Mobility of ions.
    x: sequence of floats or pair of floats or float, optional
Compartment boundaries (of length N+1), if x is a pair of floats it is
expanded into ``linspace(x[0], x[1], N+1)``, if x is a float it is
expanded into ``linspace(0, x, N+1)``. Default: ``1``.
stoich_inact: list of lists of integer indices
        List of inactive reactant index lists per reaction, default: ``[]``.
geom: str (letter)
Any in 'fcs' (flat, cylindrical, spherical).
logy: bool
f and \\*_jac_\\* routines operate on log_b(concentration).
logt: bool
f and \\*_jac_\\* routines operate on log_b(time).
logx: bool
f and \\*_jac_\\* routines operate on log_b(space).
nstencil: integer
Number of points used in finite difference scheme.
lrefl: bool
Reflective left boundary (default: True).
rrefl: bool
Reflective right boundary (default: True).
auto_efield: bool
Calculate electric field from concentrations (default: False).
surf_chg: pair of floats
        Total charge of surface (default: (0.0, 0.0)).
eps_rel: float
        Relative permittivity of medium (dielectric constant).
g_values: sequence of sequences of floats
        Per-species yields (amount / energy) per field type.
g_value_parents: sequence of integers (optional)
Indices of parents for each g_value. ``-1`` denotes no concentration
dependence of g_values. default: ``[-1]*len(g_values)``.
fields: sequence of sequence of floats
Per bin field strength (energy / volume / time) per field type.
May be calculated as product between density and doserate.
modulated_rxns: sequence of integers
        Indices of reactions subject to per bin modulation.
modulation: sequence of sequences of floats
Per bin modulation vectors for each index in modulated_rxns.
ilu_limit: float
Requirement on (average) diagonal dominance for performing ILU
factorization.
n_jac_diags: int
Number of diagonals to include in Jacobian. ``-1`` delegates to
environment variable ``CHEMREAC_N_JAC_DIAGS``. ``0`` makes it equal to
        ``(nstencil - 1) / 2``.
use_log2: bool
Use base 2 for logarithmic transform instead of e.
unit_registry: dict (optional)
See ``chemreac.units.SI_base_registry`` for an example (default: None).
Attributes
----------
unit_registry: dict
"""
# not used by C++ class
extra_attrs = ['substance_names', 'substance_latex_names']
# subset of extra_attrs optionally passed by user
kwarg_attrs = ['substance_names', 'substance_latex_names']
_substance_names = None
_substance_latex_names = None
@property
def substance_names(self):
return self._substance_names or list(map(str, range(self.n)))
@substance_names.setter
def substance_names(self, names):
self._substance_names = names
@property
def substance_latex_names(self):
return self._substance_latex_names or list(map(str, range(self.n)))
@substance_latex_names.setter
def substance_latex_names(self, latex_names):
self._substance_latex_names = latex_names
def __new__(cls, n, stoich_active, stoich_prod, k, # *, i.e. last pos arg
N=0, D=None, z_chg=None,
mobility=None, x=None, stoich_inact=None, geom='f',
logy=False, logt=False, logx=False, nstencil=None,
lrefl=True, rrefl=True, auto_efield=False, surf_chg=None,
eps_rel=1.0, g_values=None, g_value_parents=None,
fields=None,
modulated_rxns=None,
modulation=None,
unit_registry=None,
ilu_limit=None,
n_jac_diags=-1,
use_log2=False,
clip_to_pos=False,
faraday_const=None,
vacuum_permittivity=None,
**kwargs):
if N == 0:
if x is None:
N = 1
else:
N = len(x)-1
nstencil = nstencil or (1 if N == 1 else 3)
if N < nstencil:
raise ValueError("N must be >= nstencil")
if z_chg is None:
z_chg = list([0]*n)
if mobility is None:
mobility = np.zeros(n)
if N > 1:
assert len(D) in (n, n*N)
assert n == len(z_chg)
assert n == len(mobility)
else:
if D is None:
D = np.zeros(n)
if x is None:
x = 1.0
try:
if len(x) == N+1:
if not monotonic(x, 1, True):
raise ValueError("x must be strictly positive monotonic")
_x = x
elif len(x) == 2:
_x = linspace(x[0], x[1], N+1)
elif len(x) == 1:
raise TypeError
else:
raise ValueError("Don't know what to do with len(x) == %d" %
len(x))
except TypeError:
_x = linspace(0, x, N+1)
if stoich_inact is None:
stoich_inact = list([[]]*len(stoich_active))
if len(stoich_inact) != len(stoich_active):
raise ValueError("length mismatch")
if len(stoich_active) != len(stoich_prod):
raise ValueError("length mismatch")
if len(stoich_active) != len(k):
raise ValueError("length mismatch")
if geom not in 'fcs':
raise ValueError("Unkown geom: %s" % geom)
if surf_chg is None:
surf_chg = (0.0, 0.0)
# Handle g_values
if g_values is None:
g_values = []
else:
if fields is not None:
assert len(g_values) == len(fields)
for gv in g_values:
assert len(gv) == n
if g_value_parents is None:
g_value_parents = [-1]*len(g_values)
else:
if not len(g_values) == len(g_value_parents):
raise ValueError("g_values and g_value_parents need to be of same length")
if fields is None:
fields = [[0.0]*N]*len(g_values)
else:
assert len(fields) == len(g_values)
for fld in fields:
assert len(fld) == N
if modulated_rxns is not None:
if len(modulated_rxns) != len(modulation):
raise ValueError("len(modulated_rxns) != len(modulation)")
if modulation is not None:
if modulated_rxns is None:
raise ValueError("modulated_rxns missing.")
if any(len(arr) != N for arr in modulation):
raise ValueError("An array in modulation of size != N")
_k = np.asarray(k)
if _k.ndim != 1:
raise ValueError("Rates vector has inproper dimension")
_minus_one = -1
rd = super(ReactionDiffusion, cls).__new__(
cls, n, stoich_active, stoich_prod, _k,
N, D, z_chg, mobility, _x, stoich_inact, 'fcs'.index(geom), logy,
logt, logx, g_values, g_value_parents, fields,
modulated_rxns or [], modulation or [], nstencil, lrefl, rrefl,
auto_efield, surf_chg, eps_rel,
faraday_const or get_unitless_constant(
unit_registry, 'Faraday_constant'),
vacuum_permittivity or get_unitless_constant(
unit_registry, 'vacuum_permittivity'),
ilu_limit=(float(os.environ.get('CHEMREAC_ILU_LIMIT', 1000)) if
ilu_limit is None else ilu_limit),
n_jac_diags=(int(os.environ.get('CHEMREAC_N_JAC_DIAGS', 1)) if
n_jac_diags is _minus_one else n_jac_diags),
use_log2=use_log2,
clip_to_pos=clip_to_pos
)
rd.unit_registry = unit_registry
for attr in cls.kwarg_attrs:
if attr in kwargs:
setattr(rd, '_' + attr, kwargs.pop(attr))
rd.param_names = kwargs.pop('param_names', None)
if kwargs:
raise KeyError("Unkown kwargs: %s" % ", ".join(kwargs))
return rd
def __reduce__(self):
args = inspect.getargspec(self.__new__).args[1:]
return (self.__class__, tuple(getattr(self, attr) for attr in args))
_prop_unit = {
'mobility': 'electrical_mobility',
'D': 'diffusion',
'fields': 'field',
'k': (k_units, 'reac_orders'), # special case
'g_values': (g_units, 'g_value_parents'),
'x': 'length',
'surf_chg': 'charge',
}
@classmethod
def nondimensionalisation(cls, n, stoich_active, stoich_prod, k, **kwargs):
""" Alternative constructor taking arguments with units """
reac_orders = map(len, stoich_active)
_k_units = k_units(kwargs['unit_registry'], reac_orders)
k_unitless = [to_unitless(kv, ku) for kv, ku in zip(k, _k_units)]
g_values_unitless = [
np.asarray([to_unitless(yld, yld_unit) for yld in gv]) for gv, yld_unit in zip(
kwargs.get('g_values', []),
g_units(kwargs['unit_registry'], kwargs.get('g_value_parents', []))
)]
for key, rep in cls._prop_unit.items():
val = kwargs.pop(key, None)
if val is not None:
if isinstance(rep, tuple):
pass
else:
kwargs[key] = to_unitless(
val, get_unit(kwargs['unit_registry'], rep))
return cls(n, stoich_active, stoich_prod, k_unitless,
g_values=g_values_unitless, **kwargs)
@property
def reac_orders(self):
return map(len, self.stoich_active)
def with_units(self, prop):
rep = self._prop_unit[prop]
if isinstance(rep, tuple):
return [v*u for v, u in zip(getattr(self, prop), rep[0](
self.unit_registry, getattr(self, rep[1])))]
else:
return np.asarray(getattr(self, prop))*get_unit(
self.unit_registry, rep)
def set_with_units(self, prop, val):
rep = self._prop_unit[prop]
if isinstance(rep, tuple):
raise NotImplementedError("Not implemented for %s" % prop)
else:
setattr(self, prop, to_unitless(val, get_unit(
self.unit_registry, rep)))
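# Usage sketch (added; illustrative and hedged, not from the original file). A minimal
# two-species system "A -> B" with a single rate coefficient might be set up as:
#
#     rd = ReactionDiffusion(2, [[0]], [[1]], [1.0])
#     fout = rd.alloc_fout()
#     rd.f(0.0, [1.0, 0.0], fout)   # evaluate dC/dt at concentrations [A]=1, [B]=0
#
# Exact behaviour depends on the compiled PyReactionDiffusion backend; treat this only as
# a sketch of the calling convention described in the class docstring above.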
|
chemreac/chemreac
|
chemreac/core.py
|
Python
|
bsd-2-clause
| 21,344
|
[
"ChemPy"
] |
44fcbd6c0a937457f7d3b1cfe7940426ef5de11992cc99faa168d0c991be8f46
|
#!/usr/bin/env python
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
import os.path
import re
import clang.cindex as ci
import app
import violations
RULE_NAMING_CONSTANT = 'naming.constant'
RULE_NAMING_STRUCT = 'naming.struct'
RULE_NAMING_UNION = 'naming.union'
RULE_NAMING_CLASS = 'naming.class'
RULE_NAMING_ENUM = 'naming.enum'
RULE_NAMING_FIELD = 'naming.field'
RULE_NAMING_ENUM_CONSTANT = 'naming.enum_constant'
RULE_NAMING_VARIABLE = 'naming.variable'
RULE_NAMING_FUNCTION = 'naming.function'
RULE_NAMING_PARAMETER = 'naming.parameter'
RULE_NAMING_CXX_METHOD = 'naming.method'
RULE_NAMING_TYPEDEF = 'naming.typedef'
RULE_NAMING_TPL_NON_TYPE_PARAMETER = 'naming.tpl_nontype_param'
RULE_NAMING_TPL_TYPE_PARAMETER = 'naming.tpl_type_param'
RULE_NAMING_TPL_TPL_PARAMETER = 'naming.tpl_tpl_param'
RULE_NAMING_FUNCTION_TPL = 'naming.function_tpl'
RULE_NAMING_CLASS_TPL = 'naming.class_tpl'
RULE_NAMING_CLASS_TPL_SPEC = 'naming.class_tpl_spec'
RE_CONSTANT = r'^[A-Z]([_A-Z0-9])*_*$'
RE_VARIABLE = r'^_?[a-z]([_a-zA-Z0-9])*_*$'
RE_FUNCTION = r'operator.*|^_?[a-z]([_a-zA-Z0-9])*\(.*\)_*$'
RE_TYPE = r'^[A-Z]([a-zA-Z0-9])*_*$'
RE_TYPE_TEMPLATE = r'^[A-Z]([a-zA-Z0-9])*_*<.*>$'
RE_STRUCT = r'^[A-Z]([a-zA-Z0-9])*_*(<.*>)?$'
RULE_TEXTS = {
RULE_NAMING_CONSTANT: 'Constant names must be all upper-case, separated by underscores.',
RULE_NAMING_STRUCT: 'Struct names must be all camel case, starting with upper case.',
RULE_NAMING_UNION: 'Union names must be all camel case, starting with upper case.',
RULE_NAMING_CLASS: 'Class names must be all camel case, starting with upper case.',
RULE_NAMING_ENUM: 'Enum names must be all camel case, starting with upper case.',
    RULE_NAMING_FIELD: 'Field names must be camel case, starting with lower case.',
    RULE_NAMING_ENUM_CONSTANT: 'Enum constant names must be all upper-case, separated by underscores.',
    RULE_NAMING_VARIABLE: 'Variable names must be camel case, starting with lower case.',
    RULE_NAMING_FUNCTION: 'Function names must be camel case, starting with lower case.',
    RULE_NAMING_PARAMETER: 'Parameter names must be camel case, starting with lower case.',
    RULE_NAMING_CXX_METHOD: 'Method names must be camel case, starting with lower case.',
    RULE_NAMING_TYPEDEF: 'Typedef names must be all camel case, starting with upper case.',
    RULE_NAMING_TPL_NON_TYPE_PARAMETER: 'Template non-type parameters must be all upper-case, separated by underscores.',
    RULE_NAMING_TPL_TYPE_PARAMETER: 'Template type parameter names must be all camel case, starting with upper case.',
    RULE_NAMING_TPL_TPL_PARAMETER: 'Template template parameter names must be all camel case, starting with upper case.',
    RULE_NAMING_FUNCTION_TPL: 'Function template names must be camel case, starting with lower case.',
RULE_NAMING_CLASS_TPL: 'Class template names must be all camel case, starting with upper case.',
RULE_NAMING_CLASS_TPL_SPEC: 'Partial specialization names must be all camel case, starting with upper case.',
}
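# Illustrative examples (added): names accepted by the regexes above, using hypothetical
# sample identifiers rather than names taken from the checked code base:
#   RE_CONSTANT  matches 'MAX_SIZE', 'SEQAN_VERSION'
#   RE_VARIABLE  matches 'itemCount', '_buffer'
#   RE_TYPE      matches 'StringSet', 'Iterator2'
#   RE_FUNCTION  matches 'appendValue(int)', 'operator+='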
class GenericSymbolNameRule(object):
def __init__(self, kind, regular_ex, rule_name):
self.kind = kind
self.regular_ex = regular_ex
self.rule_name = rule_name
self.visitor = None
def allowVisit(self, node):
if not app._hasFileLocation(node):
#print 'no location'
return False
if not node.displayname:
#print 'no displayname'
return False # Ignore empty symbols.
# print 'allow visit template type?', displayname, node.kind
if node.kind == self.kind:
            #print 'kind matches'
return True
return False
def check(self, node):
displayname = node.displayname
#print 'checking', displayname
#import pdb; pdb.set_trace()
if not re.match(self.regular_ex, displayname):
v = violations.RuleViolation(
self.rule_name, displayname, node.location.file.name,
node.location.line, node.location.column)
return [v]
return []
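# Usage sketch (added; illustrative): a concrete rule is built by pairing a libclang
# cursor kind with one of the regexes and rule names above, e.g.
#
#     struct_rule = GenericSymbolNameRule(ci.CursorKind.STRUCT_DECL, RE_STRUCT,
#                                         RULE_NAMING_STRUCT)
#
# How such rule objects are registered with the visitor is handled elsewhere in the
# pyclangcheck driver code, not in this module.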
class VariableNameRule(object):
"""Checks variable names (in variable declarations).
The name must either be camel case (starting with lower case character) or
all upper case.
"""
def __init__(self):
self.visitor = None
def allowVisit(self, node):
if not app._hasFileLocation(node):
return False
displayname = node.displayname
if not displayname:
return False # Ignore empty symbols.
if node.kind == ci.CursorKind.VAR_DECL:
return True
return False
def check(self, node):
displayname = node.displayname
if not re.match(RE_VARIABLE, displayname) and not re.match(RE_CONSTANT, displayname):
# TODO(holtgrew): Only allow RE_CONSTANT if 'const' in declaration type.
v = violations.RuleViolation(
RULE_NAMING_VARIABLE, displayname, node.location.file.name,
node.location.line, node.location.column)
return [v]
return []
class FunctionTemplateRule(object):
"""Checks function templates.
Function template have to follow the function naming scheme. However,
libclang also exposes constructors with kind function template. The visitor
keeps a stack of current classes so we look whether the current class or
class template has the same name as the function template and allow this
besides the function naming scheme.
"""
def __init__(self):
self.visitor = None
def allowVisit(self, node):
if not app._hasFileLocation(node):
return False
displayname = node.displayname
if not displayname:
return False # Ignore empty symbols.
if node.kind == ci.CursorKind.FUNCTION_TEMPLATE:
return True
return False
def check(self, node):
displayname = node.displayname
if not re.match(RE_FUNCTION, displayname):
up_to_bracket = displayname[:displayname.find('<')]
## print 'CHECK', self.visitor.getCurrentClassName(), '!=?', up_to_bracket
if self.visitor.getCurrentClassName() != up_to_bracket:
v = violations.RuleViolation(
RULE_NAMING_FUNCTION_TPL, displayname, node.location.file.name,
node.location.line, node.location.column)
return [v]
return []
class InIncludeDirsRule(object):
"""Rule to block visiting and recursion outside include dirs."""
def __init__(self, include_dirs, exclude_dirs, source_files):
self.include_dirs = [os.path.abspath(x) for x in include_dirs]
self.source_files = [os.path.abspath(x) for x in source_files]
self.exclude_dirs = [os.path.abspath(x) for x in exclude_dirs]
self.cache = {}
def allowVisit(self, node):
"""Return True if visiting is allowed."""
if node.kind == ci.CursorKind.TRANSLATION_UNIT:
return True
if not app._hasFileLocation(node):
return False
if self.cache.has_key(node.location.file.name):
return self.cache[node.location.file.name]
# Check whether node's location is below the include directories or one
# of the source files.
filename = os.path.abspath(node.location.file.name)
result = False
for x in self.include_dirs:
if filename.startswith(x):
result = True
break
if not result:
for x in self.source_files:
if filename == x:
result = True
break
if result:
for x in self.exclude_dirs:
if filename.startswith(x):
result = False
break
self.cache[node.location.file.name] = result
return result
def allowRecurse(self, node):
"""Return True if we want to recurse below node."""
return self.allowVisit(node)
|
catkira/seqan
|
util/py_lib/seqan/pyclangcheck/rules.py
|
Python
|
bsd-3-clause
| 8,182
|
[
"VisIt"
] |
af83a1cbe6025639a56cc3b05fb19117d242a3d883a469ee18514d30a9be2bb4
|
# -*- coding: utf-8 -*-
'''
*A simple problem solver written in pure Python*
This module provides an easy way to solve various problems that require some
calculations. Let's imagine a problem formulated in
a text file using some template language. Some places
within the problem formulation text correspond to variables
that have default values. What does `to solve a problem` mean in this context?
To solve a problem is to 1) define an output template (used to render the solution), 2) write code
that makes calculations on input variables, 3) set up output variables in the code and, finally, 4)
render the solution template.
With the help of the solver classes these steps of getting a problem solution
can be carried out easily.
Solver features:
* arbitrary and independent input and output markups
used in problem formulation and solution templates.
* ability to solve problems asynchronously (Celery is required).
* heuristic testing of problem solvability.
* using all of Python's computational ability to solve your problems.
* using the jinja2 template language to produce dynamic parts of a problem
formulation/solution.
:Example:
Test problem. My name is Dmitry. I have 100 $.
I want to buy several papers. Each paper is worth 5 $. How many papers can
I buy?
Test problem (abstraction level) ::
test_problem_template_formulation = """
My name is {{username}}. I have {{total}} $.
I want to buy several papers. Each paper worth is {{paper_cost}}$.
How many papers can I buy?
"""
test_problem_solution_code = """
OUTPUTS['result']=INPUTS['total']/INPUTS['paper_cost']
OUTPUTS['name'] = INPUTS['username']
"""
test_problem_output_template="""
My name is {{name}} and answer is {{result}}.
"""
from solver import Task, Solver
task = Task(test_problem_template_formulation,
default_vals={'username': 'Dmitry',
'total': 100, 'paper_cost': 20},
solution_template=test_problem_output_template,
code = test_problem_solution_code
)
psolver = Solver(task)
# solve the problem
psolver.solve()
# or you can try to solve problem asynchronously instead
    # if an error occurs, the .solve() method will be invoked by default.
psolver.async_solve()
    # Before getting results, check that the problem solution is ready
    # (this step is required when solving the problem asynchronously)
if psolver.is_solved:
task.render_outputs() # Render output template
        print(task.output) # Print the rendered template or do something else
'''
__all__ = ('Solver', 'Task', 'get_celery_worker_status')
import ast
import datetime
import pickle
import uuid
import warnings
from jinja2 import Environment, Template
from jinja2.exceptions import TemplateSyntaxError
from jinja2.meta import find_undeclared_variables
import six
from .expts import TemplateOutputSyntaxError, UnsolvableProblem
if six.PY3:
from io import BytesIO
else:
from StringIO import StringIO
def get_celery_worker_status():
ERROR_KEY = "ERROR"
try:
from celery import Celery
app = Celery()
app.config_from_object('celeryconf')
insp = app.control.inspect()
d = insp.stats()
if not d:
d = {ERROR_KEY: 'No running Celery workers were found.'}
except IOError as e:
from errno import errorcode
msg = "Error connecting to the backend: " + str(e)
if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED':
msg += ' Check the RabbitMQ server is running.'
d = {ERROR_KEY: msg}
except ImportError as e:
d = {ERROR_KEY: str(e)}
except Exception as x:
d = {ERROR_KEY: str(x)}
return d
WORKER_STATUS_ERROR = True if get_celery_worker_status().get('ERROR', False)\
else False
class Task:
'''Base class to handle problem instance.'''
def __init__(self, content, solution_template='',
default_vals=None, code=''):
# public properties
self.content = content
self.output = ''
self.default_vals = default_vals
self.code = code
self.output_vals = {}
# private properties
self._env = Environment()
self._templateerror = False
self.solution_template = solution_template
try:
self._allvars = find_undeclared_variables(
self._env.parse(self.content))
except TemplateSyntaxError:
self._allvars = set([])
self._templateerror = True
if not bool(self._allvars):
self._allvars = set([])
self._template_output_error = False
try:
self._all_output_vars = find_undeclared_variables(
self._env.parse(self.solution_template))
except TemplateSyntaxError:
self._all_output_vars = {}
self._template_output_error = True
def render_outputs(self):
if self._template_output_error:
raise TemplateOutputSyntaxError('Check output\
template for correctness.')
if self._all_output_vars and not\
self._all_output_vars.issubset(set(self.output_vals.keys())):
warnings.warn('Not all variables defined in `output_vals`.',
SyntaxWarning)
if not self._template_output_error and not\
self._all_output_vars:
self.output = self.solution_template
else:
temp = Template(self.solution_template)
self.output = temp.render(**self.output_vals)
def is_content_valid(self):
'''Validate input template. '''
if not self.content:
return True
elif self._templateerror:
return False
else:
return True
def is_state_correct(self):
'''Validate state correctness of the problem.
        A problem is treated as correct if:
1) Content template for a problem is valid, see `is_content_valid`\
method;
2) All variables defined in the content has default values;
'''
if not self.is_content_valid():
return False
if self._allvars:
if self._allvars.issubset(set(self.default_vals.keys())):
return True
else:
return False
else:
return True
@property
def is_solved(self):
'''returns True if `output_vals` exists otherwise False'''
return True if self.output_vals else False
class Solver:
'''Main class for getting problem solutions.'''
def_input_code_PY2 = '''
import pickle
from StringIO import StringIO
_raw_input = ''
_tmpvar = StringIO(_raw_input)
INPUTS = pickle.load(_tmpvar)
OUTPUTS = {}
'''
def_input_code_PY3 = '''
import pickle
from io import BytesIO
_raw_input = b''
_tmpvar = BytesIO(_raw_input)
INPUTS = pickle.load(_tmpvar)
OUTPUTS = {}
'''
def_input_code = def_input_code_PY3 if six.PY3 else def_input_code_PY2
def __init__(self, task, preamble='', postamble=''):
'''Create a solver instance.'''
self.task = task
self.preamble = preamble
self.postamble = postamble # NOTE: currently not used.
self._code = '# -*- coding: utf-8 -*- \n\n' + self.preamble +\
'\n\n' + Solver.def_input_code + '\n\n' +\
self.task.code + '\n\n' + self.postamble
self.created = datetime.datetime.now()
self.start = None
self.end = None
self.id = uuid.uuid4().hex
self._async_obj = None
def is_solvable(self, silent=True):
        '''Heuristic test for task solvability.
        A solvable task should have either empty or compilable code.
        This function tries to parse the code block of your problem with
        the ``ast`` module.
'''
if not self.task.code:
return True
retvalue = True
if silent:
try:
ast.parse(self._code)
retvalue = True
except:
retvalue = False
finally:
return retvalue
else:
ast.parse(self._code)
return retvalue
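    # Illustrative note (added; hypothetical snippet): a task whose code is
    # "OUTPUTS['x'] = INPUTS['a'] +" fails ast.parse with a SyntaxError, so
    # is_solvable() returns False in silent mode and raises when silent=False.
    # Syntactically valid code that only fails at runtime still counts as solvable here.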
def solve(self):
'''Solve current problem.
'''
if not self.is_solvable():
raise UnsolvableProblem
if six.PY3:
a_task_serial = BytesIO()
pickle.dump(self.task.default_vals, a_task_serial)
else:
a_task_serial = StringIO()
pickle.dump(self.task.default_vals, a_task_serial)
a_task_serial.seek(0)
pcode = ast.parse(self._code, mode='exec')
transformer = ast.NodeTransformer()
newcode = transformer.visit(pcode)
for item in newcode.body:
if isinstance(item, ast.Assign):
if any(map(lambda x: getattr(x, 'id', None) == '_raw_input',
item.targets)):
if six.PY3:
item.value = ast.Bytes(a_task_serial.read())
else:
item.value = ast.Str(a_task_serial.read())
newcode = ast.fix_missing_locations(newcode)
cobj = compile(newcode, '<string>', mode='exec')
evalspace = {}
try:
self.start = datetime.datetime.now()
six.exec_(cobj, evalspace)
self.end = datetime.datetime.now()
finally:
if not self.end:
self.start = None
self.end = None
if evalspace.get('OUTPUTS'):
self.task.output_vals = evalspace.get('OUTPUTS')
def async_solve(self):
        '''Try to solve a problem asynchronously via Celery.
        If Celery is not installed, the function tries
        to get a solution with the solve method.
'''
if WORKER_STATUS_ERROR:
self.solve()
else:
if not self._async_obj:
from tasks import solve
self.start = datetime.datetime.now()
self._async_obj = solve.delay(self)
@property
def is_solved(self):
        '''Solution status. Returns True when the problem is solved.
        One needs to check the status of the asynchronous task via `is_solved`\
        before getting the results.
'''
if self._async_obj and not isinstance(self._async_obj, tuple):
if self._async_obj.ready():
self.task.output_vals, self.start,\
self.end = self._async_obj.result
return True if self.end else False
@property
def total(self):
'''Total execution time in milliseconds.'''
if self.start and self.end:
            return (self.end - self.start).total_seconds() * 1000.0
else:
return None
|
scidam/solver
|
solver/base.py
|
Python
|
mit
| 10,955
|
[
"VisIt"
] |
b29665db56657fb20f12b840a56f22f77285289544eabcb0ab02d5f81f8bbd62
|
"""
This module contains an interface to the Adas Data File type 15 (ADF 15),
which contains Photon Emissivity Coefficients (PEC). See [1] for an example
file.
[1] http://www.adas.ac.uk/man/appxa-15.pdf
"""
import os
import _xxdata_15
# Some hard coded parameters to run src/xxdata_15/xxdata_15.for routine. The
# values have been taken from src/xxdata_15/test.for, and should be OK for
# all files.
parameters = {
'nstore': 500,
'nddim': 50,
'ntdim': 40,
'ndptnl': 4,
'ndptn': 128,
'ndptnc': 256,
'ndcnct': 100,
'ndstack': 40,
'ndcmt': 2000,
}
class Adf15(object):
def __init__(self, name):
if not os.path.isfile(name):
raise IOError("no such file: '%s'" % name)
self.name = name
def read(self):
self._read_xxdata_15()
return self._convert_to_dictionary()
def _read_xxdata_15(self):
null_fds = os.open(os.devnull, os.O_RDWR)
save = os.dup(1)
os.dup2(null_fds, 1)
iunit = _xxdata_15.helper_open_file(self.name)
dsname = os.path.basename(self.name)
ret = _xxdata_15.xxdata_15(iunit, dsname, **parameters)
_xxdata_15.helper_close_file(iunit)
self._raw_return_value = ret
os.dup2(save, 1) # restore stdout
os.close(null_fds) # close the temporary fds
def _convert_to_dictionary(self):
ret = self._raw_return_value
iz0, is_, is1, esym, nptnl, nptn, nptnc, iptnla, iptna, iptnca,\
ncnct, icnctv, ncptn_stack, cptn_stack, lres, lptn, lcmt,\
lsup, nbsel, isela, cwavel, cfile, ctype, cindm, wavel,\
ispbr, isppr, isstgr, iszr, ita, ida, teta, teda, pec,\
pec_max, ncmt_stack, cmt_stack = ret
d = {}
d['nuclear_charge'] = iz0
d['charge'] = is_
d['charge+1'] = is1
d['element'] = esym.strip()
d['partition_levels'] = nptnl
d['partial_file'] = bool(lres)
d['partial_block_present'] = bool(lptn)
d['comment_text_block_present'] = bool(lcmt)
n_datablocks = nbsel
n_densities = ida
n_temperatures = ita
densities = teda
temperatures = teta
pec = pec
wavelengths = wavel
transition_types = ctype.T.reshape(-1, 8)
datablocks = []
for i in xrange(n_datablocks):
b = {}
nd, nt = n_densities[i], n_temperatures[i]
b['density'] = densities[:nd,i]
b['temperature'] = temperatures[:nt,i]
b['pec'] = pec[:nt, :nd, i]
b['wavelength'] = wavelengths[i]
b['type'] = ''.join(transition_types[i]).strip()
# convert everything to SI + eV units
b['density'] *= 1e6 # cm^-3 = 10^6 m^-3
b['pec'] *= 1e-6 # cm^3/s = 10^-6 m^3/s
b['wavelength'] *= 1e-10 # A = 10^-10 m
datablocks.append(b)
d['datablocks'] = datablocks
return d
if __name__ == '__main__':
out = Adf15('src/xxdata_15/test.dat').read()
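    # Sketch of the returned structure (added; inferred from _convert_to_dictionary above):
    # out['element'] and out['charge'] identify the ion; out['datablocks'] is a list where
    # each entry holds 'density' (m^-3), 'temperature' (eV, as stored in the file), 'pec'
    # (m^3/s), 'wavelength' (m) and 'type' (e.g. an excitation or recombination block).
    # For instance out['datablocks'][0]['pec'] is a 2-D array over temperature and density.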
|
ezekial4/atomic_neu
|
atomic/adf15.py
|
Python
|
mit
| 3,145
|
[
"ADF"
] |
f4f02f8b4220c71337a3f2ee53abbc8b8128736f952b47799fbb826f9ea96861
|
#!/usr/bin/env python
#
# $File: HaplodiploidMating.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
pop = sim.Population(10, ploidy=sim.HAPLODIPLOID, loci=[5, 5],
infoFields=['father_idx', 'mother_idx'])
pop.setVirtualSplitter(sim.SexSplitter())
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(genotype=[0]*10, subPops=[(0, 'Male')]),
sim.InitGenotype(genotype=[1]*10+[2]*10, subPops=[(0, 'Female')])
],
preOps=sim.Dumper(structure=False),
matingScheme=sim.HaplodiploidMating(
ops=[sim.HaplodiploidGenoTransmitter(), sim.ParentsTagger()]),
postOps=sim.Dumper(structure=False),
gen = 1
)
|
BoPeng/simuPOP
|
docs/HaplodiploidMating.py
|
Python
|
gpl-2.0
| 1,668
|
[
"VisIt"
] |
916e3e2b262abe17ba736766efb1adaf8f194519a89235acd0b6caa5fa560b11
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil,
SpecialFolders, VariationCaller2Application, get_connection,
make_connection_parser)
# base class to create multiple files with a CLA
class BatchFilesCreator(object):
def __init__(self, cla, base_folder, friendly_name, custom_args=None):
"""
Constructor of the general batch files creator, to create multiple files from a CLA.
:param cla: a ``CLApplication`` object, wrapper for the corresponding CLA
:param base_folder: accession of the base folder where the pipeline files will be organised into subfolders
        :param friendly_name: user-friendly name of the files produced by the app; used in the on-screen statements
and in the name of the project subfolders
:param custom_args: list of custom command-line argument strings for the files. Default is ``None``
"""
self._cla = cla
self._files_util = FilesUtil(cla.connection)
self._base_folder = base_folder
self._friendly_name = friendly_name
self._custom_args = custom_args
def create_files(self, sources):
print('Creating %s files...' % self._friendly_name)
output_folder = self._files_util.create_folder(self._friendly_name, parent=self._base_folder)
output_files = []
for i, source in enumerate(sources, 1):
output = self._create_output_file(source)
self._files_util.link_file(output, output_folder)
            print('Created %s file %s (%d/%d)' % (self._friendly_name, output, i, len(sources)))
output_files.append(output)
return output_files
# this method can be overridden in child classes to allow for more complex file creation logic
def _create_output_file(self, source):
output = self._cla.create_file(source)
if self._custom_args:
self._cla.change_command_line_arguments(output, self._custom_args)
return output
# special class for Bowtie to replace the default reference genome
class BowtieBatchFilesCreator(BatchFilesCreator):
def __init__(self, cla, base_folder, friendly_name, custom_args=None, ref_genome=None):
BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args)
self._ref_genome = ref_genome
def _create_output_file(self, source):
output = BatchFilesCreator._create_output_file(self, source)
# replace reference genome
if self._ref_genome:
self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME)
self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None, self._ref_genome)
return output
# These CLA arguments correspond to all default options except the type of variants to look for (SNPs only).
# The easiest way to know the syntax of the command-line arguments for a specific app is to look at the "Parameters"
# metainfo field of a CLA file on Genestack that has the parameters you want.
VC_ARGUMENTS_NO_INDELS = ["--skip-indels -d 250 -m 1 -E --BCF --output-tags DP,DV,DP4,SP", "",
"--skip-variants indels --multiallelic-caller --variants-only"]
if __name__ == "__main__":
# parse script arguments
parser = make_connection_parser()
parser.add_argument('raw_reads_folder',
help='Genestack accession of the folder containing the raw reads files to process')
parser.add_argument('--name', default="New Project",
help='Name of the Genestack folder where to put the output files')
parser.add_argument('--ref-genome', help='Accession of the reference genome to use for the mapping step')
args = parser.parse_args()
project_name = args.name
print('Connecting to Genestack...')
# get connection and create output folder
connection = get_connection(args)
files_util = FilesUtil(connection)
created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED)
project_folder = files_util.create_folder(project_name, parent=created_files_folder)
# create application wrappers and batch files creators
bowtie_app = BowtieApplication(connection)
mapped_qc_app = AlignedReadsQC(connection)
variant_calling_app = VariationCaller2Application(connection)
bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, "Mapped Reads", ref_genome=args.ref_genome)
mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, "Mapped Reads QC")
vc_creator = BatchFilesCreator(variant_calling_app, project_folder, "Variants", custom_args=VC_ARGUMENTS_NO_INDELS)
# collect files
print('Collecting raw reads...')
raw_reads = files_util.get_file_children(args.raw_reads_folder)
files_count = len(raw_reads)
print('Found %d files to process' % files_count)
# Create pipeline files
mapped_reads = bowtie_creator.create_files(raw_reads)
mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads)
vc_creator.create_files(mapped_reads)
print('All done! Your files are in the folder %s' % project_folder)
|
genestack/python-client
|
docs/source/sample_scripts/run_vc_pipeline.py
|
Python
|
mit
| 5,537
|
[
"Bowtie"
] |
f454c9e00fe0f0282ac45793b99ce8e2a14bdb70616e169c029553e7f91877ea
|
# -*- test-case-name: twisted.web.test.test_domhelpers -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A library for performing interesting tasks with DOM objects.
"""
import StringIO
from twisted.web import microdom
from twisted.web.microdom import getElementsByTagName, escape, unescape
class NodeLookupError(Exception):
pass
def substitute(request, node, subs):
"""
Look through the given node's children for strings, and
attempt to do string substitution with the given parameter.
"""
for child in node.childNodes:
if hasattr(child, 'nodeValue') and child.nodeValue:
child.replaceData(0, len(child.nodeValue), child.nodeValue % subs)
substitute(request, child, subs)
def _get(node, nodeId, nodeAttrs=('id','class','model','pattern')):
"""
    (internal) Get a node whose C{id}, C{class}, C{model} or C{pattern}
    attribute equals C{nodeId}.
"""
if hasattr(node, 'hasAttributes') and node.hasAttributes():
for nodeAttr in nodeAttrs:
            if str(node.getAttribute(nodeAttr)) == nodeId:
return node
if node.hasChildNodes():
if hasattr(node.childNodes, 'length'):
length = node.childNodes.length
else:
length = len(node.childNodes)
for childNum in range(length):
result = _get(node.childNodes[childNum], nodeId)
if result: return result
def get(node, nodeId):
"""
Get a node with the specified C{nodeId} as any of the C{class},
C{id} or C{pattern} attributes. If there is no such node, raise
L{NodeLookupError}.
"""
result = _get(node, nodeId)
if result: return result
    raise NodeLookupError(nodeId)
def getIfExists(node, nodeId):
"""
Get a node with the specified C{nodeId} as any of the C{class},
C{id} or C{pattern} attributes. If there is no such node, return
C{None}.
"""
return _get(node, nodeId)
def getAndClear(node, nodeId):
"""Get a node with the specified C{nodeId} as any of the C{class},
C{id} or C{pattern} attributes. If there is no such node, raise
L{NodeLookupError}. Remove all child nodes before returning.
"""
result = get(node, nodeId)
if result:
clearNode(result)
return result
def clearNode(node):
"""
Remove all children from the given node.
"""
node.childNodes[:] = []
def locateNodes(nodeList, key, value, noNesting=1):
"""
Find subnodes in the given node where the given attribute
has the given value.
"""
returnList = []
    if not isinstance(nodeList, list):
return locateNodes(nodeList.childNodes, key, value, noNesting)
for childNode in nodeList:
if not hasattr(childNode, 'getAttribute'):
continue
if str(childNode.getAttribute(key)) == value:
returnList.append(childNode)
if noNesting:
continue
returnList.extend(locateNodes(childNode, key, value, noNesting))
return returnList
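# A minimal usage sketch (hypothetical markup, not part of the original module):
#     >>> doc = microdom.parseString('<ul><li class="done">a</li><li class="open">b</li></ul>')
#     >>> [n.tagName for n in locateNodes(doc.documentElement.childNodes, 'class', 'done')]
#     ['li']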
def superSetAttribute(node, key, value):
if not hasattr(node, 'setAttribute'): return
node.setAttribute(key, value)
if node.hasChildNodes():
for child in node.childNodes:
superSetAttribute(child, key, value)
def superPrependAttribute(node, key, value):
if not hasattr(node, 'setAttribute'): return
old = node.getAttribute(key)
if old:
node.setAttribute(key, value+'/'+old)
else:
node.setAttribute(key, value)
if node.hasChildNodes():
for child in node.childNodes:
superPrependAttribute(child, key, value)
def superAppendAttribute(node, key, value):
if not hasattr(node, 'setAttribute'): return
old = node.getAttribute(key)
if old:
node.setAttribute(key, old + '/' + value)
else:
node.setAttribute(key, value)
if node.hasChildNodes():
for child in node.childNodes:
superAppendAttribute(child, key, value)
def gatherTextNodes(iNode, dounescape=0, joinWith=""):
"""Visit each child node and collect its text data, if any, into a string.
For example::
>>> doc=microdom.parseString('<a>1<b>2<c>3</c>4</b></a>')
>>> gatherTextNodes(doc.documentElement)
'1234'
With dounescape=1, also convert entities back into normal characters.
@return: the gathered nodes as a single string
@rtype: str
"""
gathered=[]
gathered_append=gathered.append
slice=[iNode]
while len(slice)>0:
c=slice.pop(0)
if hasattr(c, 'nodeValue') and c.nodeValue is not None:
if dounescape:
val=unescape(c.nodeValue)
else:
val=c.nodeValue
gathered_append(val)
slice[:0]=c.childNodes
return joinWith.join(gathered)
class RawText(microdom.Text):
"""This is an evil and horrible speed hack. Basically, if you have a big
chunk of XML that you want to insert into the DOM, but you don't want to
incur the cost of parsing it, you can construct one of these and insert it
into the DOM. This will most certainly only work with microdom as the API
for converting nodes to xml is different in every DOM implementation.
This could be improved by making this class a Lazy parser, so if you
inserted this into the DOM and then later actually tried to mutate this
node, it would be parsed then.
"""
def writexml(self, writer, indent="", addindent="", newl="", strip=0, nsprefixes=None, namespace=None):
writer.write("%s%s%s" % (indent, self.data, newl))
def findNodes(parent, matcher, accum=None):
if accum is None:
accum = []
if not parent.hasChildNodes():
return accum
for child in parent.childNodes:
# print child, child.nodeType, child.nodeName
if matcher(child):
accum.append(child)
findNodes(child, matcher, accum)
return accum
def findNodesShallowOnMatch(parent, matcher, recurseMatcher, accum=None):
if accum is None:
accum = []
if not parent.hasChildNodes():
return accum
for child in parent.childNodes:
# print child, child.nodeType, child.nodeName
if matcher(child):
accum.append(child)
if recurseMatcher(child):
findNodesShallowOnMatch(child, matcher, recurseMatcher, accum)
return accum
def findNodesShallow(parent, matcher, accum=None):
if accum is None:
accum = []
if not parent.hasChildNodes():
return accum
for child in parent.childNodes:
if matcher(child):
accum.append(child)
else:
findNodes(child, matcher, accum)
return accum
def findElementsWithAttributeShallow(parent, attribute):
"""
Return an iterable of the elements which are direct children of C{parent}
and which have the C{attribute} attribute.
"""
return findNodesShallow(parent,
lambda n: getattr(n, 'tagName', None) is not None and
n.hasAttribute(attribute))
def findElements(parent, matcher):
"""
Return an iterable of the elements which are children of C{parent} for
which the predicate C{matcher} returns true.
"""
return findNodes(
parent,
lambda n, matcher=matcher: getattr(n, 'tagName', None) is not None and
matcher(n))
def findElementsWithAttribute(parent, attribute, value=None):
if value:
return findElements(
parent,
lambda n, attribute=attribute, value=value:
n.hasAttribute(attribute) and n.getAttribute(attribute) == value)
else:
return findElements(
parent,
lambda n, attribute=attribute: n.hasAttribute(attribute))
def findNodesNamed(parent, name):
return findNodes(parent, lambda n, name=name: n.nodeName == name)
def writeNodeData(node, oldio):
for subnode in node.childNodes:
if hasattr(subnode, 'data'):
oldio.write(subnode.data)
else:
writeNodeData(subnode, oldio)
def getNodeText(node):
oldio = StringIO.StringIO()
writeNodeData(node, oldio)
return oldio.getvalue()
def getParents(node):
l = []
while node:
l.append(node)
node = node.parentNode
return l
def namedChildren(parent, nodeName):
"""namedChildren(parent, nodeName) -> children (not descendants) of parent
that have tagName == nodeName
"""
return [n for n in parent.childNodes if getattr(n, 'tagName', '')==nodeName]
|
sorenh/cc
|
vendor/Twisted-10.0.0/twisted/web/domhelpers.py
|
Python
|
apache-2.0
| 8,572
|
[
"VisIt"
] |
ee56b05cbab3bdcaebbf7b350f52433519123bfbe6e10916aa168bc0e10a5b36
|
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import math
import numpy as np
from scipy.spatial import distance
class Equilateral(object):
def __init__(self, class_count, normalized_low, normalized_high):
""" Create a lookup table that will be used for both Equilateral encoding and decoding.
"""
# Allocate a matrix to hold the lookup table.
self.encoded = np.ndarray(shape=(class_count, class_count - 1), dtype=float)
# Seed the result.
self.encoded[0][0] = -1
self.encoded[1][0] = 1.0
for k in range(2, class_count):
# scale the matrix so far
r = k
f = math.sqrt(r * r - 1.0) / r
for i in range(0, k):
for j in range(0, k - 1):
self.encoded[i][j] *= f
r = -1.0 / r
for i in range(0, k):
self.encoded[i][k - 1] = r
for i in range(0, k - 1):
self.encoded[k][i] = 0.0
self.encoded[k][k - 1] = 1.0
# Scale it.
min_eq = -1
max_eq = 1
for row in range(0, len(self.encoded)):
for col in range(0, len(self.encoded[row])):
self.encoded[row][col] = ((self.encoded[row][col] - min_eq) / (max_eq - min_eq)) \
* (normalized_high - normalized_low) + normalized_low
def encode(self, class_num):
""" Provide the equilateral encoded vector for the specified class.
"""
return self.encoded[class_num]
def decode(self, vec):
""" Match the specified vector to the class that it most closely fits.
"""
min_dist = float('inf')
result = -1
for i in range(0, len(self.encoded)):
dist = distance.euclidean(vec, self.encoded[i])
if dist < min_dist:
result = i
min_dist = dist
return result
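if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file): encode three classes
    # into two equilateral dimensions and check that decoding recovers each index.
    eq = Equilateral(3, -1.0, 1.0)
    for class_number in range(3):
        assert eq.decode(eq.encode(class_number)) == class_number
    print(eq.encode(0))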
|
PeterLauris/aifh
|
vol1/python-examples/lib/aifh/equilateral.py
|
Python
|
apache-2.0
| 2,876
|
[
"VisIt"
] |
df2f09dc9d67b4b2866918f7e2b93dff74973beca528c69d334a8db3b2e9e82a
|
"""
Development tool and utility functions for testing and test data generation.
"""
# Copyright (C) 2009-2021 Thomas Aglassinger
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fnmatch
import logging
import os
import random
from datetime import datetime, timedelta
_log = logging.getLogger("cutplace.tests")
# Most popular names in the USA according to U.S. Census Bureau, Population Division,
# Population Analysis & Evaluation Staff from 2005-11-20.
_MALE_NAMES = [
"Aaron",
"Adam",
"Adrian",
"Alan",
"Albert",
"Alberto",
"Alex",
"Alexander",
"Alfred",
"Alfredo",
"Allan",
"Allen",
"Alvin",
"Andre",
"Andrew",
"Andy",
"Angel",
"Anthony",
"Antonio",
"Armando",
"Arnold",
"Arthur",
"Barry",
"Ben",
"Benjamin",
"Bernard",
"Bill",
"Billy",
"Bob",
"Bobby",
"Brad",
"Bradley",
"Brandon",
"Brent",
"Brett",
"Brian",
"Bruce",
"Bryan",
"Byron",
"Calvin",
"Carl",
"Carlos",
"Casey",
"Cecil",
"Chad",
"Charles",
"Charlie",
"Chester",
"Chris",
"Christian",
"Christopher",
"Clarence",
"Claude",
"Clayton",
"Clifford",
"Clifton",
"Clinton",
"Clyde",
"Cody",
"Corey",
"Cory",
"Craig",
"Curtis",
"Dale",
"Dan",
"Daniel",
"Danny",
"Darrell",
"Darren",
"Darryl",
"Daryl",
"Dave",
"David",
"Dean",
"Dennis",
"Derek",
"Derrick",
"Don",
"Donald",
"Douglas",
"Duane",
"Dustin",
"Dwayne",
"Dwight",
"Earl",
"Eddie",
"Edgar",
"Eduardo",
"Edward",
"Edwin",
"Elmer",
"Enrique",
"Eric",
"Erik",
"Ernest",
"Eugene",
"Everett",
"Felix",
"Fernando",
"Floyd",
"Francis",
"Francisco",
"Frank",
"Franklin",
"Fred",
"Freddie",
"Frederick",
"Gabriel",
"Gary",
"Gene",
"George",
"Gerald",
"Gilbert",
"Glen",
"Glenn",
"Gordon",
"Greg",
"Gregory",
"Guy",
"Harold",
"Harry",
"Harvey",
"Hector",
"Henry",
"Herbert",
"Herman",
"Howard",
"Hugh",
"Ian",
"Isaac",
"Ivan",
"Jack",
"Jacob",
"Jaime",
"James",
"Jamie",
"Jared",
"Jason",
"Javier",
"Jay",
"Jeff",
"Jeffery",
"Jeffrey",
"Jeremy",
"Jerome",
"Jerry",
"Jesse",
"Jessie",
"Jesus",
"Jim",
"Jimmie",
"Jimmy",
"Joe",
"Joel",
"John",
"Johnnie",
"Johnny",
"Jon",
"Jonathan",
"Jordan",
"Jorge",
"Jose",
"Joseph",
"Joshua",
"Juan",
"Julian",
"Julio",
"Justin",
"Karl",
"Keith",
"Kelly",
"Ken",
"Kenneth",
"Kent",
"Kevin",
"Kirk",
"Kurt",
"Kyle",
"Lance",
"Larry",
"Lawrence",
"Lee",
"Leo",
"Leon",
"Leonard",
"Leroy",
"Leslie",
"Lester",
"Lewis",
"Lloyd",
"Lonnie",
"Louis",
"Luis",
"Manuel",
"Marc",
"Marcus",
"Mario",
"Marion",
"Mark",
"Marshall",
"Martin",
"Marvin",
"Mathew",
"Matthew",
"Maurice",
"Max",
"Melvin",
"Michael",
"Micheal",
"Miguel",
"Mike",
"Milton",
"Mitchell",
"Morris",
"Nathan",
"Nathaniel",
"Neil",
"Nelson",
"Nicholas",
"Norman",
"Oscar",
"Patrick",
"Paul",
"Pedro",
"Perry",
"Peter",
"Philip",
"Phillip",
"Rafael",
"Ralph",
"Ramon",
"Randall",
"Randy",
"Raul",
"Ray",
"Raymond",
"Reginald",
"Rene",
"Ricardo",
"Richard",
"Rick",
"Ricky",
"Robert",
"Roberto",
"Rodney",
"Roger",
"Roland",
"Ron",
"Ronald",
"Ronnie",
"Ross",
"Roy",
"Ruben",
"Russell",
"Ryan",
"Salvador",
"Sam",
"Samuel",
"Scott",
"Sean",
"Sergio",
"Seth",
"Shane",
"Shawn",
"Sidney",
"Stanley",
"Stephen",
"Steve",
"Steven",
"Ted",
"Terrance",
"Terrence",
"Terry",
"Theodore",
"Thomas",
"Tim",
"Timothy",
"Todd",
"Tom",
"Tommy",
"Tony",
"Tracy",
"Travis",
"Troy",
"Tyler",
"Tyrone",
"Vernon",
"Victor",
"Vincent",
"Virgil",
"Wade",
"Wallace",
"Walter",
"Warren",
"Wayne",
"Wesley",
"Willard",
"William",
"Willie",
"Zachary",
]
_FEMALE_NAMES = [
"Agnes",
"Alice",
"Alicia",
"Allison",
"Alma",
"Amanda",
"Amber",
"Amy",
"Ana",
"Andrea",
"Angela",
"Anita",
"Ann",
"Anna",
"Anne",
"Annette",
"Annie",
"April",
"Arlene",
"Ashley",
"Audrey",
"Barbara",
"Beatrice",
"Becky",
"Bernice",
"Bertha",
"Bessie",
"Beth",
"Betty",
"Beverly",
"Billie",
"Bobbie",
"Bonnie",
"Brandy",
"Brenda",
"Brittany",
"Carla",
"Carmen",
"Carol",
"Carole",
"Caroline",
"Carolyn",
"Carrie",
"Cassandra",
"Catherine",
"Cathy",
"Charlene",
"Charlotte",
"Cheryl",
"Christina",
"Christine",
"Christy",
"Cindy",
"Claire",
"Clara",
"Claudia",
"Colleen",
"Connie",
"Constance",
"Courtney",
"Crystal",
"Cynthia",
"Daisy",
"Dana",
"Danielle",
"Darlene",
"Dawn",
"Deanna",
"Debbie",
"Deborah",
"Debra",
"Delores",
"Denise",
"Diana",
"Diane",
"Dianne",
"Dolores",
"Donna",
"Dora",
"Doris",
"Dorothy",
"Edith",
"Edna",
"Eileen",
"Elaine",
"Eleanor",
"Elizabeth",
"Ella",
"Ellen",
"Elsie",
"Emily",
"Emma",
"Erica",
"Erika",
"Erin",
"Esther",
"Ethel",
"Eva",
"Evelyn",
"Felicia",
"Florence",
"Frances",
"Gail",
"Georgia",
"Geraldine",
"Gertrude",
"Gina",
"Gladys",
"Glenda",
"Gloria",
"Grace",
"Gwendolyn",
"Hazel",
"Heather",
"Heidi",
"Helen",
"Hilda",
"Holly",
"Ida",
"Irene",
"Irma",
"Jackie",
"Jacqueline",
"Jamie",
"Jane",
"Janet",
"Janice",
"Jean",
"Jeanette",
"Jeanne",
"Jennie",
"Jennifer",
"Jenny",
"Jessica",
"Jessie",
"Jill",
"Jo",
"Joan",
"Joann",
"Joanne",
"Josephine",
"Joy",
"Joyce",
"Juanita",
"Judith",
"Judy",
"Julia",
"Julie",
"June",
"Karen",
"Katherine",
"Kathleen",
"Kathryn",
"Kathy",
"Katie",
"Katrina",
"Kay",
"Kelly",
"Kim",
"Kimberly",
"Kristen",
"Kristin",
"Kristina",
"Laura",
"Lauren",
"Laurie",
"Leah",
"Lena",
"Leona",
"Leslie",
"Lillian",
"Lillie",
"Linda",
"Lisa",
"Lois",
"Loretta",
"Lori",
"Lorraine",
"Louise",
"Lucille",
"Lucy",
"Lydia",
"Lynn",
"Mabel",
"Mae",
"Marcia",
"Margaret",
"Margie",
"Maria",
"Marian",
"Marie",
"Marilyn",
"Marion",
"Marjorie",
"Marlene",
"Marsha",
"Martha",
"Mary",
"Mattie",
"Maureen",
"Maxine",
"Megan",
"Melanie",
"Melinda",
"Melissa",
"Michele",
"Michelle",
"Mildred",
"Minnie",
"Miriam",
"Misty",
"Monica",
"Myrtle",
"Nancy",
"Naomi",
"Natalie",
"Nellie",
"Nicole",
"Nina",
"Nora",
"Norma",
"Olga",
"Pamela",
"Patricia",
"Patsy",
"Paula",
"Pauline",
"Pearl",
"Peggy",
"Penny",
"Phyllis",
"Priscilla",
"Rachel",
"Ramona",
"Rebecca",
"Regina",
"Renee",
"Rhonda",
"Rita",
"Roberta",
"Robin",
"Rosa",
"Rose",
"Rosemary",
"Ruby",
"Ruth",
"Sally",
"Samantha",
"Sandra",
"Sara",
"Sarah",
"Shannon",
"Sharon",
"Sheila",
"Shelly",
"Sherri",
"Sherry",
"Shirley",
"Sonia",
"Stacey",
"Stacy",
"Stella",
"Stephanie",
"Sue",
"Susan",
"Suzanne",
"Sylvia",
"Tamara",
"Tammy",
"Tanya",
"Tara",
"Teresa",
"Terri",
"Terry",
"Thelma",
"Theresa",
"Tiffany",
"Tina",
"Toni",
"Tonya",
"Tracey",
"Tracy",
"Valerie",
"Vanessa",
"Velma",
"Vera",
"Veronica",
"Vicki",
"Vickie",
"Victoria",
"Viola",
"Violet",
"Virginia",
"Vivian",
"Wanda",
"Wendy",
"Willie",
"Wilma",
"Yolanda",
"Yvonne",
]
_SURNAMES = [
"Adams",
"Alexander",
"Allen",
"Alvarez",
"Anderson",
"Andrews",
"Armstrong",
"Arnold",
"Austin",
"Bailey",
"Baker",
"Banks",
"Barnes",
"Barnett",
"Barrett",
"Bates",
"Beck",
"Bell",
"Bennett",
"Berry",
"Bishop",
"Black",
"Bowman",
"Boyd",
"Bradley",
"Brewer",
"Brooks",
"Brown",
"Bryant",
"Burke",
"Burns",
"Burton",
"Butler",
"Byrd",
"Caldwell",
"Campbell",
"Carlson",
"Carpenter",
"Carr",
"Carroll",
"Carter",
"Castillo",
"Castro",
"Chambers",
"Chapman",
"Chavez",
"Clark",
"Cole",
"Coleman",
"Collins",
"Cook",
"Cooper",
"Cox",
"Craig",
"Crawford",
"Cruz",
"Cunningham",
"Curtis",
"Daniels",
"Davidson",
"Davis",
"Day",
"Dean",
"Diaz",
"Dixon",
"Douglas",
"Duncan",
"Dunn",
"Edwards",
"Elliott",
"Ellis",
"Evans",
"Ferguson",
"Fernandez",
"Fields",
"Fisher",
"Fleming",
"Fletcher",
"Flores",
"Ford",
"Foster",
"Fowler",
"Fox",
"Franklin",
"Frazier",
"Freeman",
"Fuller",
"Garcia",
"Gardner",
"Garrett",
"Garza",
"George",
"Gibson",
"Gilbert",
"Gomez",
"Gonzales",
"Gonzalez",
"Gordon",
"Graham",
"Grant",
"Graves",
"Gray",
"Green",
"Greene",
"Gregory",
"Griffin",
"Gutierrez",
"Hale",
"Hall",
"Hamilton",
"Hansen",
"Hanson",
"Harper",
"Harris",
"Harrison",
"Hart",
"Harvey",
"Hawkins",
"Hayes",
"Henderson",
"Henry",
"Hernandez",
"Herrera",
"Hicks",
"Hill",
"Hoffman",
"Holland",
"Holmes",
"Holt",
"Hopkins",
"Horton",
"Howard",
"Howell",
"Hudson",
"Hughes",
"Hunt",
"Hunter",
"Jackson",
"Jacobs",
"James",
"Jenkins",
"Jennings",
"Jensen",
"Jimenez",
"Johnson",
"Johnston",
"Jones",
"Jordan",
"Kelley",
"Kelly",
"Kennedy",
"Kim",
"King",
"Knight",
"Lambert",
"Lane",
"Larson",
"Lawrence",
"Lawson",
"Lee",
"Lewis",
"Little",
"Long",
"Lopez",
"Lowe",
"Lucas",
"Lynch",
"Marshall",
"Martin",
"Martinez",
"Mason",
"Matthews",
"May",
"Mccoy",
"Mcdonald",
"Mckinney",
"Medina",
"Mendoza",
"Meyer",
"Miles",
"Miller",
"Mills",
"Mitchell",
"Montgomery",
"Moore",
"Morales",
"Moreno",
"Morgan",
"Morris",
"Morrison",
"Murphy",
"Murray",
"Myers",
"Neal",
"Nelson",
"Newman",
"Nguyen",
"Nichols",
"Obrien",
"Oliver",
"Olson",
"Ortiz",
"Owens",
"Palmer",
"Parker",
"Patterson",
"Payne",
"Pearson",
"Pena",
"Perez",
"Perkins",
"Perry",
"Peters",
"Peterson",
"Phillips",
"Pierce",
"Porter",
"Powell",
"Price",
"Ramirez",
"Ramos",
"Ray",
"Reed",
"Reid",
"Reyes",
"Reynolds",
"Rhodes",
"Rice",
"Richards",
"Richardson",
"Riley",
"Rivera",
"Roberts",
"Robertson",
"Robinson",
"Rodriguez",
"Rodriquez",
"Rogers",
"Romero",
"Rose",
"Ross",
"Ruiz",
"Russell",
"Ryan",
"Sanchez",
"Sanders",
"Schmidt",
"Scott",
"Shaw",
"Shelton",
"Silva",
"Simmons",
"Simpson",
"Sims",
"Smith",
"Snyder",
"Soto",
"Spencer",
"Stanley",
"Stephens",
"Stevens",
"Stewart",
"Stone",
"Sullivan",
"Sutton",
"Taylor",
"Terry",
"Thomas",
"Thompson",
"Torres",
"Tucker",
"Turner",
"Vargas",
"Vasquez",
"Wade",
"Wagner",
"Walker",
"Wallace",
"Walters",
"Ward",
"Warren",
"Washington",
"Watkins",
"Watson",
"Watts",
"Weaver",
"Webb",
"Welch",
"Wells",
"West",
"Wheeler",
"White",
"Williams",
"Williamson",
"Willis",
"Wilson",
"Wood",
"Woods",
"Wright",
"Young",
]
def _random_datetime(randomizer, start_text="1930-01-01 00:00:00", end_text="2015-09-30 23:59:59"):
"""
A Random datetime between two datetime objects.
Based on <http://stackoverflow.com/questions/553303/generate-a-random-date-between-two-other-dates>.
"""
# TODO: Improve default time range: from (now - 110 years) to now.
time_format = "%Y-%m-%d %H:%M:%S"
start = datetime.strptime(start_text, time_format)
end = datetime.strptime(end_text, time_format)
delta_datetime = end - start
delta_in_seconds = (delta_datetime.days * 24 * 60 * 60) + delta_datetime.seconds
random_seconds = randomizer.randrange(delta_in_seconds)
return start + timedelta(seconds=random_seconds)
def random_datetime(randomizer, time_format="%Y-%m-%d %H:%M:%S"):
sometimes = _random_datetime(randomizer)
return str(sometimes.strftime(time_format))
def random_first_name(randomizer, is_male=True):
if is_male:
first_names_pool = _MALE_NAMES
else:
first_names_pool = _FEMALE_NAMES
result = randomizer.choice(first_names_pool)
return result
def random_surname(randomizer):
result = randomizer.choice(_SURNAMES)
return result
def random_name(randomizer, is_male=True):
result = "%s %s" % (random_first_name(randomizer, is_male), random_surname(randomizer))
return result
def _path_to_project_folder(folder):
assert folder is not None
result = os.getcwd()
cutplace_init_path = os.path.join("cutplace", "__init__.py")
if not os.path.exists(os.path.join(result, cutplace_init_path)):
project_folder_found = False
previous_result = None
while not project_folder_found and (result != previous_result):
previous_result = result
result = os.path.dirname(result)
project_folder_found = os.path.exists(os.path.join(result, cutplace_init_path))
if not project_folder_found:
raise IOError(
"cannot find project folder: test must run from project folder; "
+ "currently attempting to find project folder in: %r" % os.getcwd()
)
if folder is not None:
result = os.path.join(result, folder)
return result
_EXAMPLES_FOLDER_PATH = _path_to_project_folder("examples")
_TESTS_FOLDER_PATH = _path_to_project_folder("tests")
def path_to_examples_folder():
"""
    Path to the project's 'examples' folder.
"""
return _EXAMPLES_FOLDER_PATH
def path_to_example(file_name):
"""
Path of example file ``file_name``.
"""
assert file_name
return os.path.join(_EXAMPLES_FOLDER_PATH, file_name)
def path_to_test_folder(folder):
"""
Path to sub folder `folder` in 'tests' folder.
"""
assert folder
return os.path.join(_TESTS_FOLDER_PATH, folder)
def path_to_test_file(folder, file_name):
"""
Path of file `file_name` in `folder` located in 'tests' folder.
"""
assert folder
assert file_name
result = os.path.join(path_to_test_folder(folder), file_name)
return result
def path_to_test_data(file_name):
"""
Path to test file `file_name` in 'tests/data' folder.
"""
assert file_name
return path_to_test_file("data", file_name)
def path_to_test_result(file_name):
"""
Path to test file `file_name` in 'tests/result' folder.
"""
assert file_name
return path_to_test_file("results", file_name)
def path_to_test_cid(cid_file_name):
"""
Path to test CID `cid_file_name` which has to be located in 'examples'
    or 'tests/data/cids'.
"""
assert cid_file_name
result = path_to_example(cid_file_name)
if not os.path.exists(result):
result = path_to_test_file(os.path.join("data", "cids"), cid_file_name)
return result
def path_to_test_plugins():
"""
Path to folder containing test plugins.
"""
return _EXAMPLES_FOLDER_PATH
CID_CUSTOMERS_ODS_PATH = path_to_example("cid_customers.ods")
CID_CUSTOMERS_XLS_PATH = path_to_test_cid("cid_customers.xls")
CUSTOMERS_CSV_PATH = path_to_example("customers.csv")
def create_test_customer_row(customer_id, randomizer=None):
actual_randomizer = randomizer if randomizer is not None else random.Random()
gender = actual_randomizer.choice(["female", "male"])
first_name = random_first_name(actual_randomizer, gender == "male")
surname = random_surname(actual_randomizer)
if actual_randomizer.randint(0, 100) == 0:
gender = ""
date_of_birth = random_datetime(actual_randomizer, "%Y-%m-%d")
return [str(customer_id), surname, first_name, date_of_birth, gender]
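# A minimal usage sketch (not part of the original module): build a few
# reproducible customer rows, e.g. as input for a CSV test fixture.
#
#     randomizer = random.Random(0)
#     rows = [create_test_customer_row(customer_id, randomizer) for customer_id in range(1, 4)]
#     # each row is [customer_id, surname, first_name, date_of_birth, gender]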
def assert_fnmatches(test_case, actual_value, expected_pattern):
assert test_case is not None
assert expected_pattern is not None
test_case.assertNotEqual(None, actual_value)
if not fnmatch.fnmatch(actual_value, expected_pattern):
test_case.fail("%r must match pattern %r" % (actual_value, expected_pattern))
def assert_error_fnmatches(test_case, actual_error, expected_message_pattern):
assert test_case is not None
assert actual_error is not None
assert isinstance(actual_error, Exception)
assert expected_message_pattern is not None
actual_message = str(actual_error)
assert_fnmatches(test_case, actual_message, expected_message_pattern)
def assert_raises_and_fnmatches(test_case, expected_error, expected_message_pattern, function, *arguments):
try:
function(*arguments)
test_case.fail("function " + function.__name__ + "() must fail with " + expected_error.__name__)
except expected_error as error:
assert_error_fnmatches(test_case, error, expected_message_pattern)
def unified_newlines(text):
"""
Same as ``text`` but with newline sequences unified to ``'\n'``.
"""
assert text is not None
return text.replace("\r\n", "\n").replace("\r", "\n")
|
roskakori/cutplace
|
tests/dev_test.py
|
Python
|
lgpl-3.0
| 19,440
|
[
"Amber",
"Brian",
"CRYSTAL"
] |
0064b4e79ab930695a23807eb12d9015070843493e5af1c9b285c856c447337d
|
import numpy as np
from pyiid.adp import has_adp
from pyiid.experiments.elasticscatter.kernels.cpu_nxn import *
from pyiid.experiments.elasticscatter.kernels.cpu_flat import (
get_normalization_array as flat_norm)
__author__ = 'christopher'
def wrap_fq(atoms, qbin=.1, sum_type='fq'):
"""
Generate the reduced structure function
Parameters
----------
atoms: ase.Atoms
The atomic configuration
qbin: float
The size of the scatter vector increment
sum_type: {'fq', 'pdf'}
Which scatter array should be used for the calculation
Returns
-------
fq: np.ndarray
The reduced structure function
"""
q = atoms.get_positions().astype(np.float32)
# get scatter array
if sum_type == 'fq':
scatter_array = atoms.get_array('F(Q) scatter')
else:
scatter_array = atoms.get_array('PDF scatter')
# define scatter_q information and initialize constants
n, qmax_bin = scatter_array.shape
# Get pair coordinate distance array
d = np.zeros((n, n, 3), np.float32)
get_d_array(d, q)
# Get pair distance array
r = np.zeros((n, n), np.float32)
get_r_array(r, d)
# Get normalization array
norm = np.zeros((n, n, qmax_bin), np.float32)
get_normalization_array(norm, scatter_array)
# Get omega
omega = np.zeros((n, n, qmax_bin), np.float32)
get_omega(omega, r, qbin)
adps = has_adp(atoms)
if adps:
adps = adps.get_positions().astype(np.float32)
# get non-normalized fq
if adps is None:
get_fq_inplace(omega, norm)
fq = omega
else:
sigma = np.zeros((n, n), np.float32)
get_sigma_from_adp(sigma, adps, r, d)
tau = np.zeros((n, n, qmax_bin), np.float32)
get_tau(tau, sigma, qbin)
fq = np.zeros((n, n, qmax_bin), np.float32)
get_adp_fq(fq, omega, tau, norm)
del tau, sigma, adps
# Normalize fq
fq = np.sum(fq, axis=(0, 1), dtype=np.float64)
fq = fq.astype(np.float32)
norm2 = np.zeros((int(n * (n - 1) / 2.), qmax_bin), np.float32)
flat_norm(norm2, scatter_array, 0)
na = np.mean(norm2, axis=0, dtype=np.float32) * np.float32(n)
# na = np.mean(norm2, axis=0, dtype=np.float64) * n
old_settings = np.seterr(all='ignore')
fq = np.nan_to_num(fq / na)
np.seterr(**old_settings)
del q, d, r, norm, omega, na
return fq
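# Minimal usage sketch (assumptions, not part of the original module: scatter
# factors normally come from an ElasticScatter calculator; a flat dummy array
# stands in here and no ADPs are attached to the atoms):
#
#     from ase import Atoms
#     atoms = Atoms('Au4', positions=[[0, 0, 0], [2.8, 0, 0], [0, 2.8, 0], [0, 0, 2.8]])
#     atoms.set_array('F(Q) scatter', np.ones((len(atoms), 250), np.float32))
#     fq = wrap_fq(atoms, qbin=.1)  # -> shape (250,)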
def wrap_fq_grad(atoms, qbin=.1, sum_type='fq'):
"""
Generate the reduced structure function gradient
Parameters
----------
atoms: ase.Atoms
The atomic configuration
qbin: float
The size of the scatter vector increment
sum_type: {'fq', 'pdf'}
Which scatter array should be used for the calculation
Returns
-------
    dfq_dq: np.ndarray
The reduced structure function gradient
"""
q = atoms.get_positions().astype(np.float32)
qbin = np.float32(qbin)
# get scatter array
if sum_type == 'fq':
scatter_array = atoms.get_array('F(Q) scatter')
else:
scatter_array = atoms.get_array('PDF scatter')
# define scatter_q information and initialize constants
qmax_bin = scatter_array.shape[1]
n = len(q)
# Get pair coordinate distance array
d = np.zeros((n, n, 3), np.float32)
get_d_array(d, q)
# Get pair distance array
r = np.zeros((n, n), np.float32)
get_r_array(r, d)
# Get normalization array
norm = np.zeros((n, n, qmax_bin), np.float32)
get_normalization_array(norm, scatter_array)
# Get omega
omega = np.zeros((n, n, qmax_bin), np.float32)
get_omega(omega, r, qbin)
# Get grad omega
grad_omega = np.zeros((n, n, 3, qmax_bin), np.float32)
get_grad_omega(grad_omega, omega, r, d, qbin)
adps = has_adp(atoms)
if adps:
adps = adps.get_positions().astype(np.float32)
if adps is None:
# Get grad FQ
get_grad_fq_inplace(grad_omega, norm)
grad_fq = grad_omega
else:
sigma = np.zeros((n, n), np.float32)
get_sigma_from_adp(sigma, adps, r, d)
tau = np.zeros((n, n, qmax_bin), np.float32)
get_tau(tau, sigma, qbin)
grad_tau = np.zeros((n, n, 3, qmax_bin), np.float32)
get_grad_tau(grad_tau, tau, r, d, sigma, adps, qbin)
grad_fq = np.zeros((n, n, 3, qmax_bin), np.float32)
get_adp_grad_fq(grad_fq, omega, tau, grad_omega, grad_tau, norm)
del tau, sigma, adps
# Normalize FQ
grad_fq = grad_fq.sum(1)
# '''
norm = np.zeros((int(n * (n - 1) / 2.), qmax_bin), np.float32)
flat_norm(norm, scatter_array, 0)
na = np.mean(norm, axis=0) * np.float32(n)
old_settings = np.seterr(all='ignore')
grad_fq = np.nan_to_num(grad_fq / na)
np.seterr(**old_settings)
del d, r, scatter_array, norm, omega, grad_omega
return grad_fq
def wrap_fq_dadp(atoms, qbin=.1, sum_type='fq'):
q = atoms.get_positions().astype(np.float32)
qbin = np.float32(qbin)
# get scatter array
if sum_type == 'fq':
scatter_array = atoms.get_array('F(Q) scatter')
else:
scatter_array = atoms.get_array('PDF scatter')
# define scatter_q information and initialize constants
qmax_bin = scatter_array.shape[1]
n = len(q)
adps = has_adp(atoms)
if adps:
adps = adps.get_positions().astype(np.float32)
if adps is None:
return np.zeros((n, 3, qmax_bin), np.float32)
# Get pair coordinate distance array
d = np.zeros((n, n, 3), np.float32)
get_d_array(d, q)
# Get pair distance array
r = np.zeros((n, n), np.float32)
get_r_array(r, d)
# Get normalization array
norm = np.zeros((n, n, qmax_bin), np.float32)
get_normalization_array(norm, scatter_array)
# Get omega
omega = np.zeros((n, n, qmax_bin), np.float32)
get_omega(omega, r, qbin)
sigma = np.zeros((n, n), np.float32)
get_sigma_from_adp(sigma, adps, r, d)
tau = np.zeros((n, n, qmax_bin), np.float32)
get_tau(tau, sigma, qbin)
dtau_dadp = np.zeros((n, n, 3, qmax_bin), np.float32)
get_dtau_dadp(dtau_dadp, tau, sigma, r, d, qbin)
get_dfq_dadp_inplace(dtau_dadp, omega, norm)
grad_fq = dtau_dadp
del tau, sigma, adps
# Normalize FQ
grad_fq = grad_fq.sum(1)
# '''
norm = np.zeros((int(n * (n - 1) / 2.), qmax_bin), np.float32)
flat_norm(norm, scatter_array, 0)
na = np.mean(norm, axis=0) * np.float32(n)
old_settings = np.seterr(all='ignore')
grad_fq = np.nan_to_num(grad_fq / na)
np.seterr(**old_settings)
del d, r, scatter_array, norm, omega
return grad_fq
|
CJ-Wright/pyIID
|
pyiid/experiments/elasticscatter/cpu_wrappers/nxn_cpu_wrap.py
|
Python
|
bsd-3-clause
| 6,631
|
[
"ASE"
] |
1445dbcc4a6e47a3ee592f883078ffda94448acb77ec3a0194f3a03674ff5ba6
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
# Our imports
import emission.analysis.intake.segmentation.section_segmentation_methods.smoothed_high_confidence_motion as eaisms
import emission.core.wrapper.motionactivity as ecwm
import emission.core.wrapper.location as ecwl
class SmoothedHighConfidenceMotionWithVisitTransitions(eaisms.SmoothedHighConfidenceMotion):
def create_unknown_section(self, location_points_df):
assert(len(location_points_df) > 0)
return (location_points_df.iloc[0], location_points_df.iloc[-1], ecwm.MotionTypes.UNKNOWN)
def get_section_if_applicable(self, timeseries, distance_from_start, time_query, location_points):
# We don't have any motion changes. So let's check to see if we
# have a visit transition, which will help us distinguish between
# real and fake trips.
# Yech! This feels really hacky, but if we have a really short trip,
# then we may get the visit ending message after the trip has ended.
# So let's expand the time query by 5 minutes.
# This is based on the 10:06 -> 10:07 trip from the 22 Feb test case
time_query.endTs = time_query.endTs + 5 * 60
transition_df = timeseries.get_data_df('statemachine/transition', time_query)
if len(transition_df) == 0:
logging.debug("there are no transitions, which means no visit transitions, not creating a section")
return None
if distance_from_start > self.distance_threshold:
logging.debug("found distance %s > threshold %s, returning dummy section" %
(distance_from_start, self.distance_threshold))
return self.create_unknown_section(location_points)
visit_ended_transition_df = transition_df[transition_df.transition == 14]
if len(visit_ended_transition_df) == 0:
logging.debug("there are some transitions, but none of them are visit, not creating a section")
return None
# We have a visit transition, so we have a pretty good idea that
# this is a real section. So let's create a dummy section for it and return
logging.debug("found visit transition %s, returning dummy section" % visit_ended_transition_df[["transition", "fmt_time"]])
return self.create_unknown_section(location_points)
def extend_activity_to_location(self, motion_change, location_point):
new_mc = ecwm.Motionactivity({
'type': motion_change.type,
'confidence': motion_change.confidence,
'ts': location_point.data.ts,
'local_dt': location_point.data.local_dt,
'fmt_time': location_point.data.fmt_time
})
return new_mc
def segment_into_sections(self, timeseries, distance_from_place, time_query):
"""
Determine locations within the specified time that represent segmentation points for a trip.
        :param timeseries: the time series for this user
        :param distance_from_place: distance between the start of this trip and the place it started from,
            used to decide whether a trip without motion changes is a real trip
        :param time_query: the range to consider for segmentation
:return: a list of tuples [(start1, end1), (start2, end2), ...] that represent the start and end of sections
in this time range. end[n] and start[n+1] are typically assumed to be adjacent.
"""
motion_changes = self.segment_into_motion_changes(timeseries, time_query)
location_points = timeseries.get_data_df("background/filtered_location", time_query)
if len(location_points) == 0:
logging.debug("There are no points in the trip. How the heck did we segment it?")
return []
if len(motion_changes) == 0:
dummy_sec = self.get_section_if_applicable(timeseries, distance_from_place,
time_query, location_points)
if dummy_sec is not None:
return [dummy_sec]
else:
return []
# Now, we know that we have location points and we have motion_changes.
section_list = []
# Sometimes, on iOS, we have no overlap between motion detection
# and location points.
# In a concrete example, the motion points are:
# 13 100 high 10 2016-02-22T15:36:06.491621-08:00
# 14 100 high 0 2016-02-22T15:36:09.353743-08:00
# 15 100 high 10 2016-02-22T15:36:13.169997-08:00
# 16 75 medium 0 2016-02-22T15:36:13.805993-08:00
# while the trip points are 2016-02-22T15:36:00 and then
# 2016-02-22T15:36:23. So there are no location points within
# that very narrow range. And there are no more motion points
# until the trip end at 15:37:35. This is because, unlike android,
# we cannot specify a sampling frequency for the motion activity
# So let us extend the first motion change to the beginning of the
# trip, and the last motion change to the end of the trip
motion_changes[0] = (self.extend_activity_to_location(motion_changes[0][0],
timeseries.df_row_to_entry("background/filtered_location",
location_points.iloc[0])),
motion_changes[0][1])
motion_changes[-1] = (motion_changes[-1][0],
self.extend_activity_to_location(motion_changes[-1][1],
timeseries.df_row_to_entry("background/filtered_location",
location_points.iloc[-1])))
for (start_motion, end_motion) in motion_changes:
logging.debug("Considering %s from %s -> %s" %
(start_motion.type, start_motion.fmt_time, end_motion.fmt_time))
# Find points that correspond to this section
raw_section_df = location_points[(location_points.ts >= start_motion.ts) &
(location_points.ts <= end_motion.ts)]
if len(raw_section_df) == 0:
logging.info("Found no location points between %s and %s" % (start_motion, end_motion))
else:
logging.debug("with iloc, section start point = %s, section end point = %s" %
(ecwl.Location(raw_section_df.iloc[0]), ecwl.Location(raw_section_df.iloc[-1])))
section_list.append((raw_section_df.iloc[0], raw_section_df.iloc[-1], start_motion.type))
# if this lack of overlap is part of an existing set of sections,
# then it is fine, because in the section segmentation code, we
# will mark it as a transition
return section_list
|
shankari/e-mission-server
|
emission/analysis/intake/segmentation/section_segmentation_methods/smoothed_high_confidence_with_visit_transitions.py
|
Python
|
bsd-3-clause
| 6,940
|
[
"VisIt"
] |
8c07c26d4e3ea6ff54ff38a9ce44b72ae4aceca919681cf4cbe6a5d6cb35914d
|
# $Id$
__author__ = 'Martin Felder'
import numpy as np
from pybrain.supervised.trainers import RPropMinusTrainer, BackpropTrainer
from pybrain.structure.modules.mixturedensity import MixtureDensityLayer
def gaussian(x, mean, stddev):
""" return value of homogenous Gaussian at given vector point
x: vector, mean: vector, stddev: scalar """
tmp = -0.5 * sum(((x-mean)/stddev)**2)
return np.exp(tmp) / (np.power(2.*np.pi, 0.5*len(x)) * stddev)
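# Quick sanity check (sketch, not part of the original module): with unit standard
# deviation, a 2-D isotropic Gaussian evaluated at its mean equals 1 / (2*pi):
#
#     gaussian(np.zeros(2), np.zeros(2), 1.0)   # ~0.15915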
class BackpropTrainerMix(BackpropTrainer):
""" Trainer for mixture model network. See Bishop 2006, Eqn. 5.153-5.157.
Due to PyBrain conventions it is more convenient (if not pretty) to treat the
MixtureDensityLayer as having a linear transfer function, and calculate
its derivative here."""
def setData(self, dataset):
# different output dimension check
self.ds = dataset
if dataset:
assert dataset.indim == self.module.indim
assert dataset.outdim == self.module.modulesSorted[-1].nDims
def _calcDerivs(self, seq):
""" calculate derivatives assuming we have a Network with a MixtureDensityLayer as output """
assert isinstance(self.module.modulesSorted[-1], MixtureDensityLayer)
self.module.reset()
for time, sample in enumerate(seq):
input = sample[0]
self.module.inputbuffer[time] = input
self.module.forward()
error = 0
nDims = self.module.modulesSorted[-1].nDims
nGauss = self.module.modulesSorted[-1].nGaussians
for time, sample in reversed(list(enumerate(seq))):
# Should these three lines be inside this 'for' block
# or outside? I moved them inside - Jack
gamma = []
means = []
stddevs = []
dummy, target = sample
par = self.module.outputbuffer[time] # parameters for mixture
# calculate error contributions from all Gaussians in the mixture
for k in range(nGauss):
coeff = par[k]
stddevs.append(par[k+nGauss])
idxm = 2*nGauss + k*nDims
means.append(par[idxm:idxm+nDims])
gamma.append(coeff * gaussian(target, means[-1], stddevs[-1]))
# calculate error for this pattern, and posterior for target
sumg = sum(gamma)
error -= np.log(sumg)
gamma = np.array(gamma)/sumg
invvariance = 1./par[nGauss:2*nGauss]**2
invstddev = 1./np.array(stddevs)
# calculate gradient wrt. mixture coefficients
grad_c = par[0:nGauss] - gamma
# calculate gradient wrt. standard deviations
grad_m = []
grad_s = []
for k in range(nGauss):
delta = means[k]-target
grad_m.append(gamma[k]*delta*invvariance[k])
grad_s.append(-gamma[k]*(np.dot(delta,delta)*invvariance[k]*invstddev[k] - invstddev[k]))
self.module.outputerror[time] = -np.r_[grad_c,grad_s,np.array(grad_m).flatten()]
self.module.backward()
return error, 1.0
class RPropMinusTrainerMix(BackpropTrainerMix,RPropMinusTrainer):
""" RProp trainer for mixture model network. See Bishop 2006, Eqn. 5.153-5.157. """
dummy = 0
|
arabenjamin/pybrain
|
pybrain/supervised/trainers/mixturedensity.py
|
Python
|
bsd-3-clause
| 3,420
|
[
"Gaussian"
] |
f99c16e39b77480a05a6e3febbe31dbb14695535622584ca50173c0c99b84c13
|
from __future__ import absolute_import
from typing import Any, Dict, List, Set, Tuple, TypeVar, \
    Union, Optional, Sequence, AbstractSet
from typing.re import Match
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, \
get_stream_cache_key, active_user_dicts_in_realm_cache_key, \
active_bot_dicts_in_realm_cache_key, active_user_dict_fields, \
active_bot_dict_fields
from zerver.lib.utils import make_safe_digest, generate_random_token
from django.db import transaction
from zerver.lib.avatar import gravatar_hash, get_avatar_url
from zerver.lib.camo import get_camo_url
from django.utils import timezone
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.core.validators import MinLengthValidator, RegexValidator
from django.utils.translation import ugettext_lazy as _
import zlib
from bitfield import BitField
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import ujson
import logging
from six import text_type
import time
import datetime
bugdown = None # type: Any
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[str], AbstractSet[str])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache cache.
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[text_type, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
## type: (int, int, int) -> Union[text_type, List[Dict[text_type, Any]]]
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient):
## type: (Recipient) -> Union[text_type, List[Dict[text_type, Any]]]
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id
)
def flush_per_request_caches():
# type: () -> None
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
## type: (int, int, int) -> Union[text_type, List[Dict[text_type, Any]]]
"""
returns: an appropriate object describing the recipient. For a
stream this will be the stream name as a string. For a huddle or
personal, it will be an array of dicts about each recipient.
"""
if recipient_type == Recipient.STREAM:
stream = Stream.objects.get(id=recipient_type_id)
return stream.name
# We don't really care what the ordering is, just that it's deterministic.
user_profile_list = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
.select_related()
.order_by('email'))
return [{'email': user_profile.email,
'domain': user_profile.realm.domain,
'full_name': user_profile.full_name,
'short_name': user_profile.short_name,
'id': user_profile.id,
'is_mirror_dummy': user_profile.is_mirror_dummy,} for user_profile in user_profile_list]
def completely_open(domain):
# type: (text_type) -> bool
# This domain is completely open to everyone on the internet to
# join. E-mail addresses do not need to match the domain and
# an invite from an existing user is not required.
realm = get_realm(domain)
if not realm:
return False
return not realm.invite_required and not realm.restricted_to_domain
def get_unique_open_realm():
# type: () -> Optional[Realm]
# We only return a realm if there is a unique realm and it is completely open.
realms = Realm.objects.filter(deactivated=False)
if settings.VOYAGER:
# On production installations, the "zulip.com" realm is an
# empty realm just used for system bots, so don't include it
# in this accounting.
realms = realms.exclude(domain="zulip.com")
if len(realms) != 1:
return None
realm = realms[0]
if realm.invite_required or realm.restricted_to_domain:
return None
return realm
def get_realm_emoji_cache_key(realm):
# type: (Realm) -> str
return 'realm_emoji:%s' % (realm.id,)
class Realm(models.Model):
# domain is a domain in the Internet sense. It must be structured like a
    # valid email domain. We use it to restrict access, identify bots, etc.
domain = models.CharField(max_length=40, db_index=True, unique=True)
# name is the user-visible identifier for the realm. It has no required
# structure.
name = models.CharField(max_length=40, null=True)
restricted_to_domain = models.BooleanField(default=True)
invite_required = models.BooleanField(default=False)
invite_by_admins_only = models.BooleanField(default=False)
create_stream_by_admins_only = models.BooleanField(default=False)
mandatory_topics = models.BooleanField(default=False)
show_digest_email = models.BooleanField(default=True)
name_changes_disabled = models.BooleanField(default=False)
date_created = models.DateTimeField(default=timezone.now)
notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True)
deactivated = models.BooleanField(default=False)
DEFAULT_NOTIFICATION_STREAM_NAME = 'announce'
def __repr__(self):
# type: () -> str
return (u"<Realm: %s %s>" % (self.domain, self.id)).encode("utf-8")
def __str__(self):
# type: () -> str
return self.__repr__()
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self):
# type: () -> Dict[str, Dict[str, str]]
return get_realm_emoji_uncached(self)
@property
def deployment(self):
# type: () -> Any # returns a Deployment from zilencer.models
try:
return self._deployments.all()[0]
except IndexError:
return None
@deployment.setter # type: ignore # https://github.com/python/mypy/issues/220
def set_deployments(self, value):
# type: (Any) -> None
self._deployments = [value] # type: Any
def get_admin_users(self):
# type: () -> List[UserProfile]
return UserProfile.objects.filter(realm=self, is_realm_admin=True,
is_active=True).select_related()
def get_active_users(self):
# type: () -> List[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
class Meta(object):
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
post_save.connect(flush_realm, sender=Realm)
class RealmAlias(models.Model):
realm = models.ForeignKey(Realm, null=True)
domain = models.CharField(max_length=80, db_index=True, unique=True)
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email):
# type: (text_type) -> text_type
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def split_email_to_domain(email):
# type: (text_type) -> text_type
return email.split("@")[-1].lower()
# Returns the domain, potentially de-aliased, for the realm
# that this user's email is in
def resolve_email_to_domain(email):
# type: (text_type) -> text_type
domain = split_email_to_domain(email)
alias = alias_for_realm(domain)
if alias is not None:
domain = alias.realm.domain
return domain
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
# type: (text_type, Realm) -> bool
# Anyone can be in an open realm
if not realm.restricted_to_domain:
return True
# Otherwise, domains must match (case-insensitively)
email_domain = resolve_email_to_domain(email)
return email_domain == realm.domain.lower()
def alias_for_realm(domain):
# type: (text_type) -> Optional[RealmAlias]
try:
return RealmAlias.objects.get(domain=domain)
except RealmAlias.DoesNotExist:
return None
def remote_user_to_email(remote_user):
# type: (text_type) -> text_type
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
class RealmEmoji(models.Model):
realm = models.ForeignKey(Realm)
# Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
name = models.TextField(validators=[MinLengthValidator(1),
RegexValidator(regex=r'^[0-9a-zA-Z.\-_]+(?<![.\-_])$',
message=_("Invalid characters in Emoji name"))])
    # URLs start having browser compatibility problems below 2000
# characters, so 1000 seems like a safe limit.
img_url = models.URLField(max_length=1000)
class Meta(object):
unique_together = ("realm", "name")
def __str__(self):
# type: () -> str
return "<RealmEmoji(%s): %s %s>" % (self.realm.domain, self.name, self.img_url)
def get_realm_emoji_uncached(realm):
# type: (Realm) -> Dict[str, Dict[str, str]]
d = {}
for row in RealmEmoji.objects.filter(realm=realm):
d[row.name] = dict(source_url=row.img_url,
display_url=get_camo_url(row.img_url))
return d
def flush_realm_emoji(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
class RealmFilter(models.Model):
realm = models.ForeignKey(Realm)
pattern = models.TextField()
url_format_string = models.TextField()
class Meta(object):
unique_together = ("realm", "pattern")
def __str__(self):
# type: () -> str
return "<RealmFilter(%s): %s %s>" % (self.realm.domain, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(domain):
# type: (str) -> str
return 'all_realm_filters:%s' % (domain,)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache = {} # type: Dict[str, List[Tuple[str, str]]]
def realm_filters_for_domain(domain):
# type: (str) -> List[Tuple[str, str]]
domain = domain.lower()
if domain not in per_request_realm_filters_cache:
per_request_realm_filters_cache[domain] = realm_filters_for_domain_remote_cache(domain)
return per_request_realm_filters_cache[domain]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_domain_remote_cache(domain):
# type: (str) -> List[Tuple[str, str]]
filters = []
for realm_filter in RealmFilter.objects.filter(realm=get_realm(domain)):
filters.append((realm_filter.pattern, realm_filter.url_format_string))
return filters
def all_realm_filters():
# type: () -> Dict[str, List[Tuple[str, str]]]
filters = defaultdict(list) # type: Dict[str, List[Tuple[str, str]]]
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm.domain].append((realm_filter.pattern, realm_filter.url_format_string))
return filters
def flush_realm_filter(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_delete(get_realm_filters_cache_key(realm.domain))
try:
per_request_realm_filters_cache.pop(realm.domain.lower())
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(AbstractBaseUser, PermissionsMixin):
DEFAULT_BOT = 1
# Fields from models.AbstractUser minus last_name and first_name,
# which we don't use; email is modified to make it indexed and unique.
email = models.EmailField(blank=False, db_index=True, unique=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True, db_index=True)
is_realm_admin = models.BooleanField(default=False, db_index=True)
is_bot = models.BooleanField(default=False, db_index=True)
bot_type = models.PositiveSmallIntegerField(null=True, db_index=True)
is_api_super_user = models.BooleanField(default=False, db_index=True)
date_joined = models.DateTimeField(default=timezone.now)
is_mirror_dummy = models.BooleanField(default=False)
bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
# Our custom site-specific fields
full_name = models.CharField(max_length=MAX_NAME_LENGTH)
short_name = models.CharField(max_length=MAX_NAME_LENGTH)
# pointer points to Message.id, NOT UserMessage.id.
pointer = models.IntegerField()
last_pointer_updater = models.CharField(max_length=64)
realm = models.ForeignKey(Realm)
api_key = models.CharField(max_length=32)
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications = models.BooleanField(default=False)
enable_stream_sounds = models.BooleanField(default=False)
# PM + @-mention notifications.
enable_desktop_notifications = models.BooleanField(default=True)
enable_sounds = models.BooleanField(default=True)
enable_offline_email_notifications = models.BooleanField(default=True)
enable_offline_push_notifications = models.BooleanField(default=True)
enable_digest_emails = models.BooleanField(default=True)
# Old notification field superseded by existence of stream notification
# settings.
default_desktop_notifications = models.BooleanField(default=True)
###
last_reminder = models.DateTimeField(default=timezone.now, null=True)
rate_limits = models.CharField(default="", max_length=100) # comma-separated list of range:max pairs
# Default streams
default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+')
default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+')
default_all_public_streams = models.BooleanField(default=False)
# UI vars
enter_sends = models.NullBooleanField(default=True)
autoscroll_forever = models.BooleanField(default=False)
left_side_userlist = models.BooleanField(default=False)
# display settings
twenty_four_hour_time = models.BooleanField(default=False)
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Minutes to wait before warning a bot owner that her bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
AVATAR_FROM_GRAVATAR = 'G'
AVATAR_FROM_USER = 'U'
AVATAR_FROM_SYSTEM = 'S'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
(AVATAR_FROM_SYSTEM, 'System generated'),
)
avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)
TUTORIAL_WAITING = 'W'
TUTORIAL_STARTED = 'S'
TUTORIAL_FINISHED = 'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps = models.TextField(default=ujson.dumps([]))
invites_granted = models.IntegerField(default=0)
invites_used = models.IntegerField(default=0)
alert_words = models.TextField(default=ujson.dumps([])) # json-serialized list of strings
# Contains serialized JSON of the form:
# [["social", "mit"], ["devel", "ios"]]
muted_topics = models.TextField(default=ujson.dumps([]))
objects = UserManager() # type: UserManager
def can_admin_user(self, target_user):
# type: (UserProfile) -> bool
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def last_reminder_tzaware(self):
        # type: () -> datetime.datetime
if self.last_reminder is not None and timezone.is_naive(self.last_reminder):
logging.warning("Loaded a user_profile.last_reminder for user %s that's not tz-aware: %s"
% (self.email, self.last_reminder))
return self.last_reminder.replace(tzinfo=timezone.utc)
return self.last_reminder
def __repr__(self):
# type: () -> str
return (u"<UserProfile: %s %s>" % (self.email, self.realm)).encode("utf-8")
def __str__(self):
# type: () -> str
return self.__repr__()
@staticmethod
def emails_from_ids(user_ids):
# type: (Sequence[int]) -> Dict[int, text_type]
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def can_create_streams(self):
# type: () -> bool
if self.is_realm_admin or not self.realm.create_stream_by_admins_only:
return True
else:
return False
def receives_offline_notifications(user_profile):
# type: (UserProfile) -> bool
return ((user_profile.enable_offline_email_notifications or
user_profile.enable_offline_push_notifications) and
not user_profile.is_bot)
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
email = models.EmailField()
referred_by = models.ForeignKey(UserProfile, null=True)
streams = models.ManyToManyField('Stream')
invited_at = models.DateTimeField(auto_now=True)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0)
realm = models.ForeignKey(Realm, null=True)
class PushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind = models.PositiveSmallIntegerField(choices=KINDS)
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token = models.CharField(max_length=4096, unique=True)
last_updated = models.DateTimeField(auto_now=True)
    # The user whose device this is
user = models.ForeignKey(UserProfile, db_index=True)
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id = models.TextField(null=True)
class MitUser(models.Model):
email = models.EmailField(unique=True)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0)
def generate_email_token_for_stream():
# type: () -> str
return generate_random_token(32)
class Stream(models.Model):
MAX_NAME_LENGTH = 60
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm = models.ForeignKey(Realm, db_index=True)
invite_only = models.NullBooleanField(default=False)
    # Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream name length is 60, so we
    # have plenty of room for the token.
email_token = models.CharField(
max_length=32, default=generate_email_token_for_stream)
description = models.CharField(max_length=1024, default='')
date_created = models.DateTimeField(default=timezone.now)
deactivated = models.BooleanField(default=False)
def __repr__(self):
# type: () -> str
return (u"<Stream: %s>" % (self.name,)).encode("utf-8")
def __str__(self):
# type: () -> str
return self.__repr__()
def is_public(self):
# type: () -> bool
# All streams are private at MIT.
return self.realm.domain != "mit.edu" and not self.invite_only
class Meta(object):
unique_together = ("name", "realm")
@classmethod
def create(cls, name, realm):
# type: (Any, str, Realm) -> Tuple[Stream, Recipient]
stream = cls(name=name, realm=realm)
stream.save()
recipient = Recipient.objects.create(type_id=stream.id,
type=Recipient.STREAM)
return (stream, recipient)
def num_subscribers(self):
# type: () -> int
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=self.id,
user_profile__is_active=True,
active=True
).count()
# This is stream information that is sent to clients
def to_dict(self):
# type: () -> Dict[str, Any]
return dict(name=self.name,
stream_id=self.id,
description=self.description,
invite_only=self.invite_only)
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
def valid_stream_name(name):
# type: (text_type) -> bool
return name != ""
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
type_id = models.IntegerField(db_index=True)
type = models.PositiveSmallIntegerField(db_index=True)
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta(object):
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle' }
def type_name(self):
# type: () -> str
# Raises KeyError if invalid
return self._type_names[self.type]
def __repr__(self):
# type: () -> str
display_recipient = get_display_recipient(self)
return (u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)).encode("utf-8")
class Client(models.Model):
name = models.CharField(max_length=30, db_index=True, unique=True)
def __repr__(self):
# type: () -> str
return "<Client: %s>" % (self.name,)
get_client_cache = {} # type: Dict[str, Client]
def get_client(name):
# type: (str) -> Client
if name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[name] = result
return get_client_cache[name]
def get_client_cache_key(name):
# type: (str) -> str
return 'get_client:%s' % (make_safe_digest(name),)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
# type: (str) -> Client
(client, _) = Client.objects.get_or_create(name=name)
return client
# get_stream_backend takes either a realm id or a realm
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_stream_backend(stream_name, realm):
# type: (text_type, Realm) -> Stream
if isinstance(realm, Realm):
realm_id = realm.id
else:
realm_id = realm
return Stream.objects.select_related("realm").get(
name__iexact=stream_name.strip(), realm_id=realm_id)
def get_active_streams(realm):
# type: (Realm) -> QuerySet
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
# get_stream takes either a realm id or a realm
def get_stream(stream_name, realm):
# type: (text_type, Union[int, Realm]) -> Optional[Stream]
try:
return get_stream_backend(stream_name, realm)
except Stream.DoesNotExist:
return None
def bulk_get_streams(realm, stream_names):
# type: (Realm, STREAM_NAMES) -> Dict[text_type, Any]
if isinstance(realm, Realm):
realm_id = realm.id
else:
realm_id = realm
def fetch_streams_by_name(stream_names):
# type: (List[str]) -> List[str]
#
# This should be just
#
# Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
if len(stream_names) == 0:
return []
upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
return get_active_streams(realm_id).select_related("realm").extra(
where=[where_clause],
params=stream_names)
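        # For example (a sketch with invented names): for stream_names
        # ['denmark', 'social'] the where_clause above becomes
        #   "UPPER(zerver_stream.name::text) IN (UPPER(%s), UPPER(%s))"
        # with params=['denmark', 'social'], i.e. a case-insensitive
        # version of the __in lookup.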
return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm),
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
# type: (int, int) -> str
return "get_recipient:%s:%s" % (type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
# type: (int, int) -> Recipient
return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
# type: (int, List[int]) -> Dict[int, Any]
def cache_key_function(type_id):
# type: (int) -> str
return get_recipient_cache_key(type, type_id)
def query_function(type_ids):
# type: (List[int]) -> List[Recipient]
return Recipient.objects.filter(type=type, type_id__in=type_ids)
return generic_bulk_cached_fetch(cache_key_function, query_function, type_ids,
id_fetcher=lambda recipient: recipient.type_id)
# NB: This function is currently unused, but may come in handy.
def linebreak(string):
# type: (str) -> str
return string.replace('\n\n', '<p/>').replace('\n', '<br/>')
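# For example (illustrative):
#   linebreak("first\n\nsecond\nthird") == "first<p/>second<br/>third"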
def extract_message_dict(message_str):
# type: (str) -> Dict[str, Any]
return ujson.loads(zlib.decompress(message_str).decode("utf-8"))
def stringify_message_dict(message_dict):
# type: (Dict[Any, Any]) -> str
return zlib.compress(ujson.dumps(message_dict).encode("utf-8"))
def to_dict_cache_key_id(message_id, apply_markdown):
# type: (int, bool) -> str
return 'message_dict:%d:%d' % (message_id, apply_markdown)
def to_dict_cache_key(message, apply_markdown):
# type: (Message, bool) -> str
return to_dict_cache_key_id(message.id, apply_markdown)
class Message(models.Model):
sender = models.ForeignKey(UserProfile)
recipient = models.ForeignKey(Recipient)
subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True)
content = models.TextField()
rendered_content = models.TextField(null=True)
rendered_content_version = models.IntegerField(null=True)
pub_date = models.DateTimeField('date published', db_index=True)
sending_client = models.ForeignKey(Client)
last_edit_time = models.DateTimeField(null=True)
edit_history = models.TextField(null=True)
has_attachment = models.BooleanField(default=False, db_index=True)
has_image = models.BooleanField(default=False, db_index=True)
has_link = models.BooleanField(default=False, db_index=True)
def __repr__(self):
# type: () -> str
display_recipient = get_display_recipient(self.recipient)
return (u"<Message: %s / %s / %r>" % (display_recipient, self.subject, self.sender)).encode("utf-8")
def __str__(self):
# type: () -> str
return self.__repr__()
def get_realm(self):
# type: () -> Realm
return self.sender.realm
def render_markdown(self, content, domain=None):
# type: (str, Optional[str]) -> str
"""Return HTML for given markdown. Bugdown may add properties to the
message object such as `mentions_user_ids` and `mentions_wildcard`.
These are only on this Django object and are not saved in the
database.
"""
global bugdown
if bugdown is None:
from zerver.lib import bugdown
self.mentions_wildcard = False
self.is_me_message = False
self.mentions_user_ids = set() # type: Set[int]
self.user_ids_with_alert_words = set() # type: Set[int]
if not domain:
domain = self.sender.realm.domain
if self.sending_client.name == "zephyr_mirror" and domain == "mit.edu":
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
domain = "mit.edu/zephyr_mirror"
rendered_content = bugdown.convert(content, domain, self)
# For /me syntax, JS can detect the is_me_message flag
# and do special rendering.
if content.startswith('/me ') and '\n' not in content:
if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
self.is_me_message = True
return rendered_content
def set_rendered_content(self, rendered_content, save = False):
# type: (str, bool) -> bool
"""Set the content on the message.
"""
global bugdown
if bugdown is None:
from zerver.lib import bugdown
self.rendered_content = rendered_content
self.rendered_content_version = bugdown.version
if self.rendered_content is not None:
if save:
self.save_rendered_content()
return True
else:
return False
def save_rendered_content(self):
# type: () -> None
self.save(update_fields=["rendered_content", "rendered_content_version"])
def maybe_render_content(self, domain, save = False):
# type: (str, bool) -> bool
"""Render the markdown if there is no existing rendered_content"""
global bugdown
if bugdown is None:
from zerver.lib import bugdown
if Message.need_to_render_content(self.rendered_content, self.rendered_content_version):
return self.set_rendered_content(self.render_markdown(self.content, domain), save)
else:
return True
@staticmethod
def need_to_render_content(rendered_content, rendered_content_version):
# type: (str, int) -> bool
return rendered_content is None or rendered_content_version < bugdown.version
def to_dict(self, apply_markdown):
# type: (bool) -> Dict[str, Any]
return extract_message_dict(self.to_dict_json(apply_markdown))
@cache_with_key(to_dict_cache_key, timeout=3600*24)
def to_dict_json(self, apply_markdown):
# type: (bool) -> str
return stringify_message_dict(self.to_dict_uncached(apply_markdown))
def to_dict_uncached(self, apply_markdown):
# type: (bool) -> Dict[str, Any]
return Message.build_message_dict(
apply_markdown = apply_markdown,
message = self,
message_id = self.id,
last_edit_time = self.last_edit_time,
edit_history = self.edit_history,
content = self.content,
subject = self.subject,
pub_date = self.pub_date,
rendered_content = self.rendered_content,
rendered_content_version = self.rendered_content_version,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_realm_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sender_avatar_source = self.sender.avatar_source,
sender_is_mirror_dummy = self.sender.is_mirror_dummy,
sending_client_name = self.sending_client.name,
recipient_id = self.recipient.id,
recipient_type = self.recipient.type,
recipient_type_id = self.recipient.type_id,
)
@staticmethod
def build_dict_from_raw_db_row(row, apply_markdown):
# type: (Dict[str, Any], bool) -> Dict[str, Any]
'''
row is a row from a .values() call, and it needs to have
all the relevant fields populated
'''
return Message.build_message_dict(
apply_markdown = apply_markdown,
message = None,
message_id = row['id'],
last_edit_time = row['last_edit_time'],
edit_history = row['edit_history'],
content = row['content'],
subject = row['subject'],
pub_date = row['pub_date'],
rendered_content = row['rendered_content'],
rendered_content_version = row['rendered_content_version'],
sender_id = row['sender_id'],
sender_email = row['sender__email'],
sender_realm_domain = row['sender__realm__domain'],
sender_full_name = row['sender__full_name'],
sender_short_name = row['sender__short_name'],
sender_avatar_source = row['sender__avatar_source'],
sender_is_mirror_dummy = row['sender__is_mirror_dummy'],
sending_client_name = row['sending_client__name'],
recipient_id = row['recipient_id'],
recipient_type = row['recipient__type'],
recipient_type_id = row['recipient__type_id'],
)
@staticmethod
def build_message_dict(
apply_markdown,
message,
message_id,
last_edit_time,
edit_history,
content,
subject,
pub_date,
rendered_content,
rendered_content_version,
sender_id,
sender_email,
sender_realm_domain,
sender_full_name,
sender_short_name,
sender_avatar_source,
sender_is_mirror_dummy,
sending_client_name,
recipient_id,
recipient_type,
recipient_type_id,
):
# type: (bool, Message, int, datetime.datetime, str, str, str, datetime.datetime, str, int, int, str, str, str, str, str, bool, str, int, int, int) -> Dict[str, Any]
global bugdown
if bugdown is None:
from zerver.lib import bugdown
avatar_url = get_avatar_url(sender_avatar_source, sender_email)
display_recipient = get_display_recipient_by_id(
recipient_id,
recipient_type,
recipient_type_id
)
if recipient_type == Recipient.STREAM:
display_type = "stream"
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
display_type = "private"
if len(display_recipient) == 1:
# add the sender in if this isn't a message between
                # someone and themselves, preserving ordering
recip = {'email': sender_email,
'domain': sender_realm_domain,
'full_name': sender_full_name,
'short_name': sender_short_name,
'id': sender_id,
'is_mirror_dummy': sender_is_mirror_dummy}
if recip['email'] < display_recipient[0]['email']:
display_recipient = [recip, display_recipient[0]]
elif recip['email'] > display_recipient[0]['email']:
display_recipient = [display_recipient[0], recip]
obj = dict(
id = message_id,
sender_email = sender_email,
sender_full_name = sender_full_name,
sender_short_name = sender_short_name,
sender_domain = sender_realm_domain,
sender_id = sender_id,
type = display_type,
display_recipient = display_recipient,
recipient_id = recipient_id,
subject = subject,
timestamp = datetime_to_timestamp(pub_date),
gravatar_hash = gravatar_hash(sender_email), # Deprecated June 2013
avatar_url = avatar_url,
client = sending_client_name)
obj['subject_links'] = bugdown.subject_links(sender_realm_domain.lower(), subject)
if last_edit_time != None:
obj['last_edit_timestamp'] = datetime_to_timestamp(last_edit_time)
obj['edit_history'] = ujson.loads(edit_history)
if apply_markdown:
if Message.need_to_render_content(rendered_content, rendered_content_version):
if message is None:
# We really shouldn't be rendering objects in this method, but there is
# a scenario where we upgrade the version of bugdown and fail to run
# management commands to re-render historical messages, and then we
# need to have side effects. This method is optimized to not need full
# blown ORM objects, but the bugdown renderer is unfortunately highly
# coupled to Message, and we also need to persist the new rendered content.
# If we don't have a message object passed in, we get one here. The cost
# of going to the DB here should be overshadowed by the cost of rendering
# and updating the row.
message = Message.objects.select_related().get(id=message_id)
# It's unfortunate that we need to have side effects on the message
# in some cases.
rendered_content = message.render_markdown(content, sender_realm_domain)
message.set_rendered_content(rendered_content, True)
if rendered_content is not None:
obj['content'] = rendered_content
else:
obj['content'] = '<p>[Zulip note: Sorry, we could not understand the formatting of your message]</p>'
obj['content_type'] = 'text/html'
else:
obj['content'] = content
obj['content_type'] = 'text/x-markdown'
return obj
def to_log_dict(self):
# type: () -> Dict[str, Any]
return dict(
id = self.id,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sending_client = self.sending_client.name,
type = self.recipient.type_name(),
recipient = get_display_recipient(self.recipient),
subject = self.subject,
content = self.content,
timestamp = datetime_to_timestamp(self.pub_date))
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
# This is a special purpose function optimized for
# callers like get_old_messages_backend().
fields = [
'id',
'subject',
'pub_date',
'last_edit_time',
'edit_history',
'content',
'rendered_content',
'rendered_content_version',
'recipient_id',
'recipient__type',
'recipient__type_id',
'sender_id',
'sending_client__name',
'sender__email',
'sender__full_name',
'sender__short_name',
'sender__realm__id',
'sender__realm__domain',
'sender__avatar_source',
'sender__is_mirror_dummy',
]
return Message.objects.filter(id__in=needed_ids).values(*fields)
@classmethod
def remove_unreachable(cls):
# type: (Any) -> None
"""Remove all Messages that are not referred to by any UserMessage."""
cls.objects.exclude(id__in = UserMessage.objects.values('message_id')).delete()
def sent_by_human(self):
# type: () -> bool
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'website', 'ios', 'android')) or \
('desktop app' in sending_client)
@staticmethod
def content_has_attachment(content):
# type: (text_type) -> Match
return re.search('[/\-]user[\-_]uploads[/\.-]', content)
@staticmethod
def content_has_image(content):
# type: (text_type) -> bool
return bool(re.search('[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))
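    # For example (illustrative): content containing
    # "/user_uploads/1/ab/photo.png" is detected as an image, while a link
    # like "http://example.com/photo.png" outside user_uploads is not.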
@staticmethod
def content_has_link(content):
# type: (text_type) -> bool
return 'http://' in content or 'https://' in content or '/user_uploads' in content
def update_calculated_fields(self):
# type: () -> None
# TODO: rendered_content could also be considered a calculated field
content = self.content
self.has_attachment = bool(Message.content_has_attachment(content))
self.has_image = bool(Message.content_has_image(content))
self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
# type: (Any, **Any) -> None
if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
message = kwargs['instance']
message.update_calculated_fields()
def get_context_for_message(message):
# type: (Message) -> List[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
pub_date__gt=message.pub_date - timedelta(minutes=15),
).order_by('-id')[:10]
# Whenever a message is sent, for each user currently subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table, which has columns (id, user profile id, message id,
# flags) indicating which messages each user has received. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred it, collapsed it, was mentioned in it,
# etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class UserMessage(models.Model):
user_profile = models.ForeignKey(UserProfile)
message = models.ForeignKey(Message)
# We're not using the archived field for now, but create it anyway
# since this table will be an unpleasant one to do schema changes
# on later
ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
'has_alert_word', "historical", 'is_me_message']
flags = BitField(flags=ALL_FLAGS, default=0)
class Meta(object):
unique_together = ("user_profile", "message")
def __repr__(self):
# type: () -> str
display_recipient = get_display_recipient(self.message.recipient)
return (u"<UserMessage: %s / %s (%s)>" % (display_recipient, self.user_profile.email, self.flags_list())).encode("utf-8")
def flags_list(self):
# type: () -> List[str]
return [flag for flag in self.flags.keys() if getattr(self.flags, flag).is_set]
def parse_usermessage_flags(val):
# type: (int) -> List[str]
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if val & mask:
flags.append(flag)
mask <<= 1
return flags
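# For example (a sketch): with ALL_FLAGS ordered as above, bit 0 is 'read',
# bit 1 is 'starred' and bit 2 is 'collapsed', so
#   parse_usermessage_flags(0b101) == ['read', 'collapsed']
#   parse_usermessage_flags(0) == []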
class Attachment(models.Model):
MAX_FILENAME_LENGTH = 100
file_name = models.CharField(max_length=MAX_FILENAME_LENGTH, db_index=True)
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id = models.TextField(db_index=True)
owner = models.ForeignKey(UserProfile)
messages = models.ManyToManyField(Message)
create_time = models.DateTimeField(default=timezone.now, db_index=True)
def __repr__(self):
# type: () -> str
return (u"<Attachment: %s>" % (self.file_name))
def is_claimed(self):
# type: () -> bool
return self.messages.count() > 0
def get_url(self):
# type: () -> str
return "/user_uploads/%s" % (self.path_id)
def get_attachments_by_owner_id(uid):
# type: (int) -> List[Attachment]
return Attachment.objects.filter(owner=uid).select_related('owner')
def get_owners_from_file_name(file_name):
# type: (str) -> List[Attachment]
    # The returned value will be a list of owners, since different users can
    # upload the same file with the same filename.
return Attachment.objects.filter(file_name=file_name).select_related('owner')
def get_old_unclaimed_attachments(weeks_ago):
# type: (int) -> List[Attachment]
delta_weeks_ago = timezone.now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(models.Model):
user_profile = models.ForeignKey(UserProfile)
recipient = models.ForeignKey(Recipient)
active = models.BooleanField(default=True)
in_home_view = models.NullBooleanField(default=True)
DEFAULT_STREAM_COLOR = "#c2c2c2"
color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
desktop_notifications = models.BooleanField(default=True)
audible_notifications = models.BooleanField(default=True)
# Combination desktop + audible notifications superseded by the
# above.
notifications = models.BooleanField(default=False)
class Meta(object):
unique_together = ("user_profile", "recipient")
def __repr__(self):
# type: () -> str
return (u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)).encode("utf-8")
def __str__(self):
# type: () -> str
return self.__repr__()
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
# type: (int) -> UserProfile
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
# type: (text_type) -> UserProfile
return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(active_user_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_user_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True) \
.values(*active_user_dict_fields)
@cache_with_key(active_bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_bot_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=True) \
.values(*active_bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
# type: (UserProfile, bool) -> List[Dict[str, Any]]
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_active_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_active=True, is_bot=True,
bot_owner=user_profile).values(*active_bot_dict_fields)
return [{'email': botdict['email'],
'full_name': botdict['full_name'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': get_avatar_url(botdict['avatar_source'], botdict['email']),
}
for botdict in result]
def get_prereg_user_by_email(email):
# type: (str) -> PreregistrationUser
# A user can be invited many times, so only return the result of the latest
# invite.
return PreregistrationUser.objects.filter(email__iexact=email.strip()).latest("invited_at")
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash = models.CharField(max_length=40, db_index=True, unique=True)
def get_huddle_hash(id_list):
# type: (List[int]) -> str
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
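# For example (a sketch): get_huddle_hash([3, 1, 2, 3]) deduplicates and
# sorts the ids to [1, 2, 3], builds the key "1,2,3", and returns
# make_safe_digest("1,2,3"); any ordering of the same user ids therefore
# yields the same huddle_hash.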
def huddle_hash_cache_key(huddle_hash):
# type: (str) -> str
return "huddle_by_hash:%s" % (huddle_hash,)
def get_huddle(id_list):
# type: (List[int]) -> Huddle
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
# type: (str, List[int]) -> Huddle
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
with transaction.atomic():
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
subs_to_create = [Subscription(recipient=recipient,
user_profile=get_user_profile_by_id(user_profile_id))
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
def get_realm(domain):
# type: (text_type) -> Optional[Realm]
if not domain:
return None
try:
return Realm.objects.get(domain__iexact=domain.strip())
except Realm.DoesNotExist:
return None
def clear_database():
# type: () -> None
pylibmc.Client(['127.0.0.1']).flush_all()
model = None # type: Any
for model in [Message, Stream, UserProfile, Recipient,
Realm, Subscription, Huddle, UserMessage, Client,
DefaultStream]:
model.objects.all().delete()
Session.objects.all().delete()
class UserActivity(models.Model):
user_profile = models.ForeignKey(UserProfile)
client = models.ForeignKey(Client)
query = models.CharField(max_length=50, db_index=True)
count = models.IntegerField()
last_visit = models.DateTimeField('last visit')
class Meta(object):
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
user_profile = models.ForeignKey(UserProfile)
start = models.DateTimeField('start time', db_index=True)
end = models.DateTimeField('end time', db_index=True)
class UserPresence(models.Model):
user_profile = models.ForeignKey(UserProfile)
client = models.ForeignKey(Client)
# Valid statuses
ACTIVE = 1
IDLE = 2
timestamp = models.DateTimeField('presence changed')
status = models.PositiveSmallIntegerField(default=ACTIVE)
@staticmethod
def status_to_string(status):
# type: (int) -> str
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
@staticmethod
def get_status_dict_by_realm(realm_id):
# type: (int) -> defaultdict[Any, Dict[Any, Any]]
user_statuses = defaultdict(dict) # type: defaultdict[Any, Dict[Any, Any]]
query = UserPresence.objects.filter(
user_profile__realm_id=realm_id,
user_profile__is_active=True,
user_profile__is_bot=False
).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
'user_profile__is_mirror_dummy',
)
mobile_user_ids = [row['user'] for row in PushDeviceToken.objects.filter(
            user__realm_id=realm_id,
user__is_active=True,
user__is_bot=False,
).distinct("user").values("user")]
for row in query:
info = UserPresence.to_presence_dict(
client_name=row['client__name'],
status=row['status'],
dt=row['timestamp'],
push_enabled=row['user_profile__enable_offline_push_notifications'],
has_push_devices=row['user_profile__id'] in mobile_user_ids,
is_mirror_dummy=row['user_profile__is_mirror_dummy'],
)
user_statuses[row['user_profile__email']][row['client__name']] = info
return user_statuses
@staticmethod
def to_presence_dict(client_name=None, status=None, dt=None, push_enabled=None,
has_push_devices=None, is_mirror_dummy=None):
# type: (Optional[str], Optional[int], Optional[datetime.datetime], Optional[bool], Optional[bool], Optional[bool]) -> Dict[str, Any]
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self):
# type: () -> Dict[str, Any]
return UserPresence.to_presence_dict(
client_name=self.client.name,
status=self.status,
dt=self.timestamp
)
@staticmethod
def status_from_string(status):
# type: (str) -> Optional[int]
if status == 'active':
status_val = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class Meta(object):
unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
realm = models.ForeignKey(Realm)
stream = models.ForeignKey(Stream)
class Meta(object):
unique_together = ("realm", "stream")
class Referral(models.Model):
user_profile = models.ForeignKey(UserProfile)
email = models.EmailField(blank=False, null=False)
timestamp = models.DateTimeField(auto_now_add=True, null=False)
# This table only gets used on Zulip Voyager instances.
# For reasons of deliverability (and sending from multiple e-mail addresses),
# we still send via Mandrill when sending from the (staging.)zulip.com install.
class ScheduledJob(models.Model):
scheduled_timestamp = models.DateTimeField(auto_now_add=False, null=False)
type = models.PositiveSmallIntegerField()
# Valid types are {email}
# for EMAIL, filter_string is recipient_email
EMAIL = 1
# JSON representation of the job's data. Be careful, as we are not relying on Django to do validation
data = models.TextField()
    # Kind of like a ForeignKey, but the table is determined by type.
filter_id = models.IntegerField(null=True)
filter_string = models.CharField(max_length=100)
|
Vallher/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 58,960
|
[
"VisIt"
] |
d58e85669c7932cd77d0d57fe897211e8d69a065851738c66765a211f93f7686
|
# -*- coding: utf-8 -*-
import click
import pytumblr
from exceptions import DumblrException
from httplib2 import ServerNotFoundError
from requests.exceptions import RequestException
from requests_oauthlib import OAuth1Session
class Tumblr(object):
def __init__(self, ckey, skey, oauth, oauth_s):
self.tumblr = pytumblr.TumblrRestClient(ckey,
skey,
oauth,
oauth_s)
def info(self):
"""Retrieves blog information of the token holder
Filters out all but the following:
- 'user' 'name'
"""
try:
d = self.tumblr.info()
except ServerNotFoundError as e:
raise DumblrException(str(e))
return {'name' : d['user']['name']}
def get_text_posts(self, name):
"""Retrieves all text posts from tumblr, including drafts
"""
try:
pub_posts = self.tumblr.posts(name, type='text', filter='raw')['posts']
draft_posts = self.tumblr.drafts(name, filter='raw')['posts']
except ServerNotFoundError as e:
raise DumblrException(str(e))
## drafts api call cannot filter by type
draft_posts = filter(lambda x : x['type'] == 'text', draft_posts)
## remove every key but
keeps = ['body', 'date', 'format', 'slug', 'title', 'tags', 'id']
pub_posts = [{keep : post[keep] for keep in keeps} for post in pub_posts]
draft_posts = [{keep : post[keep] for keep in keeps} for post in draft_posts]
## include state
pub_posts = map(lambda x : dict(x, state='published'), pub_posts)
draft_posts = map(lambda x : dict(x, state='draft'), draft_posts)
## escape title - it is treated specially since yaml needs to parse it
pub_posts = map(lambda x : dict(x, title=repr(x['title'])), pub_posts)
draft_posts = map(lambda x : dict(x, title=repr(x['title'])), draft_posts)
return pub_posts + draft_posts
def push_post(self, action, post):
try:
info = self.info()
if action == 'create':
## we do not include 'id'
post.pop('id')
return self.tumblr.create_text(info['name'], type='text',
**Tumblr.escape_unicode(post))
elif action == 'update':
return self.tumblr.edit_post(info['name'], type='text',
**Tumblr.escape_unicode(post))
elif action == 'delete':
return self.tumblr.delete_post(info['name'], post['id'])
return None
except ServerNotFoundError as e:
raise DumblrException(str(e))
@staticmethod
def escape_unicode(post):
"""pytumblr does not support unicode too well
"""
for k, v in post.iteritems():
if isinstance(v, basestring):
post[k] = v.encode('utf-8')
elif isinstance(v, list):
post[k] = [l.encode('utf-8') if isinstance(l, basestring) else
                       str(l) for l in v]
return post
@staticmethod
def oauth(ckey=None, skey=None):
"""Retrieves OAuth tokens via OAuth 1.0 protocol
returns a dictionary of retrieved tokens and application tokens
"""
urls = {
'request' : "http://www.tumblr.com/oauth/request_token",
'authorize' : "http://www.tumblr.com/oauth/authorize",
'access' : "http://www.tumblr.com/oauth/access_token"
}
if not ckey or not skey:
click.secho("Please register a Tumblr app to obtain OAuth keys")
click.secho("https://www.tumblr.com/oauth/apps", bold=True)
click.launch("https://www.tumblr.com/oauth/apps")
ckey = click.prompt("Consumer key")
skey = click.prompt("Secret key")
try:
oauth = OAuth1Session(ckey, client_secret=skey)
resp = oauth.fetch_request_token(urls['request'])
okey = resp.get('oauth_token')
osecret = resp.get('oauth_token_secret')
auth_url = oauth.authorization_url(urls['authorize'])
click.secho("Please visit the following URL and authorize the app:")
click.secho(auth_url, bold=True)
click.launch(auth_url)
resp = click.prompt("Paste the URL of the redirected page")
oauth_response = oauth.parse_authorization_response(resp)
verifier = oauth_response.get('oauth_verifier')
oauth = OAuth1Session(ckey,
client_secret=skey,
resource_owner_key=okey,
resource_owner_secret=osecret,
verifier=verifier)
tokens = oauth.fetch_access_token(urls["access"])
except RequestException as e:
raise DumblrException(str(e))
tokens.update({'consumer_key' : ckey, 'secret_key' : skey})
return tokens
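    # A minimal usage sketch (illustrative only; it assumes the token key
    # names returned by oauth() above, i.e. requests-oauthlib's
    # 'oauth_token'/'oauth_token_secret' plus the 'consumer_key' and
    # 'secret_key' entries added before returning):
    #
    #   tokens = Tumblr.oauth()
    #   t = Tumblr(tokens['consumer_key'], tokens['secret_key'],
    #              tokens['oauth_token'], tokens['oauth_token_secret'])
    #   posts = t.get_text_posts(t.info()['name'])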
|
devty1023/dumblr
|
dumblr/tumblr.py
|
Python
|
mit
| 5,213
|
[
"VisIt"
] |
9606a643dc54fb7d4608e8a3a2614e015dfdb0f272a3b9aeca77116603455372
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
#from tensorflow.models.image.cifar10 import cifar10_input
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data', """Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 300.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.075 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.2 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[192, 96],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# local5
with tf.variable_scope('local5') as scope:
weights = _variable_with_weight_decay('weights', shape=[96, 48],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [48], tf.constant_initializer(0.1))
local5 = tf.nn.relu(tf.matmul(local4, weights) + biases, name=scope.name)
_activation_summary(local5)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [48, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local5, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
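# A minimal end-to-end sketch (not part of the original training script) that
# wires together the functions summarized in the module docstring --
# distorted_inputs() -> inference() -> loss() -> train() -- using the same
# TF 0.x-era APIs assumed elsewhere in this file. The step count is arbitrary.
def _example_train_loop(max_steps=10):
  """Sketch only: assumes maybe_download_and_extract() has already been run."""
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)
    images, labels = distorted_inputs()
    logits = inference(images)
    total_loss = loss(logits, labels)
    train_op = train(total_loss, global_step)
    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      for _ in range(max_steps):
        sess.run(train_op)
      coord.request_stop()
      coord.join(threads)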
|
neeasthana/CS498MachineLearning
|
HW8/part4/cifar10part4.py
|
Python
|
unlicense
| 13,908
|
[
"Gaussian"
] |
9749a13434b9f04e1b767434c615457b5782a926fc3f698de7a558b797f8c54f
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the enzyme.dat file from
Enzyme.
http://www.expasy.ch/enzyme/
Classes:
_Scanner Scans Enzyme data.
"""
import string
from Bio import File
from Bio.ParserSupport import *
class _Scanner:
"""Scans Enzyme data.
Tested with:
Release 33
"""
def feed(self, handle, consumer):
"""feed(self, handle, consumer)
Feed in Enzyme data for scanning. handle is a file-like object
that contains keyword information. consumer is a Consumer
object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
while not is_blank_line(uhandle.peekline()): # Am I done yet?
self._scan_record(uhandle, consumer)
def _scan_record(self, uhandle, consumer):
# The first record is just copyright information embedded in
# comments. Check to see if I'm at the first record. If so,
# then just scan the comments and the terminator.
consumer.start_record()
line = uhandle.peekline()
if line[:2] == 'CC':
self._scan_cc(uhandle, consumer)
self._scan_terminator(uhandle, consumer)
else:
for fn in self._scan_fns:
fn(self, uhandle, consumer)
consumer.end_record()
def _scan_line(self, line_type, uhandle, event_fn,
exactly_one=None, one_or_more=None, any_number=None,
up_to_one=None):
        # Callers must set exactly one of exactly_one, one_or_more,
        # any_number, or up_to_one to a true value.  I do not explicitly
        # check to make sure this function is called correctly.
        # This does not guarantee any parameter safety, but I
        # like the readability.  The other strategy I tried was to have
        # parameters min_lines and max_lines.
if exactly_one or one_or_more:
read_and_call(uhandle, event_fn, start=line_type)
if one_or_more or any_number:
while 1:
if not attempt_read_and_call(uhandle, event_fn,
start=line_type):
break
if up_to_one:
attempt_read_and_call(uhandle, event_fn, start=line_type)
def _scan_id(self, uhandle, consumer):
self._scan_line('ID', uhandle, consumer.identification, exactly_one=1)
def _scan_de(self, uhandle, consumer):
self._scan_line('DE', uhandle, consumer.description, one_or_more=1)
def _scan_an(self, uhandle, consumer):
self._scan_line('AN', uhandle, consumer.alternate_name, any_number=1)
def _scan_ca(self, uhandle, consumer):
self._scan_line('CA', uhandle, consumer.catalytic_activity,
any_number=1)
def _scan_cf(self, uhandle, consumer):
self._scan_line('CF', uhandle, consumer.cofactor, any_number=1)
def _scan_cc(self, uhandle, consumer):
self._scan_line('CC', uhandle, consumer.comment, any_number=1)
def _scan_di(self, uhandle, consumer):
self._scan_line('DI', uhandle, consumer.disease, any_number=1)
def _scan_pr(self, uhandle, consumer):
self._scan_line('PR', uhandle, consumer.prosite_reference,
any_number=1)
def _scan_dr(self, uhandle, consumer):
self._scan_line('DR', uhandle, consumer.databank_reference,
any_number=1)
def _scan_terminator(self, uhandle, consumer):
self._scan_line('//', uhandle, consumer.terminator, exactly_one=1)
_scan_fns = [
_scan_id,
_scan_de,
_scan_an,
_scan_ca,
_scan_cf,
_scan_cc,
_scan_di,
_scan_pr,
_scan_dr,
_scan_terminator
]
class DataRecord:
def __init__(self,tr_code='',sw_code=''):
self.tr_code = tr_code
self.sw_code = sw_code
def __str__(self):
return self.tr_code + ", " + self.sw_code
class EnzymeRecord:
def __init__(self):
self.ID = ''
self.DE = []
self.AN = []
self.CA = ''
self.CF = []
self.CC = [] # one comment per line
self.DI = []
self.PR = []
self.DR = []
def __repr__(self):
if self.ID:
if self.DE:
return "%s (%s, %s)" % (self.__class__.__name__,
self.ID, self.DE[0])
else:
return "%s (%s)" % (self.__class__.__name__,
self.ID)
else:
return "%s ( )" % (self.__class__.__name__)
def __str__(self):
output = "ID: " + self.ID
output += " DE: " + repr(self.DE)
output += " AN: " + repr(self.AN)
output += " CA: '" + self.CA + "'"
output += " CF: " + repr(self.CF)
output += " CC: " + repr(self.CC)
output += " DI: " + repr(self.DI)
output += " PR: " + repr(self.PR)
output += " DR: %d Records" % len(self.DR)
return output
class RecordParser(AbstractParser):
def __init__(self):
self._scanner = _Scanner()
self._consumer = _RecordConsumer()
def parse(self, handle):
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
self._scanner.feed(uhandle, self._consumer)
return self._consumer.enzyme_record
class Iterator:
    def __init__(self, handle, parser=None):
        self._uhandle = File.UndoHandle(handle)
        # Keep the parser supplied by the caller; when it is None, next()
        # returns the raw record text instead of a parsed EnzymeRecord.
        self._parser = parser
    def next(self):
lines = []
while 1:
line = self._uhandle.readline()
if not line: break
if line[:2] == '//':
break
lines.append(line)
if not lines:
return None
lines.append('//')
        data = ''.join(lines)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __iter__(self):
return iter(self.next, None)
class _RecordConsumer(AbstractConsumer):
def __init__(self):
self.enzyme_record = EnzymeRecord()
def identification(self, id_info):
self.enzyme_record.ID = id_info.split()[1]
def description(self,de_info):
self.enzyme_record.DE.append(de_info[2:].strip())
def alternate_name(self,an_info):
self.enzyme_record.AN.append(an_info[2:].strip())
def catalytic_activity(self, ca_info):
        self.enzyme_record.CA = ''.join([self.enzyme_record.CA, ca_info[2:].strip()])
def cofactor(self, cf_info):
self.enzyme_record.CF.append(cf_info[2:].strip())
def comment(self, cc_info):
cc = cc_info[2:].strip()
if cc.startswith("-!-"):
self.enzyme_record.CC.append(cc[len("-!-"):].strip())
else:
# The header is all CC, but doesn't start with -!-
if self.enzyme_record.CC:
pre_cc = self.enzyme_record.CC.pop()
else:
pre_cc = ""
new_cc = pre_cc + " " + cc
self.enzyme_record.CC.append(new_cc)
def disease(self, di_info):
self.enzyme_record.DI.append(di_info[2:].strip())
def prosite_reference(self,pr_info):
self.enzyme_record.PR.append(pr_info.split(';')[1].strip())
def databank_reference(self,dr_info):
good_data = dr_info[2:].strip()
pair_data = good_data.split(';')
for pair in pair_data:
if not pair: continue
data_record = DataRecord()
t1, t2 = pair.split(',')
data_record.tr_code, data_record.sw_code = \
t1.strip(), t2.strip()
self.enzyme_record.DR.append(data_record)
def terminator(self,schwarzenegger):
pass # Hasta la Vista, baby!
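# Illustrative usage sketch (not part of the original module). The filename
# handling below is an assumption: it expects a path to an ENZYME-format flat
# file (e.g. enzyme.dat) on the command line and prints each parsed record.
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        handle = open(sys.argv[1])
        for record in Iterator(handle, RecordParser()):
            print(record)
        handle.close()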
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Enzyme/__init__.py
|
Python
|
apache-2.0
| 8,109
|
[
"Biopython"
] |
2ace4df3b39d2a4ad4fdf39b58ca36919a5dbdb8a702b8f51f5cb71810ea3fc5
|
"""
Extracts minimal phrases from symmetrized word alignments
"""
from collections import defaultdict, deque
def as_words(phrase, id2word):
return tuple(id2word[i] for i in phrase)
def try_expand(f, f2e, f_min, f_max, e_min, e_max):
"""
Try to expand the boundaries of a phrase pair based on the alignment points in f2e[f]
:param f: a position in the source
:param f2e: maps source positions into a sorted list of aligned target positions
:param f_min, f_max: source phrase boundary
:param e_min, e_max: target phrase boundary
:returns: f_min, f_max, e_min, e_max, discovered target positions
"""
# retrieve the target positions reachable from f
es = f2e.get(f, None)
extra = []
# if there is any
if es is not None:
if f_min is None:
# we just discovered that we know something about the source phrase
f_min = f_max = f
if e_min is None: # thus e_max is also None
# we just learnt the first thing about the target phrase
e_min = es[0]
e_max = es[-1]
# basically, we discovered the positions [e_min .. e_max]
extra.extend(xrange(e_min, e_max + 1))
else:
# we have the chance to update our target phrase
if e_min > es[0]:
# we discovered a few extra words on the left
extra.extend(xrange(es[0], e_min))
# and update e_min
e_min = es[0]
if e_max < es[-1]: # update e_max
# we discovered a few extra words to the right
extra.extend(xrange(e_max + 1, es[-1] + 1))
# and update e_max
e_max = es[-1]
return f_min, f_max, e_min, e_max, extra
def minimal_biphrases(f_words, e_words, links):
"""
Returns the minimal phrase pairs
:param f_words: list of source words
:param e_words: list of target words
:param links: list of alignment points
:return: list of tuples (source phrase, target phrase) where a phrase is a list of positions in f_words or e_words
"""
# 1) organise alignment points
# first we group them
f2e = defaultdict(set)
e2f = defaultdict(set)
for i, j in links:
f2e[i].add(j)
e2f[j].add(i)
# then we sort them
f2e = {f:sorted(es) for f, es in f2e.iteritems()}
e2f = {e:sorted(fs) for e, fs in e2f.iteritems()}
# biphrases
biphrases = set()
# 2) find minimal phrase pairs
f_done = set()
e_done = set()
# iterate investigating words in the source
# TODO: sort alignment points as to visit adjacent points first
for fi, ej in links:
# check if row or column have alread been done, if so, the minimal phrase consistent with this alignment point has already been found
if fi in f_done or ej in e_done:
continue
else:
# flag row and column as processed
f_done.add(fi)
e_done.add(ej)
# source phrase boundaries
f_min, f_max = fi, fi
# target phrase boundaries
e_min, e_max = ej, ej
# queue of words whose alignment points need be investigated
f_queue = deque([f_min])
e_queue = deque([e_min])
# for as long as there are words to be visited
while f_queue or e_queue:
if f_queue:
# get a source word
f = f_queue.popleft()
# try to expand the boundaries
f_min, f_max, e_min, e_max, extra = try_expand(f, f2e, f_min, f_max, e_min, e_max)
# book discovered target words
e_queue.extend(extra)
if e_queue:
# get a target word
e = e_queue.popleft()
# try to expand the boundaries (the logic is the same, only transposed)
e_min, e_max, f_min, f_max, extra = try_expand(e, e2f, e_min, e_max, f_min, f_max)
# book discovered source words
f_queue.extend(extra)
# store the minimal phrase
f_phrase = tuple(range(f_min, f_max + 1))
e_phrase = tuple(range(e_min, e_max + 1))
biphrases.add((f_phrase, e_phrase))
return biphrases
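# Illustrative sketch (not part of the original module): a toy call to
# minimal_biphrases. The sentences and alignment points below are made up
# purely to show the expected shape of the output.
def _example_minimal_biphrases():
    f_words = 'le chat noir'.split()
    e_words = 'the black cat'.split()
    links = [(0, 0), (1, 2), (2, 1)]
    pairs = minimal_biphrases(f_words, e_words, links)
    # every word is aligned one-to-one, so each minimal pair covers one position
    assert pairs == {((0,), (0,)), ((1,), (2,)), ((2,), (1,))}
    return pairs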
def unaligned_words(f_words, e_words, biphrases):
"""Find unaligned words
:param f_words: source words
:param e_words: target words
:param biphrases: list of phrase pairs (check `minimal_biphrases`)
:returns: set of unaligned source words, set of unaligned target words
"""
fs = set()
es = set()
for fp, ep in biphrases:
fs.update(fp)
es.update(ep)
return frozenset(range(len(f_words))) - fs, frozenset(range(len(e_words))) - es
def parse_strings(fstr, estr, astr):
"""
:param fstr: source string
:param estr: target string
    :param astr: alignment string
:return: list of source words, a list of target words and list of alignment points
where an alignment point is a pair of integers (i, j)
"""
f = fstr.split()
e = estr.split()
a = [map(int, link.split('-')) for link in astr.split()]
return f, e, a
def parse_line(line, separator = ' ||| '):
"""returns the source words, the target words and the alignment points"""
return parse_strings(*line.split(separator))
def read_corpus(istream, separator=' ||| '):
"""
Reads a file containing lines like this:
    source sentence ||| target sentence ||| alignment points
and returns a list where each element is a triple
(source, target, alignment)
and source is a list of source words
target is a list of target words
alignment is a list of pairs (each pair represents an alignment point of the kind (f,e))
"""
return [parse_line(line.strip()) for line in istream]
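# Illustrative sketch (not part of the original module): parsing a single
# ' ||| '-separated line and extracting its minimal phrase pairs. The example
# line is invented for illustration.
def _example_parse_and_extract():
    f, e, a = parse_line('le chat ||| the cat ||| 0-0 1-1')
    return minimal_biphrases(f, e, a)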
|
wilkeraziz/smtutils
|
atools/minphrases.py
|
Python
|
apache-2.0
| 5,841
|
[
"VisIt"
] |
683934696a5ef10aacaa3353012c0a51be9e802d1d2b8a951f5eee2f315cc810
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.jinja'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.jinja'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("foosball.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
url(r'^select2/', include('django_select2.urls')),
# Real stuff goes here
url(r'^api/', include("foosball.api.urls", namespace="api")),
url(r'^games/', include('foosball.games.urls', namespace="games")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
andersinno/foosball
|
config/urls.py
|
Python
|
mit
| 1,691
|
[
"VisIt"
] |
d92fc6055cc096862ecb4047077f9f4f49fe06eba9e244a7282e86add446612e
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
from SceneObject import SceneObject
class Sphere(SceneObject):
'''
A template for drawing a sphere.
'''
def __init__(self, renderers, scale = 1, color = [1.0, 0, 0]):
'''
Initialize the sphere.
'''
# Call the parent constructor
super(Sphere,self).__init__(renderers)
sphereSource = vtk.vtkSphereSource()
sphereSource.SetCenter(0.0, 0.0, 0.0)
sphereSource.SetRadius(scale)
# Make it a little more defined
sphereSource.SetThetaResolution(12)
sphereSource.SetPhiResolution(12)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphereSource.GetOutputPort())
self.vtkActor.SetMapper(sphereMapper)
        # Apply the requested color (defaults to red)
self.vtkActor.GetProperty().SetColor(color[0], color[1], color[2])
def SetColor(self, color = [1.0, 0, 0]):
self.vtkActor.GetProperty().SetColor(color[0], color[1], color[2])
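# Illustrative usage sketch (an assumption, not part of the original file):
# SceneObject is expected to create self.vtkActor and register it with the
# renderers passed in, so a Sphere only needs a list of vtkRenderer objects.
def _example_sphere(renderer):
    # 'renderer' is assumed to be a vtk.vtkRenderer already attached to a render window
    green_sphere = Sphere([renderer], scale=2.0, color=[0.0, 1.0, 0.0])
    green_sphere.SetColor([0.0, 0.0, 1.0])  # recolor the same actor later if desired
    return green_sphere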
|
GearsAD/semisorted_arnerve
|
arnerve/scene/Sphere.py
|
Python
|
mit
| 1,136
|
[
"VTK"
] |
88c4766bea77cad144d3888a0d138f04a772f3d7b7ed11d517ce623376147044
|
from __future__ import print_function
import numpy as np
from theano.compat.six.moves import cStringIO, xrange
import theano.tensor as T
from theano.tests import disturb_mem
from theano.tests.record import Record, RecordMode
from pylearn2.compat import first_key
from pylearn2.costs.cost import Cost, SumOfCosts, DefaultDataSpecsMixin
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models.model import Model
from pylearn2.monitor import Monitor, push_monitor
from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace
from pylearn2.termination_criteria import EpochCounter
from pylearn2.testing.cost import CallbackCost, SumOfParams
from pylearn2.testing.datasets import ArangeDataset
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import (ExponentialDecay,
PolyakAveraging,
LinearDecay,
LinearDecayOverEpoch,
MonitorBasedLRAdjuster,
SGD,
AnnealedLearningRate,
EpochMonitor)
from pylearn2.training_algorithms.learning_rule import (Momentum,
MomentumAdjustor)
from pylearn2.utils.iteration import _iteration_schemes
from pylearn2.utils import safe_izip, safe_union, sharedX
from pylearn2.utils.exc import reraise_as
class SupervisedDummyCost(DefaultDataSpecsMixin, Cost):
supervised = True
def expr(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
(X, Y) = data
return T.square(model(X) - Y).mean()
class DummyCost(DefaultDataSpecsMixin, Cost):
def expr(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
X = data
return T.square(model(X) - X).mean()
class DummyModel(Model):
def __init__(self, shapes, lr_scalers=None):
super(DummyModel, self).__init__()
self._params = [sharedX(np.random.random(shape)) for shape in shapes]
self.input_space = VectorSpace(1)
self.lr_scalers = lr_scalers
def __call__(self, X):
# Implemented only so that DummyCost would work
return X
def get_lr_scalers(self):
if self.lr_scalers:
return dict(zip(self._params, self.lr_scalers))
else:
return dict()
class SoftmaxModel(Model):
"""A dummy model used for testing.
Important properties:
has a parameter (P) for SGD to act on
has a get_output_space method, so it can tell the
algorithm what kind of space the targets for supervised
learning live in
has a get_input_space method, so it can tell the
algorithm what kind of space the features live in
"""
def __init__(self, dim):
super(SoftmaxModel, self).__init__()
self.dim = dim
rng = np.random.RandomState([2012, 9, 25])
self.P = sharedX(rng.uniform(-1., 1., (dim, )))
def get_params(self):
return [self.P]
def get_input_space(self):
return VectorSpace(self.dim)
def get_output_space(self):
return VectorSpace(self.dim)
def __call__(self, X):
# Make the test fail if algorithm does not
# respect get_input_space
assert X.ndim == 2
# Multiplying by P ensures the shape as well
# as ndim is correct
return T.nnet.softmax(X*self.P)
class TopoSoftmaxModel(Model):
"""A dummy model used for testing.
Like SoftmaxModel but its features have 2 topological
dimensions. This tests that the training algorithm
will provide topological data correctly.
"""
def __init__(self, rows, cols, channels):
super(TopoSoftmaxModel, self).__init__()
dim = rows * cols * channels
self.input_space = Conv2DSpace((rows, cols), channels)
self.dim = dim
rng = np.random.RandomState([2012, 9, 25])
self.P = sharedX(rng.uniform(-1., 1., (dim, )))
def get_params(self):
return [self.P]
def get_output_space(self):
return VectorSpace(self.dim)
def __call__(self, X):
# Make the test fail if algorithm does not
# respect get_input_space
assert X.ndim == 4
# Multiplying by P ensures the shape as well
# as ndim is correct
return T.nnet.softmax(X.reshape((X.shape[0], self.dim)) * self.P)
def test_sgd_unspec_num_mon_batch():
# tests that if you don't specify a number of
# monitoring batches, SGD configures the monitor
# to run on all the data
m = 25
visited = [False] * m
rng = np.random.RandomState([25, 9, 2012])
X = np.zeros((m, 1))
X[:, 0] = np.arange(m)
dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(1)
learning_rate = 1e-3
batch_size = 5
cost = DummyCost()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=None,
monitoring_dataset=dataset,
termination_criterion=None,
update_callbacks=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
monitor = Monitor.get_monitor(model)
X = T.matrix()
def tracker(*data):
X, = data
assert X.shape[1] == 1
for i in xrange(X.shape[0]):
visited[int(X[i, 0])] = True
monitor.add_channel(name='tracker',
ipt=X,
val=0.,
prereqs=[tracker],
data_specs=(model.get_input_space(),
model.get_input_source()))
monitor()
if False in visited:
print(visited)
assert False
def test_sgd_sup():
# tests that we can run the sgd algorithm
# on a supervised cost.
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
# Including a monitoring dataset lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X, y=Y)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_sgd_unsup():
# tests that we can run the sgd algorithm
    # on an unsupervised cost.
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# Including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = DummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def get_topological_dataset(rng, rows, cols, channels, m):
X = rng.randn(m, rows, cols, channels)
dim = rows * cols * channels
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
return DenseDesignMatrix(topo_view=X, y=Y)
def test_linear_decay():
# tests that the class LinearDecay in sgd.py
# gets the learning rate properly over the training batches
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at batch 'start'
# by an amount of 'step' specified below.
# the decrease of the learning rate should continue linearly until
# we reach batch 'saturate' at which the learning rate equals
# 'learning_rate * decay_factor'
class LearningRateTracker(object):
def __init__(self):
self.lr_rates = []
def __call__(self, algorithm):
self.lr_rates.append(algorithm.learning_rate.get_value())
dim = 3
dataset_size = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(dataset_size, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecay(start=start, saturate=saturate,
decay_factor=decay_factor)
# including this extension for saving learning rate value after each batch
lr_tracker = LearningRateTracker()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=[linear_decay, lr_tracker],
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
step = (learning_rate - learning_rate*decay_factor)/(saturate - start + 1)
num_batches = np.ceil(dataset_size / float(batch_size)).astype(int)
for i in xrange(epoch_num * num_batches):
actual = lr_tracker.lr_rates[i]
batches_seen = i + 1
if batches_seen < start:
expected = learning_rate
elif batches_seen >= saturate:
expected = learning_rate*decay_factor
elif (start <= batches_seen) and (batches_seen < saturate):
expected = (decay_factor * learning_rate +
(saturate - batches_seen) * step)
if not np.allclose(actual, expected):
raise AssertionError("After %d batches, expected learning rate to "
"be %f, but it is %f." %
(batches_seen, expected, actual))
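# Illustrative sketch (not part of the original tests): the closed-form schedule
# that test_linear_decay verifies, written as a standalone helper. The function
# name and signature are invented for illustration only.
def _expected_linear_decay_lr(learning_rate, start, saturate, decay_factor,
                              batches_seen):
    # flat before 'start', fully decayed from 'saturate' on, linear in between
    step = (learning_rate - learning_rate * decay_factor) / (saturate - start + 1)
    if batches_seen < start:
        return learning_rate
    if batches_seen >= saturate:
        return learning_rate * decay_factor
    return decay_factor * learning_rate + (saturate - batches_seen) * step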
def test_annealed_learning_rate():
# tests that the class AnnealedLearingRate in sgd.py
# gets the learning rate properly over the training batches
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at batch 'anneal_start'
# After batch anneal_start, the learning rate should be
# learning_rate * anneal_start/number of batches seen
class LearningRateTracker(object):
def __init__(self):
self.lr_rates = []
def __call__(self, algorithm):
self.lr_rates.append(algorithm.learning_rate.get_value())
dim = 3
dataset_size = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(dataset_size, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
anneal_start = 5
annealed_rate = AnnealedLearningRate(anneal_start=anneal_start)
# including this extension for saving learning rate value after each batch
lr_tracker = LearningRateTracker()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=[annealed_rate, lr_tracker],
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
num_batches = np.ceil(dataset_size / float(batch_size)).astype(int)
for i in xrange(epoch_num * num_batches):
actual = lr_tracker.lr_rates[i]
batches_seen = i + 1
expected = learning_rate*min(1, float(anneal_start)/batches_seen)
if not np.allclose(actual, expected):
raise AssertionError("After %d batches, expected learning rate to "
"be %f, but it is %f." %
(batches_seen, expected, actual))
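# Illustrative sketch (not part of the original tests): the schedule that
# test_annealed_learning_rate verifies, as a standalone helper with an
# invented name.
def _expected_annealed_lr(learning_rate, anneal_start, batches_seen):
    # constant until 'anneal_start' batches have been seen, then decays as 1/t
    return learning_rate * min(1, float(anneal_start) / batches_seen)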
def test_linear_decay_over_epoch():
# tests that the class LinearDecayOverEpoch in sgd.py
# gets the learning rate properly over the training epochs
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at epoch 'start' by an
# amount of 'step' specified below.
# the decrease of the learning rate should continue linearly until we
# reach epoch 'saturate' at which the learning rate equals
# 'learning_rate * decay_factor'
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecayOverEpoch(start=start,
saturate=saturate,
decay_factor=decay_factor)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[linear_decay])
train.main_loop()
lr = model.monitor.channels['learning_rate']
step = (learning_rate - learning_rate*decay_factor)/(saturate - start + 1)
for i in xrange(epoch_num + 1):
actual = lr.val_record[i]
if i < start:
expected = learning_rate
elif i >= saturate:
expected = learning_rate*decay_factor
elif (start <= i) and (i < saturate):
expected = decay_factor * learning_rate + (saturate - i) * step
if not np.allclose(actual, expected):
raise AssertionError("After %d epochs, expected learning rate to "
"be %f, but it is %f." %
(i, expected, actual))
def test_linear_decay_epoch_xfer():
# tests that the class LinearDecayOverEpoch in sgd.py
    # gets the epochs transferred over properly
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 6
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecayOverEpoch(start=start,
saturate=saturate,
decay_factor=decay_factor)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[linear_decay])
train.main_loop()
lr = model.monitor.channels['learning_rate']
final_learning_rate = lr.val_record[-1]
algorithm2 = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=EpochCounter(epoch_num+1,
new_epochs=False),
update_callbacks=None,
set_batch_size=False)
model_xfer = push_monitor(name="old_monitor",
transfer_experience=True,
model=model)
linear_decay2 = LinearDecayOverEpoch(start=start,
saturate=saturate,
decay_factor=decay_factor)
train2 = Train(dataset,
model_xfer,
algorithm2,
save_path=None,
save_freq=0,
extensions=[linear_decay2])
train2.main_loop()
lr_resume = model_xfer.monitor.channels['learning_rate']
resume_learning_rate = lr_resume.val_record[0]
assert np.allclose(resume_learning_rate,
final_learning_rate)
def test_momentum_epoch_xfer():
    # tests that the class MomentumAdjustor in learning_rule.py
    # gets the epochs transferred over properly
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 6
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False,
learning_rule=Momentum(.4))
start = 1
saturate = 11
final_momentum = 0.9
momentum_adjustor = MomentumAdjustor(final_momentum=final_momentum,
start=start,
saturate=saturate)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[momentum_adjustor])
train.main_loop()
mm = model.monitor.channels['momentum']
final_momentum_init = mm.val_record[-1]
algorithm2 = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=EpochCounter(epoch_num+1,
new_epochs=False),
update_callbacks=None,
set_batch_size=False,
learning_rule=Momentum(.4))
model_xfer = push_monitor(name="old_monitor",
transfer_experience=True,
model=model)
momentum_adjustor2 = MomentumAdjustor(final_momentum=final_momentum,
start=start,
saturate=saturate)
train2 = Train(dataset,
model_xfer,
algorithm2,
save_path=None,
save_freq=0,
extensions=[momentum_adjustor2])
train2.main_loop()
assert np.allclose(model.monitor.channels['momentum'].val_record[0],
final_momentum_init)
def test_val_records_xfer():
    # tests that push_monitor in monitor.py
    # gets the epochs transferred over properly
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 6
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0)
train.main_loop()
assert len(model.monitor.channels['objective'].val_record) ==\
model.monitor._epochs_seen + 1
final_obj = model.monitor.channels['objective'].val_record[-1]
algorithm2 = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=EpochCounter(epoch_num+1,
new_epochs=False),
update_callbacks=None,
set_batch_size=False)
model_xfer = push_monitor(name="old_monitor",
transfer_experience=True,
model=model)
train2 = Train(dataset,
model_xfer,
algorithm2,
save_path=None,
save_freq=0)
train2.main_loop()
assert np.allclose(model.monitor.channels['objective'].val_record[0],
final_obj)
assert len(model.monitor.channels['objective'].val_record) == 2
def test_save_records():
    # tests that the save_records flag of
    # push_monitor in monitor.py
    # gets the val_records transferred over properly
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 6
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0)
train.main_loop()
old_monitor_len =\
len(model.monitor.channels['objective'].val_record)
assert old_monitor_len == model.monitor._epochs_seen + 1
init_obj = model.monitor.channels['objective'].val_record[0]
final_obj = model.monitor.channels['objective'].val_record[-1]
index_final_obj =\
len(model.monitor.channels['objective'].val_record) - 1
algorithm2 = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=EpochCounter(epoch_num+1,
new_epochs=False),
update_callbacks=None,
set_batch_size=False)
model_xfer = push_monitor(name="old_monitor",
transfer_experience=True,
model=model,
save_records=True)
train2 = Train(dataset,
model_xfer,
algorithm2,
save_path=None,
save_freq=0)
train2.main_loop()
assert len(model.old_monitor.channels['objective'].val_record) ==\
old_monitor_len
assert np.allclose(model.monitor.channels['objective'].val_record[0],
init_obj)
assert len(model.monitor.channels['objective'].val_record) ==\
model.monitor._epochs_seen + 1
assert len(model.monitor.channels['objective'].val_record) ==\
epoch_num + 2
assert model.monitor.channels['objective'].val_record[index_final_obj] ==\
final_obj
def test_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py
# gets the learning rate properly over the training epochs
# it runs a small softmax and at the end checks the learning values. It
# runs 2 loops. Each loop evaluates one of the if clauses when checking
# the observation channels. Otherwise, longer training epochs are needed
# to observe both if and elif cases.
high_trigger = 1.0
shrink_amt = 0.99
low_trigger = 0.99
grow_amt = 1.01
min_lr = 1e-7
max_lr = 1.
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 5
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
for i in xrange(2):
if i == 1:
high_trigger = 0.99
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster(high_trigger=high_trigger,
shrink_amt=shrink_amt,
low_trigger=low_trigger,
grow_amt=grow_amt,
min_lr=min_lr,
max_lr=max_lr)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
train.main_loop()
v = model.monitor.channels['objective'].val_record
lr = model.monitor.channels['learning_rate'].val_record
lr_monitor = learning_rate
for i in xrange(2, epoch_num + 1):
if v[i-1] > high_trigger * v[i-2]:
lr_monitor *= shrink_amt
elif v[i-1] > low_trigger * v[i-2]:
lr_monitor *= grow_amt
lr_monitor = max(min_lr, lr_monitor)
lr_monitor = min(max_lr, lr_monitor)
assert np.allclose(lr_monitor, lr[i])
def test_bad_monitoring_input_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py avoids wrong
# settings of channel_name or dataset_name in the constructor.
dim = 3
m = 10
rng = np.random.RandomState([6, 2, 2014])
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 2
dataset = DenseDesignMatrix(X=X)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
# testing for bad dataset_name input
dummy = 'void'
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dummy)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError as e:
pass
except Exception:
reraise_as(AssertionError("MonitorBasedLRAdjuster takes dataset_name "
"that is invalid "))
# testing for bad channel_name input
monitor_lr2 = MonitorBasedLRAdjuster(channel_name=dummy)
model2 = SoftmaxModel(dim)
train2 = Train(dataset,
model2,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr2])
try:
train2.main_loop()
except ValueError as e:
pass
except Exception:
reraise_as(AssertionError("MonitorBasedLRAdjuster takes channel_name "
"that is invalid "))
return
def testing_multiple_datasets_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py does not take
# multiple datasets in which multiple channels ending in '_objective'
# exist.
# This case happens when the user has not specified either channel_name or
# dataset_name in the constructor
dim = 3
m = 10
rng = np.random.RandomState([6, 2, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset={'train': monitoring_train,
'test': monitoring_test},
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster()
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError:
return
raise AssertionError("MonitorBasedLRAdjuster takes multiple dataset names "
"in which more than one \"objective\" channel exist "
"and the user has not specified either channel_name "
"or database_name in the constructor to "
"disambiguate.")
def testing_multiple_datasets_with_specified_dataset_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py can properly use
# the spcified dataset_name in the constructor when multiple datasets
# exist.
dim = 3
m = 10
rng = np.random.RandomState([6, 2, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
monitoring_dataset = {'train': monitoring_train, 'test': monitoring_test}
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
dataset_name = first_key(monitoring_dataset)
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dataset_name)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
train.main_loop()
def test_sgd_topo():
# tests that we can run the sgd algorithm
# on data with topology
# does not test for correctness at all, just
# that the algorithm runs without dying
rows = 3
cols = 4
channels = 2
dim = rows * cols * channels
m = 10
rng = np.random.RandomState([25, 9, 2012])
dataset = get_topological_dataset(rng, rows, cols, channels, m)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
m = 15
monitoring_dataset = get_topological_dataset(rng, rows, cols, channels, m)
model = TopoSoftmaxModel(rows, cols, channels)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_sgd_no_mon():
# tests that we can run the sgd algorithm
    # without a monitoring dataset
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_reject_mon_batch_without_mon():
# tests that setting up the sgd algorithm
# without a monitoring dataset
# but with monitoring_batches specified is an error
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
try:
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=None,
update_callbacks=None,
set_batch_size=False)
except ValueError:
return
assert False
def test_sgd_sequential():
# tests that requesting train_iteration_mode = 'sequential'
# works
dim = 1
batch_size = 3
m = 5 * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [False] * m
def visit(X):
assert X.shape[1] == 1
assert np.all(X[1:] == X[0:-1]+1)
start = int(X[0, 0])
if start > 0:
assert visited[start - 1]
for i in xrange(batch_size):
assert not visited[start+i]
visited[start+i] = 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode='sequential',
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
algorithm.train(dataset)
assert all(visited)
def test_determinism():
# Verifies that running SGD twice results in the same examples getting
# visited in the same order
for mode in _iteration_schemes:
dim = 1
batch_size = 3
num_batches = 5
m = num_batches * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [[-1] * m]
def visit(X):
mx = max(visited[0])
counter = mx + 1
for i in X[:, 0]:
i = int(i)
assert visited[0][i] == -1
visited[0][i] = counter
counter += 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some
# point
termination_criterion = EpochCounter(5)
def run_algorithm():
unsupported_modes = ['random_slice', 'random_uniform',
'even_sequences']
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode=mode,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
raised = False
try:
algorithm.train(dataset)
except ValueError:
assert mode in unsupported_modes
raised = True
if mode in unsupported_modes:
assert raised
return True
return False
if run_algorithm():
continue
visited.insert(0, [-1] * m)
del model.monitor
run_algorithm()
for v in visited:
assert len(v) == m
for elem in range(m):
assert elem in v
assert len(visited) == 2
print(visited[0])
print(visited[1])
assert np.all(np.asarray(visited[0]) == np.asarray(visited[1]))
def test_determinism_2():
"""
A more aggressive determinism test. Tests that apply nodes are all passed
inputs with the same md5sums, apply nodes are run in same order, etc. Uses
disturb_mem to try to cause dictionaries to iterate in different orders,
etc.
"""
def run_sgd(mode):
# Must be seeded the same both times run_sgd is called
disturb_mem.disturb_mem()
rng = np.random.RandomState([2012, 11, 27])
batch_size = 5
train_batches = 3
valid_batches = 4
num_features = 2
# Synthesize dataset with a linear decision boundary
w = rng.randn(num_features)
def make_dataset(num_batches):
disturb_mem.disturb_mem()
m = num_batches*batch_size
X = rng.randn(m, num_features)
y = np.zeros((m, 1))
y[:, 0] = np.dot(X, w) > 0.
rval = DenseDesignMatrix(X=X, y=y)
rval.yaml_src = "" # suppress no yaml_src warning
X = rval.get_batch_design(batch_size)
assert X.shape == (batch_size, num_features)
return rval
train = make_dataset(train_batches)
valid = make_dataset(valid_batches)
num_chunks = 10
chunk_width = 2
class ManyParamsModel(Model):
"""
Make a model with lots of parameters, so that there are many
opportunities for their updates to get accidentally re-ordered
non-deterministically. This makes non-determinism bugs manifest
more frequently.
"""
def __init__(self):
super(ManyParamsModel, self).__init__()
self.W1 = [sharedX(rng.randn(num_features, chunk_width)) for i
in xrange(num_chunks)]
disturb_mem.disturb_mem()
self.W2 = [sharedX(rng.randn(chunk_width))
for i in xrange(num_chunks)]
self._params = safe_union(self.W1, self.W2)
self.input_space = VectorSpace(num_features)
self.output_space = VectorSpace(1)
disturb_mem.disturb_mem()
model = ManyParamsModel()
disturb_mem.disturb_mem()
class LotsOfSummingCost(Cost):
"""
Make a cost whose gradient on the parameters involves summing many
terms together, so that T.grad is more likely to sum things in a
random order.
"""
supervised = True
def expr(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
X, Y = data
disturb_mem.disturb_mem()
def mlp_pred(non_linearity):
Z = [T.dot(X, W) for W in model.W1]
H = [non_linearity(z) for z in Z]
Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]
pred = sum(Z)
return pred
nonlinearity_predictions = map(mlp_pred,
[T.nnet.sigmoid,
T.nnet.softplus,
T.sqr,
T.sin])
pred = sum(nonlinearity_predictions)
disturb_mem.disturb_mem()
return abs(pred-Y[:, 0]).sum()
def get_data_specs(self, model):
data = CompositeSpace((model.get_input_space(),
model.get_output_space()))
source = (model.get_input_source(), model.get_target_source())
return (data, source)
cost = LotsOfSummingCost()
disturb_mem.disturb_mem()
algorithm = SGD(cost=cost,
batch_size=batch_size,
learning_rule=Momentum(.5),
learning_rate=1e-3,
monitoring_dataset={'train': train, 'valid': valid},
update_callbacks=[ExponentialDecay(decay_factor=2.,
min_lr=.0001)],
termination_criterion=EpochCounter(max_epochs=5))
disturb_mem.disturb_mem()
train_object = Train(dataset=train,
model=model,
algorithm=algorithm,
extensions=[PolyakAveraging(start=0),
MomentumAdjustor(final_momentum=.9,
start=1,
saturate=5), ],
save_freq=0)
disturb_mem.disturb_mem()
train_object.main_loop()
output = cStringIO()
record = Record(file_object=output, replay=False)
record_mode = RecordMode(record)
run_sgd(record_mode)
output = cStringIO(output.getvalue())
playback = Record(file_object=output, replay=True)
playback_mode = RecordMode(playback)
run_sgd(playback_mode)
def test_lr_scalers():
"""
Tests that SGD respects Model.get_lr_scalers
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
learning_rate = .001
class ModelWithScalers(Model):
def __init__(self):
super(ModelWithScalers, self).__init__()
self._params = [sharedX(np.zeros(shape)) for shape in shapes]
self.input_space = VectorSpace(1)
def __call__(self, X):
# Implemented only so that DummyCost would work
return X
def get_lr_scalers(self):
return dict(zip(self._params, scales))
model = ModelWithScalers()
dataset = ArangeDataset(1)
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=Momentum(.0),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
manual = [param.get_value() for param in model.get_params()]
manual = [param - learning_rate * scale for param, scale in
zip(manual, scales)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
manual = [param - learning_rate * scale
for param, scale
in zip(manual, scales)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
def test_lr_scalers_momentum():
"""
Tests that SGD respects Model.get_lr_scalers when using
momentum.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
learning_rate = .001
momentum = 0.5
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=Momentum(momentum),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
manual = [param.get_value() for param in model.get_params()]
inc = [-learning_rate * scale for param, scale in zip(manual, scales)]
manual = [param + i for param, i in zip(manual, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
manual = [param - learning_rate * scale + i * momentum
for param, scale, i in
zip(manual, scales, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
def test_batch_size_specialization():
# Tests that using a batch size of 1 for training and a batch size
# other than 1 for monitoring does not result in a crash.
# This catches a bug reported in the pylearn-dev@googlegroups.com
# e-mail "[pylearn-dev] monitor assertion error: channel_X.type != X.type"
# The training data was specialized to a row matrix (theano tensor with
# first dim broadcastable) and the monitor ended up with expressions
# mixing the specialized and non-specialized version of the expression.
m = 2
rng = np.random.RandomState([25, 9, 2012])
X = np.zeros((m, 1))
dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(1)
learning_rate = 1e-3
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=1,
monitoring_batches=1,
monitoring_dataset=dataset,
termination_criterion=EpochCounter(max_epochs=1),
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_empty_monitoring_datasets():
"""
    Test that handling of the monitoring datasets dictionary
    does not fail when it is empty.
"""
learning_rate = 1e-3
batch_size = 5
dim = 3
rng = np.random.RandomState([25, 9, 2012])
train_dataset = DenseDesignMatrix(X=rng.randn(10, dim))
model = SoftmaxModel(dim)
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_dataset={},
termination_criterion=EpochCounter(2))
train = Train(train_dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_uneven_batch_size():
"""
    Extensively test SGD parametrisations for datasets whose number of
    examples is not divisible by the batch size.
The tested settings are:
- Model with force_batch_size = True or False
- Training dataset with number of examples divisible or not by batch size
- Monitoring dataset with number of examples divisible or not by batch size
- Even or uneven iterators
2 tests out of 10 should raise ValueError
"""
learning_rate = 1e-3
batch_size = 5
dim = 3
m1, m2, m3 = 10, 15, 22
rng = np.random.RandomState([25, 9, 2012])
dataset1 = DenseDesignMatrix(X=rng.randn(m1, dim))
dataset2 = DenseDesignMatrix(X=rng.randn(m2, dim))
dataset3 = DenseDesignMatrix(X=rng.randn(m3, dim))
def train_with_monitoring_datasets(train_dataset,
monitoring_datasets,
model_force_batch_size,
train_iteration_mode,
monitor_iteration_mode):
model = SoftmaxModel(dim)
if model_force_batch_size:
model.force_batch_size = model_force_batch_size
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
train_iteration_mode=train_iteration_mode,
monitor_iteration_mode=monitor_iteration_mode,
monitoring_dataset=monitoring_datasets,
termination_criterion=EpochCounter(2))
train = Train(train_dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
no_monitoring_datasets = None
even_monitoring_datasets = {'valid': dataset2}
uneven_monitoring_datasets = {'valid': dataset2, 'test': dataset3}
# without monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven training datasets
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='even_sequential',
monitor_iteration_mode='sequential')
# with even monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='even_sequential')
def test_epoch_monitor():
"""
Checks that monitored channels contain expected number of values
when using EpochMonitor for updates within epochs.
"""
dim = 1
batch_size = 3
n_batches = 10
m = n_batches * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
monitor = Monitor.get_monitor(model)
learning_rate = 1e-3
data_specs = (model.get_input_space(), model.get_input_source())
cost = DummyCost()
termination_criterion = EpochCounter(1)
monitor_rate = 3
em = EpochMonitor(model=model,
tick_rate=None,
monitor_rate=monitor_rate)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode='sequential',
monitoring_dataset=dataset,
termination_criterion=termination_criterion,
update_callbacks=[em],
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
algorithm.train(dataset)
for key, val in monitor.channels.items():
assert len(val.val_record) == n_batches//monitor_rate
if __name__ == '__main__':
test_monitor_based_lr()
|
CKehl/pylearn2
|
pylearn2/training_algorithms/tests/test_sgd.py
|
Python
|
bsd-3-clause
| 60,662
|
[
"VisIt"
] |
8a3e7442390a7cb06a60fc27b73910e9c6c5a9e8178291e773145be51b155225
|
# -*- coding: utf-8 -*-
__author__ = 'ke4roh'
# Copyright © 2016 James E. Scarborough
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from RPiNWR.Si4707 import *
from RPiNWR.Si4707.mock import MockContext
import unittest
import logging
class TestSi4707(unittest.TestCase):
def test_power_up(self):
logging.basicConfig(level=logging.INFO)
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.register_event_listener(events.append)
result = radio.do_command(PowerUp(function=15)).get(timeout=1)
self.assertEqual("2.0", result.firmware)
time.sleep(.01) # The event will come later, after the command is done.
self.assertEqual(1, len(events))
def test_patch_command(self):
logging.basicConfig(level=logging.INFO)
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.register_event_listener(events.append)
self.assertFalse(radio.radio_power)
result = radio.do_command(
PatchCommand(DEFAULT_CONFIG["power_on"]["patch"], DEFAULT_CONFIG["power_on"]["patch_id"])).get(
timeout=500)
self.assertTrue(radio.radio_power)
# Power Down is supposed to happen as part of the __exit__ routine
self.assertEquals(PowerDown, type(events[-1]))
def test_exception_in_command(self):
class ExceptionalCommand(Command):
def do_command0(self, r):
                raise NotImplementedError("Oh no!")
with MockContext() as context:
with Si4707(context) as radio:
future = radio.do_command(ExceptionalCommand())
self.assertRaises(Exception, future.get)
def test_set_property(self):
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.register_event_listener(events.append)
radio.power_on({"frequency": 162.4, "properties": {}})
radio.do_command(SetProperty("RX_VOLUME", 5)).get()
                self.assertEqual(5, context.props[0x4000])
self.assertRaises(ValueError, SetProperty, "RX_VOLUME", 66)
# Wait up to 2 sec for the shutdown to finish (it should go really fast)
timeout = time.time() + 2
while not type(events[-1]) is PowerDown:
if time.time() >= timeout:
raise TimeoutError()
time.sleep(.002)
# There is supposed to be 500 ms between power up and first tuning when using the crystal
# oscillator. We'll check that.
self.assertTrue(type(events[-1]) is PowerDown)
checked_rtt = False
checked_tune = False
pup_time = 0
for event in events:
if not pup_time:
if type(event) is PowerUp or type(event) is PatchCommand:
pup_time = event.time_complete
self.assertTrue(abs((radio.tune_after - pup_time) * 1000 - 500) < 5,
"tune_after - pup_time should be about 500 ms, but it is %d ms" % int(
(radio.tune_after - pup_time) * 1000))
for event in events:
if type(event) is ReadyToTuneEvent:
self.assertTrue(event.time >= radio.tune_after,
"RTT happened %d ms early." % int((radio.tune_after - event.time) * 1000))
checked_rtt = True
if type(event) is TuneFrequency:
self.assertTrue(event.time_complete >= radio.tune_after,
"Tune happened %d ms early." % int((radio.tune_after - event.time_complete) * 1000))
checked_tune = True
self.assertTrue(checked_tune)
self.assertTrue(checked_rtt)
def test_get_property(self):
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
self.assertEqual(63, radio.do_command(GetProperty("RX_VOLUME")).get())
def test_agc_control(self):
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
self.assertTrue(radio.do_command(GetAGCStatus()).get())
radio.do_command(SetAGCStatus(False)).get()
self.assertFalse(radio.do_command(GetAGCStatus()).get())
radio.do_command(SetAGCStatus(True)).get()
self.assertTrue(radio.do_command(GetAGCStatus()).get())
def test_Tune_162_450(self):
# Check for a rounding error
c = TuneFrequency(162.450)
self.assertEqual(64980, c.frequency)
def test_rsq_interrupts(self):
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.interrupts |= 8 # RSQ
timeout = time.time() + 5
while not len(list(filter(lambda x: type(x) is ReceivedSignalQualityCheck, events))):
time.sleep(.1)
self.assertTrue(time.time() < timeout)
rsqe = list(filter(lambda x: type(x) is ReceivedSignalQualityCheck, events))
self.assertEqual(1, len(rsqe))
self.assertEqual(1, rsqe[0].frequency_offset)
def test_alert_tone_detection(self): # WB_ASQ_STATUS
events = []
tone_duration = 0.5
tone_duration_tolerance = 0.1
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.alert_tone(True)
time.sleep(tone_duration)
context.alert_tone(False)
time.sleep(0.05)
asqe = list(filter(lambda x: type(x) is AlertToneCheck, events))
self.assertEqual(2, len(asqe))
self.assertTrue(asqe[0].tone_on)
self.assertTrue(asqe[0].tone_start)
self.assertFalse(asqe[0].tone_end)
self.assertFalse(asqe[1].tone_on)
self.assertFalse(asqe[1].tone_start)
self.assertTrue(asqe[1].tone_end)
# Finally, make sure the timing of the tones is measured fairly accurately
self.assertTrue(abs((asqe[1].time_complete - asqe[0].time_complete) - tone_duration) < tone_duration_tolerance,
"Tone duration measured as %f sec - spec called for %f±%f" % (
asqe[1].time_complete - asqe[0].time_complete, tone_duration, tone_duration_tolerance))
self.assertIsNone(asqe[0].duration)
self.assertTrue(abs(asqe[1].time_complete - asqe[0].time_complete - asqe[1].duration) < 0.01)
def __filter_same_events(self, events, interrupt):
return list(filter(lambda x: type(x) is SameInterruptCheck and x.status[interrupt], events))
def __wait_for_eom_events(self, events, n=3, timeout=30):
timeout = time.time() + timeout
while len(self.__filter_same_events(events, "EOMDET")) < n and not time.time() >= timeout:
time.sleep(.02)
def test_send_message(self):
events = []
message = '-WXR-RWT-020103-020209-020091-020121-029047-029165-029095-029037+0030-3031700-KEAX/NWS-'
message2 = '-WXR-TOR-020103+0030-3031701-KEAX/NWS-'
interrupts_cleared = [0]
with MockContext() as context:
def check_interrupts_cleared(event):
try:
if event.intack:
self.assertEqual(0, context.interrupts)
interrupts_cleared[0] += 1
except AttributeError:
pass
with Si4707(context) as radio:
radio.register_event_listener(check_interrupts_cleared)
radio.power_on({"transmitter": "KID77"})
radio.register_event_listener(events.append)
context.send_message(message=message, voice_duration=80, time_factor=0.1)
self.__wait_for_eom_events(events)
same_messages = list(filter(lambda x: type(x) is SAMEMessageReceivedEvent, events))
self.assertEquals(1, len(same_messages))
self.assertEquals(message, same_messages[0].message.get_SAME_message()[0])
for interrupt in ["EOMDET", "HDRRDY", "PREDET"]:
times = len(self.__filter_same_events(events, interrupt))
self.assertEquals(3, times, "Interrupt %s happened %d times" % (interrupt, times))
# Test alert tone check
alert_tones = list(filter(lambda x: type(x) is AlertToneCheck, events))
self.assertEquals(2, len(alert_tones))
self.assertTrue(alert_tones[0].tone_on)
self.assertFalse(alert_tones[1].tone_on)
self.assertTrue(abs(alert_tones[1].duration - .8) < 0.01)
# Test EOM
self.assertEquals(1, len(list(filter(lambda x: type(x) is EndOfMessage, events))))
self.assertEqual(0, sum(context.same_buffer), "Buffer wasn't flushed")
# And the correct number of interrupts needs to have been handled
self.assertTrue(10 < interrupts_cleared[0] < 13, interrupts_cleared[0])
# Send another message to ensure that the last one got cleared out properly
context.send_message(message=message2, voice_duration=0, time_factor=0.1)
self.__wait_for_eom_events(events, 6)
self.assertEqual(0, len(list(filter(lambda x: type(x) is CommandExceptionEvent, events))))
def test_send_message_no_tone_2_headers(self):
# This will hit the timeout.
events = []
message = '-WXR-RWT-020103-020209-020091-020121-029047-029165-029095-029037+0030-3031700-KEAX/NWS-'
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.send_message(message=message, voice_duration=50, time_factor=0.1, header_count=2, tone=None)
self.__wait_for_eom_events(events)
same_messages = list(filter(lambda x: type(x) is SAMEMessageReceivedEvent, events))
self.assertEquals(1, len(same_messages))
self.assertEquals(message, same_messages[0].message.get_SAME_message()[0])
for interrupt in ["HDRRDY", "PREDET"]:
times = len(self.__filter_same_events(events, interrupt))
self.assertEquals(2, times, "Interrupt %s happened %d times" % (interrupt, times))
def test_send_invalid_message(self):
# This will hit the timeout.
events = []
message = '-WWF-RWT-020103-020209-020091-020121-029047-029165'
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.send_message(message=message, time_factor=0.1, tone=None, invalid_message=True)
self.__wait_for_eom_events(events)
same_messages = list(filter(lambda x: type(x) is SAMEMessageReceivedEvent, events))
self.assertEquals(1, len(same_messages))
self.assertTrue(same_messages[0].message.headers[0][0].startswith(message),
"%s != %s " % (message, same_messages[0].message.headers[0][0]))
for interrupt in ["HDRRDY", "PREDET"]:
times = len(self.__filter_same_events(events, interrupt))
self.assertEquals(3, times, "Interrupt %s happened %d times" % (interrupt, times))
if __name__ == '__main__':
unittest.main()
|
ke4roh/RPiNWR
|
tests/test_Si4707.py
|
Python
|
gpl-3.0
| 12,599
|
[
"CRYSTAL"
] |
a1d1be77666bbbdb8f741f85bc4b776cfcd4a292fbf4e0c0c79b4f4ae7ed70f2
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
from io import StringIO
from shutil import rmtree, copyfile
import sys
from tempfile import mkdtemp
import kevlar
from kevlar.reference import autoindex, load_refr_cutouts, ReferenceCutout
from kevlar.reference import KevlarBWAError, KevlarInvalidCutoutDeflineError
from kevlar.reference import KevlarDeflineSequenceLengthMismatchError
from kevlar.tests import data_file
import pytest
def test_cutout_basic():
c1 = ReferenceCutout()
assert c1.interval == (None, None, None)
c2 = ReferenceCutout('1_1000-2000')
assert c2.defline == '1_1000-2000'
assert c2.sequence is None
assert c2.interval == ('1', 1000, 2000)
with pytest.raises(KevlarInvalidCutoutDeflineError):
c3 = ReferenceCutout('deFlIne FOrMaT WHat arEYoutALKingAb out')
c4 = ReferenceCutout('chr3_1000-2000', 'A' * 1000)
assert c4.defline == 'chr3_1000-2000'
assert c4.sequence == 'A' * 1000
assert c4.local_to_global(40) == 1040
with pytest.raises(KevlarDeflineSequenceLengthMismatchError):
c5 = ReferenceCutout('scaffold_4000-5000', 'A' * 42)
def test_load_cutouts():
instream = kevlar.open(data_file('ssc218.gdna.fa'), 'r')
cutouts = list(load_refr_cutouts(instream))
assert len(cutouts) == 1
assert cutouts[0].defline == '6_23229978-23230336'
assert cutouts[0].sequence.startswith('GAACTCTCAATAAGGAATGTAATTAGAGTCATGT')
assert cutouts[0].sequence.endswith('GTTAAACAATGGATACAAAATTGATAGAAACAATTA')
def test_autoindex():
origlogstream = kevlar.logstream
# Bogus directory
with pytest.raises(KevlarBWAError) as e:
autoindex('/a/truly/bogus/dir/seqs.fa')
assert 'does not exist' in str(e)
tmpdir = mkdtemp()
try:
# Real directory, non-existent file
filename = tmpdir + '/refr.fa'
with pytest.raises(KevlarBWAError) as e:
autoindex(filename)
assert 'does not exist' in str(e)
# Should successfully index the sequence
kevlar.logstream = StringIO()
copyfile(data_file('bogus-genome/refr.fa'), filename)
autoindex(filename)
assert 'BWA index not found' in kevlar.logstream.getvalue()
assert 'indexing now' in kevlar.logstream.getvalue()
# Should find existing index
kevlar.logstream = StringIO()
autoindex(filename)
assert kevlar.logstream.getvalue() == ''
finally:
rmtree(tmpdir)
kevlar.logstream = origlogstream
def test_bwa_align_coords():
seq = ('TGACGTGACCCCAAGAAAACACTGCACCCAACTTCTTTCTTTAAGCCTTCGTGTGTGCAGGAGGAG'
'GCCAGCCCTGGTTTCAAAATTGTTCCTCAGCATT')
fasta = '>seq1\n{}\n'.format(seq)
args = ['bwa', 'mem', data_file('fiveparts-refr.fa.gz'), '-']
aligner = kevlar.reference.bwa_align(args, fasta)
mappings = list(aligner)
assert len(mappings) == 1
assert mappings[0] == ('seq1', 50, 150, seq)
def test_bwa_failure():
args = ['bwa', 'mem', data_file('not-a-real-file.fa'), '-']
with pytest.raises(KevlarBWAError) as e:
aligner = kevlar.reference.bwa_align(args, '>seq1\nACGT')
pos = list(aligner)
|
dib-lab/kevlar
|
kevlar/tests/test_reference.py
|
Python
|
mit
| 3,472
|
[
"BWA"
] |
120c2d4638728c6244888f4000973fa5c92bbe90dc910e528982e41e11256485
|
from libmyriad import ResultCode
import datetime
import json
import logging
import multiprocessing
import os
import psutil
import re
import requests
import shutil
import subprocess
import time
import zipfile
class Myriad:
def __init__(self):
self.config = []
self.maestroAPIGateway = None
self.myriadJobsFolderOnAWS = None
self.cpus = 1
self.cpuOverride = None
self.mem = 1
self.displacements = None
self.jobID = None
self.executionID = None
self.jobGroup = None
self.jobCategory = None
self.jobFolder = None
self.errors = []
self.ip = None
self.jobConfig = None
self.parsedJob = None
self.jobStarted = None
self.jobName = None
self.ami = None
self.instanceID = None
self.region = None
self.cmdBacklog = []
self.memMax = 16000
self.memAdjust = 0.75 # Only use 75% of the available memory
def getInstanceID(self):
# Load the configuration values from file
f = open('instance-id.txt')
lines = f.readlines()
f.close()
return lines[0].strip()
def getAmi(self):
# Load the configuration values from file
f = open('ami-id.txt')
lines = f.readlines()
f.close()
return lines[0].strip()
def getRegion(self):
# lazy load region value
if self.region == None:
r = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document')
if r.status_code == 200:
j = json.loads(r.text)
self.region = str(j['region'])
return self.region
def loadEndpoints(self):
# Load the configuration values from file
f = open('config.txt')
lines = f.readlines()
f.close()
for line in lines:
if line.startswith('Maestro_api_gateway '):
self.maestroAPIGateway = line.split(' ')[1].strip()
logging.info('JobRunner GET endpoint set to ' + self.maestroAPIGateway)
elif line.startswith('Myriad_AWS '):
self.myriadJobsFolderOnAWS = line.split(' ')[1].strip()
logging.info('Myriad AWS endpoint set to ' + self.myriadJobsFolderOnAWS)
def getJob(self, jobGroup=None, jobCategory=None):
logging.info("Requesting a new job from " + str(self.maestroAPIGateway))
if jobGroup != None and jobCategory != None:
logging.info("Job group set to " + str(jobGroup))
logging.info("Job category set to " + str(jobCategory))
p = {"jobGroup": jobGroup, "jobCategory": jobCategory}
r = requests.get(self.maestroAPIGateway, params=p)
elif jobGroup != None and jobCategory == None:
logging.info("Job group set to " + str(jobGroup))
p = {"jobGroup": jobGroup}
r = requests.get(self.maestroAPIGateway, params=p)
elif jobGroup == None and jobCategory != None:
logging.info("Job category set to " + str(jobCategory))
p = {"jobCategory": jobCategory}
r = requests.get(self.maestroAPIGateway, params=p)
else:
logging.info("No job group or sub group specified")
r = requests.get(self.maestroAPIGateway)
# Check for good HTTP response
if r.status_code == 200:
logging.info("*** Begin get job response ***")
logging.info(r.text)
logging.info("*** End get job response ***")
# Check for logical error in response
if not "errorMessage" in r.text:
logging.info("Good response:\n" + str(r.text))
return self.parseJob(r.text)
else:
# logic error
logging.warn("Error from web service:\n" + str(r.text))
return ResultCode.failure
else:
# HTTP error
logging.warn("HTTP error: " + str(r.status_code))
return ResultCode.failure
def parseJob(self, job):
# The response should look something like this...
# {
# "JobID": "12345",
# "JobGroup": "NS2",
# "JobCategory": "5Z",
# "JobName": "NS2-5Z-1",
# "JobDefinition": {"Displacements":"-1,-1,-2"},
# "Created": "2016-07-17 15:26:45"
# }
logging.info("Parsing job")
self.parsedJob = json.loads(job)
self.jobID = self.parsedJob['JobID']
self.executionID = self.parsedJob['ExecutionID']
self.jobGroup = self.parsedJob['JobGroup']
self.jobCategory = self.parsedJob['JobCategory']
self.jobName = self.parsedJob['JobName']
self.displacements = self.parsedJob['JobDefinition']['Displacements']
return ResultCode.success
def getJobSupportFiles(self):
result = ResultCode.success
# download job-specific script(s) to the parent folder
url = self.myriadJobsFolderOnAWS + "/" + self.jobGroup + "/jobConfig.py"
logging.info("Retrieving job config from " + url)
r = requests.get(url)
# Check for web errors (404, 500, etc.)
if "<html>" in r.text:
logging.warn("Bad jobConfig.py")
result = ResultCode.failure
# logging.info(r.text)
f = open("jobConfig.py", "w")
f.write(r.text)
f.flush()
f.close()
return result
def getSystemSpecs(self):
self.cpus = psutil.cpu_count()
logging.info('Number of cores set to ' + str(self.cpus))
cpus = self.readTag('cpus')
if cpus != None:
logging.info('Overriding number of cores to: ' + str(cpus))
self.cpus = int(cpus)
if self.cpuOverride != None:
logging.info('Error-based override of number of cores to: ' + str(self.cpuOverride))
self.cpus = self.cpuOverride
os.environ["OMP_NUM_THREADS"] = str(self.cpus)
os.environ["MKL_NUM_THREADS"] = str(self.cpus)
self.mem = psutil.virtual_memory().available
logging.info('Bytes of available memory ' + str(self.mem))
def recordDiskUsage(self):
myoutput = open('diskspace.out', 'w')
        df = subprocess.Popen("df", stdout=myoutput)
        df.wait()  # wait for df to finish before flushing and closing the file
myoutput.flush()
myoutput.close()
def shutdownMyriad(self):
if os.path.isfile('../shutdown.myriad'):
logging.info('shutdownMyriad() found shutdown file. Returning True')
return True
r = requests.get('http://169.254.169.254/latest/meta-data/spot/termination-time')
if r.status_code == 200:
if re.search('.*T.*Z', r.text):
logging.info('shutdownMyriad() determined that AWS is terminating this spot instance. Returning True')
f = open('../shutdown.myriad', 'w')
f.write(' ')
f.flush()
f.close()
return True
return False
def runPsi4(self):
self.jobStarted = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
result = ResultCode.success
myoutput = open('psi4.out', 'w')
myerror = open('psi4.err', 'w')
exitcode = 0
try:
self.postJobStatus(True, "Started")
p = subprocess.Popen("psi4", stdout=myoutput, stderr=myerror)
waiting = True
waitCounter = 0
shutdown = False
while waiting:
try:
exitcode = p.wait(5)
logging.info("Call to p.wait() completed")
waiting = False
except subprocess.TimeoutExpired:
waiting = True
waitCounter = waitCounter + 1
if self.shutdownMyriad():
p.kill()
self.postJobStatus(True, "Terminated")
exitcode = 1
shutdown = True
waiting = False
else:
if waitCounter == 60:
waitCounter = 0
self.postJobStatus(True, "Running")
logging.info("psi4 exited with exit code of " + str(exitcode))
if exitcode == 0:
result = ResultCode.success
else:
if shutdown:
logging.info('Setting result code to ResultCode.shutdown')
result = ResultCode.shutdown
else:
result = ResultCode.failure
except RuntimeError as e:
self.postJobStatus(False, str(e))
result = ResultCode.failure
finally:
myoutput.flush()
myerror.flush()
myoutput.close()
myerror.close()
self.recordDiskUsage()
return result
def uploadResults(self):
logging.info("Extracting results from output.dat")
f = open("output.dat", "r")
lines = f.readlines()
energy = None
for line in reversed(lines):
if "CURRENT ENERGY" in line:
energy = line.split(">")
energy = energy[1].strip()
break
f.close()
logging.info("Energy = " + str(energy))
if energy == None:
logging.warn("No energy found")
return ResultCode.failure
logging.info("Posting results to the web service at " + str(self.maestroAPIGateway))
n = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
j = { "JobID" : self.jobID, "Started" : self.jobStarted, "Completed" : n, "JobResults" : energy, "job" : self.parsedJob }
logging.info("Job results encoded as: " + str(j))
r = requests.post(self.maestroAPIGateway, json=j)
# Check for good HTTP response
if r.status_code == 200:
# Check for logical error in response
if not "errorMessage" in r.text:
logging.info("Good response:\n" + str(r.text))
else:
# logic error
logging.warn("Error from web service:\n" + str(r.text))
return ResultCode.failure
else:
# HTTP error
logging.warn("HTTP error: " + str(r.status_code))
return ResultCode.failure
def clearScratch(self):
logging.info("Clearing the scratch folder. Some errors are normal.")
folder = os.environ['PSI_SCRATCH']
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
logging.warn(e)
logging.info("Finished clearing the scratch folder.")
def makeJobFolder(self):
self.jobFolder = self.jobName + "_" + datetime.datetime.now().strftime("%Y%m%d_%H%M%S_"+str(self.jobID))
os.mkdir(self.jobFolder)
os.chdir(self.jobFolder)
def closeJobFolder(self):
os.chdir("..")
def makeInputDat(self):
# Adjust memory value in input.dat
logging.info("Calculating memory value for input.dat...")
adjustedMem = int(((self.mem * self.memAdjust) / self.cpus)/1000000)
        adjustedMem = min(adjustedMem, self.memMax)  # cap the requested memory at memMax
newmem = "memory " + str(adjustedMem) + " MB"
# Creates the input.dat file in the job folder
from jobConfig import JobConfig
self.jobConfig = JobConfig()
intder = self.jobConfig.intderIn(self.displacements)
if intder != None:
f = open('intder.in', 'w')
f.write(intder)
f.flush()
f.close()
# Run Intder2005 to produce the geometries
logging.info("Running Intder2005...")
myinput = open('intder.in')
myoutput = open('intder.out', 'w')
p = subprocess.Popen("Intder2005.x", stdin=myinput, stdout=myoutput)
p.wait()
myoutput.flush()
myoutput.close()
logging.info("Finished running Intder2005...")
# Read the intder output and produce an input.dat file from the geometries
logging.info("Reading file07...")
f = open('file07')
file07 = f.readlines()
            f.close()
else:
file07 = None
if len(self.errors) > 0:
inputdat = self.jobConfig.inputDat(newmem, self.jobCategory, file07, self.errors[-1])
self.cpuOverride = self.jobConfig.checkThreads(self.errors[-1])
else:
inputdat = self.jobConfig.inputDat(newmem, self.jobCategory, file07)
self.cpuOverride = None
# Write input.dat contents to file
f=open('input.dat', 'w')
f.write(inputdat)
# Append print_variables() call as a preventive measure, since that is
# where we get the final energy value.
f.write("\nprint_variables()\n")
f.flush()
f.close()
logging.info("File input.dat written to disk.")
def postJobStatus(self, status, message=None):
n = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logging.info("Posting job status to " + str(self.maestroAPIGateway))
if status == True:
statusStr = "Success"
else:
statusStr = "Failure"
if message == None:
j = { "ExecutionID" : self.executionID, "LastUpdate":n, "Status":statusStr, "job" : self.parsedJob }
else:
j = { "JobID" : self.jobID, "LastUpdate":n, "Status":statusStr, "Message":message, "job" : self.parsedJob }
logging.info("Job status encoded as: " + str(j))
try:
r = requests.put(self.maestroAPIGateway, json=j)
except:
logging.warn("Error posting status. Ignoring.")
# If there's a failed tagging command in the queue, pop it and run it
if len(self.cmdBacklog) > 0:
command = self.cmdBacklog.pop()
self.runCommand(command)
def zipJobFolder(self, result):
# Get IP address
f = open('ip.txt')
self.ip = f.readline().strip()
f.close()
if self.ip == None:
self.ip = ""
try:
logging.info("Compressing job folder...")
if result == ResultCode.success:
zipname = self.jobFolder + "_ip_" + self.ip + "_success.zip"
elif result == ResultCode.failure:
zipname = self.jobFolder + "_ip_" + self.ip + "_failure.zip"
myZipFile = zipfile.ZipFile(zipname, "w" )
listing = os.listdir(self.jobFolder)
for f in listing:
myZipFile.write(self.jobFolder + "/" + f)
# grab a copy of the Myriad log file, too
myZipFile.write("logs/myriad.log")
myZipFile.close()
logging.info("Job folder compressed. Removing original...")
shutil.rmtree(self.jobFolder)
logging.info("Done removing original job folder")
except Exception as e:
logging.warn("Error compressing job folder: " + str(e))
def readTag(self, key):
# aws ec2 describe-tags --filters "Name=resource-id,Values=i-1234567890abcdef8" "Name=key,Values=threads"
# 'Key="ExecutionID",Value="3bd99202-5d7f-49c2-a350-f1fdf2235ad3"'
command = 'aws ec2 describe-tags --region '+self.region+' --filters "Name=resource-id,Values=' + str(self.instanceID) + '" "Name=key,Values='+str(key)+'"'
logging.info(command)
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if out:
logging.info("readTag() subprocess.Popen stdout...")
logging.info(out)
tag = json.loads(str(out, "utf-8"))
if 'Tags' in tag and len(tag['Tags']) > 0 and 'Value' in tag['Tags'][0]:
return str(tag['Tags'][0]['Value'])
if err:
logging.warn("readTag() subprocess.Popen stderr...")
logging.warn(err)
return None
def doModifyTag(self, action, key, value):
# aws ec2 delete-tags --resources ami-78a54011 --region us-east-1 --tags Key=Stack
# aws ec2 create-tags --resources ami-78a54011 --region us-east-1 --tags Key=Stack,Value=foo
# 'Key="ExecutionID",Value="3bd99202-5d7f-49c2-a350-f1fdf2235ad3"'
command = "aws ec2 " + action + " --resources " + str(self.instanceID) + " --region " + str(self.region) + " --tags 'Key="+str(key)
if value != None:
command += ',Value="' + str(value) + '"'
command += "'"
self.runCommand(command)
def runCommand(self, command):
logging.info("Invoking " + str(command))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = process.communicate()
if out:
logging.info("doModifyTag() subprocess.Popen stdout...")
logging.info(out)
if err:
logging.warn("doModifyTag() subprocess.Popen stderr...")
logging.warn(err)
logging.info("doModifyTag() subprocess.Popen returncode...")
logging.info(process.returncode)
# If we get back a RequestLimitExceeded error make a note to try again later...
if process.returncode == 255 and "RequestLimitExceeded" in str(err):
self.cmdBacklog.append(command)
def tagInstance(self):
self.downloadCredentials()
self.doModifyTag("create-tags", "Name", self.jobName)
self.doModifyTag("create-tags", "ExecutionID", self.executionID)
self.doModifyTag("create-tags", "JobID", self.jobID)
self.doModifyTag("create-tags", "StartTime", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
self.doModifyTag("create-tags", "Displacements", self.displacements)
#self.displacements
def untagInstance(self):
self.downloadCredentials()
self.doModifyTag("create-tags", "Name", "Waiting")
self.doModifyTag("delete-tags", "ExecutionID", None)
self.doModifyTag("delete-tags", "JobID", None)
self.doModifyTag("delete-tags", "StartTime", None)
self.doModifyTag("delete-tags", "Displacements", None)
def downloadCredentials(self):
logging.info("Retrieving credentials...")
r = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/S3FullAccess")
if r.status_code == 200:
j = json.loads(r.text)
os.environ["AWS_ACCESS_KEY_ID"] = str(j['AccessKeyId'])
os.environ["AWS_SECRET_ACCESS_KEY"] = str(j['SecretAccessKey'])
os.environ["AWS_SECURITY_TOKEN"] = str(j['Token'])
logging.info("Credentials exported to environment variables")
else:
logging.warn("Failed to retrieve credentials")
# Main
def runOnce(self, jobGroup=None, jobCategory=None, error=None):
logging.info("Myriad.runOnce invoked...")
logging.info("Job group = " + str(jobGroup))
logging.info("Job sub group = " + str(jobCategory))
logging.info("Error = " + str(error))
self.jobGroup = jobGroup
self.jobCategory = jobCategory
# if we have seen this error before, bail out.
# We couldn't fix it the first time. Why should this time be any different?
if error != None and error in self.errors:
self.postJobStatus(False, "Error repeated: " + str(error))
return ResultCode.failure
# add the error condition to the stack of prior errors if we've never seen it before
if error != None:
self.errors.append(error)
# load the endpoints for web service calls and get ami-id for this machine
self.loadEndpoints()
self.ami = self.getAmi()
self.instanceID = self.getInstanceID()
self.region = self.getRegion()
# if no error, get a new job.
# if there is an error code, we're going to re-run the job we have
if error == None:
result = self.getJob(self.jobGroup, self.jobCategory)
else:
logging.info("Running current job again to correct for errors")
result = ResultCode.success
if result == ResultCode.success:
newerror = None
result = self.getJobSupportFiles()
if result == ResultCode.success:
self.getSystemSpecs()
self.clearScratch()
self.makeJobFolder()
self.makeInputDat()
self.tagInstance()
result = self.runPsi4()
if result == ResultCode.success:
logging.info("runPsi4() returned success code")
while self.uploadResults() == ResultCode.failure:
logging.info("Failure uploading results. Retrying in 60 seconds...")
time.sleep(60)
else:
if result != ResultCode.shutdown:
# Check for known error situations in output.dat
logging.warn("runPsi4() returned failure code. Checking for known errors")
newerror = self.jobConfig.checkError()
self.postJobStatus(False, "PSI4 error: " + str(newerror))
logging.info("CheckError() result: " + str(newerror))
self.closeJobFolder()
if result != ResultCode.shutdown:
self.zipJobFolder(result)
self.clearScratch()
# if we encounter a known error, try the job again and compensate
if newerror != None:
logging.info("Re-executing job due to known error: " + str(newerror))
result = self.runOnce(self.jobGroup, self.jobCategory, newerror)
else:
logging.warn("Error retrieving support files")
else:
result = ResultCode.noaction
self.untagInstance()
return result
|
russellthackston/comp-chem-util
|
myriad/v1/myriad.py
|
Python
|
mit
| 18,545
|
[
"Psi4"
] |
7d50344a52e763d5f2755fbee890f328db1d40dbbde05c3a90b2bb78a98baa47
|
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name='abouttag',
version='1.3.1',
description='Normalizes about tags for Fluidinfo',
author='Nicholas J. Radcliffe',
author_email='njr@StochasticSolutions.com',
packages=['abouttag'],
url="http://github.com/njr0/abouttag/",
download_url="https://github.com/njr0/abouttag",
keywords=[
'fluidinfo',
'about',
'tag',
'normalization',
'canonicalization',
        'standardization'
],
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet',
],
install_requires = ['urlnorm>=1.1.2'],
long_description_content_type="text/markdown",
long_description = '''\
This package provides functions for generating about tags for
Fluidinfo following various conventions.
Fluidinfo is a hosted, online database based on the notion of tagging.
For more information on FluidDB, visit http://fluidinfo.com.
For more information on the ideas that motivated this module,
see posts at http://abouttag.blogspot.com. A good place to
start is
http://abouttag.blogspot.com/2010/03/about-tag-conventions-in-fluiddb.html
EXAMPLE
Examples of usage are provided in the examples directory.
Some simple examples are:
from abouttag.books import book
from abouttag.music import album, artist, track
from abouttag.film import film, movie
print book(u"One Hundred Years of Solitude", u'Gabriel García Márquez')
print book(u'The Feynman Lectures on Physics',
u'Richard P. Feynman', u'Robert B. Leighton',
u'Matthew Sands')
print track(u'Bamboulé', u'Bensusan and Malherbe')
print album(u"Solilaï", u'Pierre Bensusan')
print artist(u"Crosby, Stills, Nash & Young")
print film(u"Citizen Kane", u'1941')
print movie(u"L'Âge d'Or", u'1930')
INSTALLATION
pip install -U abouttag
DEPENDENCIES
urlnorm
'''
)
|
njr0/abouttag
|
setup.py
|
Python
|
mit
| 2,321
|
[
"VisIt"
] |
9cfd29e91ae8fc306f76cfb02d7b3c84e1ef16702625faf6e0803309ce410114
|
import random
import copy
from itertools import repeat
from collections import Sequence
# Mutator with "rubber-like" behaviour. Doesn't go out of bounds, moves in a gaussian way.
# 3.3.1 Local Gaussian Mutation in report.
def mutRubber(individual, sigma, border, indpb, lower_bound=0):
size = len(individual)
if not isinstance(sigma, Sequence):
sigma = repeat(sigma, size)
elif len(sigma) < size:
raise IndexError("sigma must be at least the size of individual: %d < %d" % (len(sigma), size))
for i, s in zip(xrange(size), sigma):
if random.random() < indpb:
if (lower_bound != 0):
if (i % 2 == 0):
other_coord = i + 1
else:
other_coord = i - 1
if individual[other_coord] < lower_bound:
individual[i] = int(round(min(border, max(lower_bound, random.gauss(individual[i], s)))))
else:
individual[i] = int(round(min(border, max(0, random.gauss(individual[i], s)))))
else:
individual[i] = int(round(min(border, max(0, random.gauss(individual[i], s)))))
return individual,
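# Usage sketch (added for illustration, not part of the original module). The
# sigma, border and indpb values below are arbitrary examples; mutRubber mutates
# the list in place and returns a one-element tuple, as DEAP mutation operators do.
if __name__ == '__main__':
    random.seed(1)
    square = [10, 20, 30, 40]  # flat list of (x, y) corner coordinates
    mutant, = mutRubber(list(square), sigma=2.0, border=50, indpb=0.5)
    print "before:", square
    print "after: ", mutant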
|
eastlund/evolutionary_square_packing
|
mutation.py
|
Python
|
mit
| 1,227
|
[
"Gaussian"
] |
1cff31701929e193162406917d20a4c6884d79ba337e242a4df0553034ef9673
|
import random
import time
import itertools
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, LSTM, Input, Lambda, TimeDistributed, Merge, concatenate, add
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.wrappers import Bidirectional
from keras import backend as K
from keras.optimizers import RMSprop, Adam, Nadam
import keras.callbacks
from keras.models import load_model
from keras.models import model_from_json
from keras.layers.noise import GaussianNoise
from keras.regularizers import l1,l2
from keras.constraints import maxnorm
from keras import layers
from keras.initializers import RandomUniform
minibatch_size = 2
val_split = 0.2
maxlen = 1900
nb_classes = 22
nb_epoch = 500
numfeats_speech = 39
numfeats_skeletal = 20
stamp = 'early_multimodal'
#====================================================== DATA GENERATOR =================================================================================
# Data generator that will provide training and testing with data. Works with mini batches of audio feat files.
# The data generator is called using the next_train() and next_val() methods.
# Class constructor to initialize the datagenerator object.
class DataGenerator(keras.callbacks.Callback):
def __init__(self,
minibatch_size,
numfeats_skeletal,
numfeats_speech,
maxlen,val_split,
nb_classes,
absolute_max_sequence_len=28):
# Currently is only 2 files per batch.
self.minibatch_size = minibatch_size
# Maximum length of data sequence.
self.maxlen = maxlen
# 39 mel frequency feats.
self.numfeats_speech = numfeats_speech
# 22 skeletal feats.
self.numfeats_skeletal = numfeats_skeletal
# Size of the validation set.
self.val_split = val_split
# Max number of labels per label sequence.
self.absolute_max_sequence_len = absolute_max_sequence_len
        # Indexing variables
self.train_index = 0
self.val_index = 0
# Actually 1-20 classes and 21 is the blank label.
self.nb_classes = nb_classes
        # Blank label to use.
self.blank_label = np.array([self.nb_classes - 1])
self.load_dataset()
# Loads and preprocesses the dataset and splits it into training and validation set.
    # This runs at initialization.
def load_dataset(self):
print 'Loading data...'
# The input files.
in_file_audio = '/home/alex/Documents/Python/multimodal_gesture_recognition/speech_blstm/Training_set_audio_labeled.csv'
in_file_skeletal = '../skeletal_network/Training_set_skeletal.csv'
# Read the inputs.
self.df_a = pd.read_csv(in_file_audio,
header=None)
self.df_s = pd.read_csv(in_file_skeletal)
# Zero mean and unity variance normalization.
self.df_a = self.normalize_data('audio')
self.df_s = self.normalize_data('skeletal')
# Create and shuffle file list. (The same for both skeletal and audio)
file_list = self.df_a[39].unique().tolist()
random.seed(10)
random.shuffle(file_list)
        # Split into training and validation sets.
split_point = int(len(file_list) * (1 - self.val_split))
self.train_list, self.val_list = file_list[:split_point], file_list[split_point:]
self.train_size = len(self.train_list)
self.val_size = len(self.val_list)
#=====================================================================================================================================================
#Make sure that train and validation lists have an even length to avoid mini-batches of size 1
train_mod_by_batch_size = self.train_size % self.minibatch_size
if train_mod_by_batch_size != 0:
del self.train_list[-train_mod_by_batch_size:]
self.train_size -= train_mod_by_batch_size
val_mod_by_batch_size = self.val_size % self.minibatch_size
if val_mod_by_batch_size != 0:
del self.val_list[-val_mod_by_batch_size:]
self.val_size -= val_mod_by_batch_size
#=====================================================================================================================================================
# Return sizes.
# Called in order to get the training and validation set sizes.
def get_size(self,train):
if train:
return self.train_size
else:
return self.val_size
# Normalize the data to have zero mean and unity variance.
    # Called at initialization.
def normalize_data(self,stream):
# Normalize audio.
if stream == 'audio':
data = self.df_a.drop([39,40], axis=1).as_matrix().astype(float)
norm_data = preprocessing.scale(data)
norm_df = pd.DataFrame(norm_data)
norm_df[39] = self.df_a[39]
norm_df[40] = self.df_a[40]
# Normalize skeletal.
elif stream == 'skeletal':
data = self.df_s[['lh_v','rh_v','le_v','re_v','lh_dist_rp',
'rh_dist_rp','lh_hip_d','rh_hip_d','le_hip_d','re_hip_d',
'lh_shc_d','rh_shc_d','le_shc_d','re_shc_d','lh_hip_ang',
'rh_hip_ang','lh_shc_ang','rh_shc_ang','lh_el_ang','rh_el_ang']].as_matrix().astype(float)
norm_data = preprocessing.scale(data)
norm_df = pd.DataFrame(norm_data,
columns=['lh_v','rh_v','le_v','re_v','lh_dist_rp',
'rh_dist_rp','lh_hip_d','rh_hip_d','le_hip_d','re_hip_d',
'lh_shc_d','rh_shc_d','le_shc_d','re_shc_d','lh_hip_ang',
'rh_hip_ang','lh_shc_ang','rh_shc_ang','lh_el_ang',
'rh_el_ang'])
norm_df['file_number'] = self.df_s['file_number']
return norm_df
# each time a batch (list of file ids) is requested from train/val/test
# Basic function that returns a batch of training or validation data.
# Returns a dictionary with the required fields for CTC training and a dummy output variable.
def get_batch(self, train):
# number of files in batch is 2
# Select train or validation mode.
if train:
file_list = self.train_list
index = self.train_index
else:
file_list = self.val_list
index = self.val_index
# Get the batch.
try:
batch = file_list[index:(index + self.minibatch_size)]
except:
batch = file_list[index:]
size = len(batch)
        # Initialize the variables to be returned.
X_data_a = np.ones([size, self.maxlen, self.numfeats_speech])
labels_a = np.ones([size, self.absolute_max_sequence_len])
input_length_a = np.zeros([size, 1])
label_length_a = np.zeros([size, 1])
X_data_s = np.ones([size, self.maxlen, self.numfeats_skeletal])
labels_s = np.ones([size, self.absolute_max_sequence_len])
input_length_s = np.zeros([size, 1])
label_length_s = np.zeros([size, 1])
# Read batch.
for i in range(len(batch)):
file = batch[i]
vf_a = self.df_a[self.df_a[39] == file]
# Downsample by 5 the audio.
vf_a = vf_a.iloc[::5, :].reset_index(drop=True)
vf_s = self.df_s[self.df_s['file_number'] == file]
            # Select and pad the data sequence to max length.
# Audio
gest_seq_a = vf_a.drop([39,40], axis=1).as_matrix().astype(float)
gest_seq_a = sequence.pad_sequences([gest_seq_a],
maxlen=self.maxlen,
padding='post',
truncating='post',
dtype='float32')
# Skeletal
gest_seq_s = vf_s[['lh_v','rh_v','le_v','re_v','lh_dist_rp',
'rh_dist_rp','lh_hip_d','rh_hip_d','le_hip_d','re_hip_d',
'lh_shc_d','rh_shc_d','le_shc_d','re_shc_d','lh_hip_ang',
'rh_hip_ang','lh_shc_ang','rh_shc_ang','lh_el_ang','rh_el_ang']].as_matrix().astype(float)
gest_seq_s = sequence.pad_sequences([gest_seq_s],
maxlen=self.maxlen,
padding='post',
truncating='post',
dtype='float32')
# Create the label vector. Ignores the blanks.(Same for skeletal and audio)
lab_seq = vf_a[vf_a[40] != 0][40].unique().astype('float32')
index = np.argwhere(lab_seq==0)
lab_seq = np.delete(lab_seq, index)
# If a sequence is not found insert a blank example and pad.
if lab_seq.shape[0] == 0:
lab_seq = sequence.pad_sequences([self.blank_label],
maxlen=(self.absolute_max_sequence_len),
padding='post',
value=-1)
labels_s[i, :] = lab_seq
labels_a[i, :] = lab_seq
label_length_s[i] = 1
label_length_a[i] = 1
            # Otherwise save the sequences into the returned variables.
else:
X_data_a[i, :, :] = gest_seq_a
X_data_s[i, :, :] = gest_seq_s
label_length_a[i] = lab_seq.shape[0]
label_length_s[i] = lab_seq.shape[0]
lab_seq = sequence.pad_sequences([lab_seq],
maxlen=(self.absolute_max_sequence_len),
padding='post',
value=-1)
labels_a[i, :] = lab_seq
labels_s[i, :] = lab_seq
input_length_a[i] = (X_data_a[i].shape[0] - 2)
input_length_s[i] = (X_data_s[i].shape[0] - 2)
# Returned values: a dictionary with 4 values
# the_input_audio: audio data sequence
# the_input_skeletal: skeletal data sequence
# the labels: label sequence
# input_length: length of data sequence
# label_length: length of label sequence
# an array of zeros
# outputs: dummy vector of zeros required for keras training
inputs = {'the_input_audio': X_data_a,
'the_input_skeletal': X_data_s,
'the_labels': labels_a,
'input_length': input_length_a,
'label_length': label_length_a,
}
outputs = {'ctc': np.zeros([size])} # dummy data for dummy loss function
return (inputs, outputs)
# Get the next training batch and update index.
# Called by the generator during training.
def next_train(self):
while 1:
ret = self.get_batch(train=True)
self.train_index += self.minibatch_size
if self.train_index >= self.train_size:
self.train_index = 0
yield ret
# Get the next validation batch and update index.
# Called by the generator during validation.
def next_val(self):
while 1:
ret = self.get_batch(train=False)
self.val_index += self.minibatch_size
if self.val_index >= self.val_size:
self.val_index = 0
yield ret
# Save model and weights on epochs end.
# Callback at the end of each epoch.
def on_epoch_end(self, epoch, logs={}):
self.train_index = 0
self.val_index = 0
random.shuffle(self.train_list)
random.shuffle(self.val_list)
#============================================================== CTC LOSS ==============================================================================
# the actual loss calc occurs here despite it not being
# an internal Keras loss function
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
# the 2 is critical here since the first couple outputs of the RNN
# tend to be garbage:
y_pred = y_pred[:, 2:, :]
ctc_batch_loss = K.ctc_batch_cost(labels, y_pred, input_length, label_length)
return ctc_batch_loss
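# Note (added for clarity): because the first two time steps are dropped here,
# the data generator above sets input_length to the padded sequence length minus 2.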
def build_net():
uni_initializer = RandomUniform(minval=-0.05,
maxval=0.05,
seed=47)
# Shape of the network inputs.
input_shape_a = (maxlen, numfeats_speech)
input_shape_s = (maxlen, numfeats_skeletal)
# Input layers for the audio and skeletal.
input_data_a = Input(name='the_input_audio',
shape=input_shape_a,
dtype='float32')
input_data_s = Input(name='the_input_skeletal',
shape=input_shape_s,
dtype='float32')
# Add zero-centered gaussian noise to the input. (For better regularization)
input_noise_a = GaussianNoise(stddev=0.5,
name='gaussian_noise_a')(input_data_a)
input_noise_s = GaussianNoise(stddev=0.5,
name='gaussian_noise_s')(input_data_s)
concat = concatenate([input_noise_a, input_noise_s], axis=2)
# Block 3
lstm_1 = Bidirectional(LSTM(500,
name='blstm_1',
activation='tanh',
recurrent_activation='hard_sigmoid',
recurrent_dropout=0.0,
dropout=0.4,
kernel_constraint=maxnorm(3),
kernel_initializer=uni_initializer,
return_sequences=True),
merge_mode='concat')(concat)
lstm_2 = Bidirectional(LSTM(500,
name='blstm_2',
activation='tanh',
recurrent_activation='hard_sigmoid',
recurrent_dropout=0.0,
dropout=0.4,
kernel_constraint=maxnorm(3),
kernel_initializer=uni_initializer,
return_sequences=True),
merge_mode='concat')(lstm_1)
res_block = add([lstm_1, lstm_2])
dropout_1 = Dropout(0.4,
name='dropout_layer_1')(res_block)
# Softmax output layer
# Block 4
inner = Dense(nb_classes,
name='dense_1',
kernel_initializer=uni_initializer)(dropout_1)
y_pred = Activation('softmax',
name='softmax')(inner)
Model(input=[input_data_a,input_data_s],
output=y_pred).summary()
# These are also inputes needed for the CTC loss
labels = Input(name='the_labels',
shape=[data_gen.absolute_max_sequence_len],
dtype='float32')
input_length = Input(name='input_length',
shape=[1],
dtype='int64')
label_length = Input(name='label_length',
shape=[1],
dtype='int64')
# Keras doesn't currently support loss funcs with extra parameters
# so CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func,
output_shape=(1,),
name="ctc")([y_pred, labels, input_length, label_length])
# The complete model with the CTC loss.
model = Model(input=[input_data_a,input_data_s, labels, input_length, label_length],
output=[loss_out])
# Optimizer.
adam = Adam(lr=0.0001,
clipvalue=0.5,
decay=1e-5)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
optimizer=adam)
model_json = model.to_json()
with open(stamp + ".json", "w") as json_file:
json_file.write(model_json)
return model
def load_model():
json_file = open(stamp + '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights(stamp + '.h5')
adam = Adam(lr=0.0001,
clipvalue=0.5)
print("Loaded model from disk")
y_pred = model.get_layer('softmax').output
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
optimizer=adam)
return model
# ====================================================== MAIN ==========================================================================================
K.set_learning_phase(1) # all new operations will be in train mode from now on
# Can load a previous model to resume training. 'yes'
load_previous = raw_input('Type yes/no if you want to load previous model: ')
# Create data generator object.
data_gen = DataGenerator(minibatch_size=minibatch_size,
numfeats_skeletal=numfeats_skeletal,
numfeats_speech=numfeats_speech,
maxlen=maxlen,
val_split=val_split,
nb_classes=nb_classes)
# Resume training.
if load_previous == 'yes':
model = load_model()
else:
model = build_net()
# Early stopping to avoid overfitting
earlystopping = EarlyStopping(monitor='val_loss',
patience=20,
verbose=1)
# Checkpoint to save the weights with the best validation accuracy.
filepath= stamp + ".h5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=True,
mode='auto')
# ====================================================== TRAIN =========================================================================================
print 'Start training.'
start_time = time.time()
model.fit_generator(generator=data_gen.next_train(),
steps_per_epoch=(data_gen.get_size(train=True)/minibatch_size),
epochs=nb_epoch,
validation_data=data_gen.next_val(),
validation_steps=(data_gen.get_size(train=False)/minibatch_size),
callbacks=[earlystopping, checkpoint, data_gen])
#================================================================================================================================
end_time = time.time()
print "--- Training time: %s seconds ---" % (end_time - start_time)
|
AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC
|
early_fusion/early_multimodal.py
|
Python
|
mit
| 15,549
|
[
"Gaussian"
] |
2f95f5e48a5094b48d384f4e97eac874fbe28e4889a7335191572b63ba1c1e11
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module implementing classes and functions to use Zeo++.
Zeo++ Installation Steps:
========================
1) Zeo++ requires Voro++. Download Voro++ from code.lbl.gov using
subversion:
"svn checkout --username anonsvn https://code.lbl.gov/svn/voro/trunk
Password is anonsvn.
2) Stable version of Zeo++ can be obtained from
http://www.maciejharanczyk.info/Zeopp/
Alternatively it can be obtained from code.lbl.gov. Replace voro
with zeo.
3) (Optional) Install cython from pip
Mac OS X:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment
(compiler, linker).
(b) Run make command
5) (a) Edit the Zeo++/trunk/cython_wrapper/setup.py to correctly point to
Voro++ directory.
(b) Run "python setup.py develop" to install Zeo++ python bindings.
Be patient, it will take a while.
Linux:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment.
(b) Also add -fPIC option to CFLAGS variable in config.mk file.
(c) Run make command
5) (a) Go to Zeo++/zeo/trunk folder and compile zeo++ library using the
command "make dylib".
(b) Edit the Zeo++/trunk/cython_wrapper/setup_alt.py to correctly
point to Voro++ directory.
(c) Run "python setup_alt.py develop" to install Zeo++ python bindings.
Zeo++ Post-Installation Checking:
==============================
1) Go to pymatgen/io/tests and run "python test_zeoio.py"
If Zeo++ python bindings are properly installed, the tests should
pass. One or two tests will be skipped.
2) Go to pymatgen/analysis/defects/tests and run
"python test_point_defects.py". Lots of tests will be skipped if GULP
is not installed. But there should be no errors.
"""
import os
import re
from monty.io import zopen
from monty.dev import requires
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Structure, Molecule
from pymatgen.core.lattice import Lattice
from pymatgen.io.cssr import Cssr
from pymatgen.io.xyz import XYZ
try:
from zeo.netstorage import AtomNetwork
from zeo.area_volume import volume, surface_area
from zeo.cluster import prune_voronoi_network_close_node
zeo_found = True
except ImportError:
zeo_found = False
__author__ = "Bharat Medasani"
__copyright = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__data__ = "Aug 2, 2013"
class ZeoCssr(Cssr):
"""
ZeoCssr adds extra fields to CSSR sites to conform with Zeo++
    input CSSR format. The coordinate system is rotated from xyz to zxy.
    This change aligns the pivot axis of pymatgen (z-axis) with the pivot axis
    of Zeo++ (x-axis) for structural modifications.
"""
def __init__(self, structure):
"""
Args:
structure: A structure to create ZeoCssr object
"""
super().__init__(structure)
def __str__(self):
"""
        CSSR.__str__ method is modified to pad 0's to the CSSR site data.
        The padding is to conform with the CSSR format supported by Zeo++.
The oxidation state is stripped from site.specie
Also coordinate system is rotated from xyz to zxy
"""
output = [
"{:.4f} {:.4f} {:.4f}".format(self.structure.lattice.c, self.structure.lattice.a, self.structure.lattice.b),
"{:.2f} {:.2f} {:.2f} SPGR = 1 P 1 OPT = 1".format(self.structure.lattice.gamma,
self.structure.lattice.alpha,
self.structure.lattice.beta),
"{} 0".format(len(self.structure)),
"0 {}".format(self.structure.formula)
]
for i, site in enumerate(self.structure.sites):
# if not hasattr(site, 'charge'):
# charge = 0
# else:
# charge = site.charge
charge = site.charge if hasattr(site, 'charge') else 0
# specie = site.specie.symbol
specie = site.species_string
output.append(
"{} {} {:.4f} {:.4f} {:.4f} 0 0 0 0 0 0 0 0 {:.4f}".format(
i + 1, specie, site.c, site.a, site.b, charge
)
)
return "\n".join(output)
@staticmethod
def from_string(string):
"""
Reads a string representation to a ZeoCssr object.
Args:
string: A string representation of a ZeoCSSR.
Returns:
ZeoCssr object.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(i) for i in toks]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
a = lengths.pop(-1)
lengths.insert(0, a)
alpha = angles.pop(-1)
angles.insert(0, alpha)
latt = Lattice.from_parameters(*lengths, *angles)
sp = []
coords = []
chrg = []
for l in lines[4:]:
m = re.match(r'\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+' +
r'([0-9\-\.]+)\s+(?:0\s+){8}([0-9\-\.]+)', l.strip())
if m:
sp.append(m.group(1))
# coords.append([float(m.group(i)) for i in xrange(2, 5)])
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
coords.append([float(m.group(i)) for i in [3, 4, 2]])
chrg.append(m.group(5))
return ZeoCssr(
Structure(latt, sp, coords, site_properties={'charge': chrg})
)
@staticmethod
def from_file(filename):
"""
Reads a CSSR file to a ZeoCssr object.
Args:
filename: Filename to read from.
Returns:
ZeoCssr object.
"""
with zopen(filename, "r") as f:
return ZeoCssr.from_string(f.read())
class ZeoVoronoiXYZ(XYZ):
"""
Class to read Voronoi Nodes from XYZ file written by Zeo++.
The sites have an additional column representing the voronoi node radius.
The voronoi node radius is represented by the site property voronoi_radius.
"""
def __init__(self, mol):
"""
Args:
mol: Input molecule holding the voronoi node information
"""
super().__init__(mol)
@staticmethod
def from_string(contents):
"""
Creates Zeo++ Voronoi XYZ object from a string.
from_string method of XYZ class is being redefined.
Args:
contents: String representing Zeo++ Voronoi XYZ file.
Returns:
ZeoVoronoiXYZ object
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
prop = []
coord_patt = re.compile(
r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
r"([0-9\-\.]+)"
)
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# coords.append(map(float, m.groups()[1:4])) # this is 0-indexed
coords.append([float(j)
for j in [m.group(i) for i in [3, 4, 2]]])
prop.append(float(m.group(5)))
return ZeoVoronoiXYZ(
Molecule(sp, coords, site_properties={'voronoi_radius': prop})
)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return ZeoVoronoiXYZ.from_string(f.read())
def __str__(self):
output = [str(len(self._mol)), self._mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(
self.precision
)
for site in self._mol:
output.append(fmtstr.format(
site.specie.symbol, site.z, site.x, site.y,
site.properties['voronoi_radius']
))
return "\n".join(output)
@requires(zeo_found,
"get_voronoi_nodes requires Zeo++ cython extension to be "
"installed. Please contact developers of Zeo++ to obtain it.")
def get_voronoi_nodes(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
        voronoi nodes as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
        voronoi edge centers as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
        voronoi face centers as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
vornet, vor_edge_centers, vor_face_centers = \
atmnet.perform_voronoi_decomposition()
vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
        lattice = Lattice.from_parameters(*structure.lattice.parameters)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
        # PMG-Zeo c<->a transformation for voronoi face and edge centers
rot_face_centers = [(center[1], center[2], center[0]) for center in
vor_face_centers]
rot_edge_centers = [(center[1], center[2], center[0]) for center in
vor_edge_centers]
species = ["X"] * len(rot_face_centers)
prop = [0.0] * len(rot_face_centers) # Vor radius not evaluated for fc
vor_facecenter_struct = Structure(
lattice, species, rot_face_centers, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
species = ["X"] * len(rot_edge_centers)
        prop = [0.0] * len(rot_edge_centers)  # Vor radius not evaluated for edge centers
vor_edgecenter_struct = Structure(
lattice, species, rot_edge_centers, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct
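# A minimal usage sketch for get_voronoi_nodes, assuming Zeo++ and its cython
# bindings are installed. "example.cif" and the ionic radii below are
# hypothetical placeholders, not values prescribed by this module.
def _example_get_voronoi_nodes():
    from pymatgen.core.structure import Structure

    structure = Structure.from_file("example.cif")  # hypothetical input file
    rad_dict = {"Li": 0.76, "O": 1.40}              # hypothetical ionic radii (Angstrom)
    nodes, edge_centers, face_centers = get_voronoi_nodes(
        structure, rad_dict=rad_dict, probe_rad=0.1)
    return nodes, edge_centers, face_centers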
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
"""
Analyze the void space in the input structure using high accuracy
voronoi decomposition.
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
        rad_dict: Dictionary of radii of elements in structure. Required
            for the high accuracy voronoi decomposition.
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii.
        probe_rad (optional): Sampling probe radius in Angstroms.
            Default is 0.1 A
    Returns:
        voronoi nodes as a pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of the input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_flag = True
rad_file = name + ".rad"
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
print("{} {}".format(el, rad_dict[el].real), file=fp)
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
# vornet, vor_edge_centers, vor_face_centers = \
# atmnet.perform_voronoi_decomposition()
red_ha_vornet = \
prune_voronoi_network_close_node(atmnet)
# generate_simplified_highaccuracy_voronoi_network(atmnet)
# get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
        lattice = Lattice.from_parameters(*structure.lattice.parameters)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct
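# A minimal usage sketch for get_high_accuracy_voronoi_nodes; unlike
# get_voronoi_nodes, rad_dict is a required argument here. The filename and
# radii are hypothetical placeholders.
def _example_high_accuracy_nodes():
    from pymatgen.core.structure import Structure

    structure = Structure.from_file("example.cif")  # hypothetical input file
    rad_dict = {"Li": 0.76, "O": 1.40}              # hypothetical ionic radii (Angstrom)
    return get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1)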
@requires(zeo_found,
"get_voronoi_nodes requires Zeo++ cython extension to be "
"installed. Please contact developers of Zeo++ to obtain it.")
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
"""
    Compute the free sphere parameters (largest included sphere, largest free
    sphere and largest included sphere along the free sphere path) of the
    input structure. Calls Zeo++ for the underlying Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
    Returns:
        dictionary with the keys 'inc_sph_max_dia', 'free_sph_max_dia' and
        'inc_sph_along_free_sph_path_max_dia' (diameters in Angstroms)
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
out_file = "temp.res"
atmnet.calculate_free_sphere_parameters(out_file)
if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
with open(out_file, "rt") as fp:
output = fp.readline()
else:
output = ""
        fields = [val.strip() for val in output.split()][1:4]
        if len(fields) != 3:
            raise ValueError("Error in zeo++ output stream")
        fields = [float(field) for field in fields]
        free_sphere_params = {'inc_sph_max_dia': fields[0],
                              'free_sph_max_dia': fields[1],
                              'inc_sph_along_free_sph_path_max_dia': fields[2]}
return free_sphere_params
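# A minimal usage sketch for get_free_sphere_params. The filename is a
# hypothetical placeholder; with rad_dict omitted, Zeo++ default radii are used.
def _example_free_sphere_params():
    from pymatgen.core.structure import Structure

    structure = Structure.from_file("example.cif")  # hypothetical input file
    params = get_free_sphere_params(structure, probe_rad=0.1)
    # e.g. params["free_sph_max_dia"] is the largest free sphere diameter
    return params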
# Deprecated. Not needed anymore
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,
probe_rad=0.1):
"""
Computes the volume and surface area of isolated void using Zeo++.
Useful to compute the volume and surface area of vacant site.
Args:
structure: pymatgen Structure containing vacancy
rad_dict(optional): Dictionary with short name of elements and their
radii.
chan_rad(optional): Minimum channel Radius.
probe_rad(optional): Probe radius for Monte Carlo sampling.
Returns:
volume: floating number representing the volume of void
"""
with ScratchDir('.'):
name = "temp_zeo"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
if rad_dict:
rad_file = name + ".rad"
with open(rad_file, 'w') as fp:
for el in rad_dict.keys():
fp.write("{0} {1}".format(el, rad_dict[el]))
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
vol_str = volume(atmnet, 0.3, probe_rad, 10000)
sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)
vol = None
sa = None
for line in vol_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
vol = -1.0
break
if float(fields[1]) == 0:
vol = -1.0
break
vol = float(fields[3])
for line in sa_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
# raise ValueError("Too many voids")
sa = -1.0
break
if float(fields[1]) == 0:
sa = -1.0
break
sa = float(fields[3])
        if vol is None or sa is None:
raise ValueError("Error in zeo++ output stream")
return vol, sa
|
fraricci/pymatgen
|
pymatgen/io/zeopp.py
|
Python
|
mit
| 18,471
|
[
"GULP",
"pymatgen"
] |
369ad7d13ae738f016984d6238485eba71ee5408c26f98adb788dca4451bec13
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('datapoints', '0003_load_ng_regions'),
]
operations = [
migrations.RunSQL("""
INSERT INTO auth_group
(name)
SELECT 'can_edit_all_indicators';
INSERT INTO auth_user_groups
(user_id,group_id)
SELECT au.id,ag.id FROM auth_user au
INNER JOIN auth_group ag
ON ag.name = 'can_edit_all_indicators';
INSERT INTO indicator_permission
(group_id, indicator_id)
SELECT ag.id, i.id FROM indicator i
INNER JOIN auth_group ag
ON ag.name = 'can_edit_all_indicators';
INSERT INTO region_permission
(read_write, region_id, user_id)
SELECT 'r', r.id, au.id
FROM region r
INNER JOIN auth_user au
ON 1=1;
INSERT INTO region_permission
(read_write, region_id, user_id)
SELECT 'w' , region_id, user_id FROM region_permission;
INSERT INTO indicator
(id, name, description, short_name, slug, is_reported, created_at)
SELECT
indicator_id, name, description, short_name, CAST(indicator_id as VARCHAR), CAST(0 AS BOOLEAN), now()
FROM (
SELECT 461 as indicator_id,'# of established LT vaccination transit points vs. total # identified by the programme' as name,'# of established LT vaccination transit points vs. total # identified by the programme' as description,'# of established LT vaccination transit points vs. total # identified by the programme' as short_name UNION ALL
SELECT 462,'Number of regions sampled','The total number of regions sampled.','# regions sampled' UNION ALL
SELECT 41,'Number of vaccinators','Number of vaccinators','Number of vaccinators' UNION ALL
SELECT 159,'Number of aVDPV2 cases','Number of aVDPV2 cases','Number of aVDPV2 cases' UNION ALL
SELECT 162,'Number of iVDPV cases','Number of iVDPV cases','Number of iVDPV cases' UNION ALL
SELECT 32,'Number of Unicef polio positions in their posts in PBR-approved structures','Number of Unicef polio positions in their posts in PBR-approved structures','Number of Unicef polio positions in their posts in PBR' UNION ALL
SELECT 31,'Target number of Unicef polio positions in PBR-approved structures','Target number of Unicef polio positions in PBR-approved structures','Target number of Unicef polio positions in PBR-approv' UNION ALL
SELECT 21,'Number of all missed children','Number of all missed children','All missed children' UNION ALL
SELECT 24,'Number of children missed due to other reasons','Number of children missed due to other reasons','Missed due to other reasons' UNION ALL
SELECT 25,'Number of refusals before re-visit','Number of refusals before re-visit','Refusals before re-visit' UNION ALL
SELECT 26,'Number of refusals after re-visit','Number of refusals after re-visit','Refusals after re-visit' UNION ALL
SELECT 27,'Number of microplans in high risk districts','Number of microplans in high risk districts','Microplans in High Risk Disrict' UNION ALL
SELECT 30,'Number of caregivers aware of polio campaigns','Number of caregivers aware of polio campaigns','Number of caregivers aware' UNION ALL
SELECT 28,'Number of microplans in high risk districts incorporating social data','Number of microplans in high risk districts Incorporating social data','Microplans incorporating social data' UNION ALL
SELECT 35,'Number of target social mobilizers','Number of target social mobilizers','Number of target social mobilizers' UNION ALL
SELECT 40,'Number of female social mobilizers','Number of female social mobilizers','Number of female social mobilizers' UNION ALL
SELECT 45,'FRR updated quarterly','FRR updated quarterly','FRR updated quarterly' UNION ALL
SELECT 46,'Number of social mobilizers receiving timely payment for last campaign','Number of social mobilizers receiving timely payment for last campaign','Number of social mobilizers receiving timely payment' UNION ALL
SELECT 34,'Number of high risk sub-districts covered by at least 1 social mobilizer','Number of high risk sub-districts covered by at least 1 social mobilizer','Number of high risk sub-districts w social mobilizers' UNION ALL
SELECT 43,'Amount total requested FRR funds','Amount total requested FRR funds','Amount total requested FRR funds' UNION ALL
SELECT 38,'Number of vaccination teams','Number of vaccination teams','Number of vaccination teams' UNION ALL
SELECT 36,'Number of social mobilizers in place','Number of social mobilizers in place','Number of social mobilizers in place' UNION ALL
SELECT 29,'Number of caregivers in high risk districts','Number of caregivers in high risk districts','Number of caregivers in high risk districts' UNION ALL
SELECT 44,'Amount FRR funds committed','Amount FRR funds committed ','Amount FRR funds committed ' UNION ALL
SELECT 49,'Number of social mobilizers trained on RI in past 6 mo','Number of social mobilizers trained on RI in past 6 mo','Number of social mobilizers trained on RI in past 6 mo' UNION ALL
SELECT 93,'ODK - Census Newborns Female','censusnewbornsf','ODK - Census Newborns Female' UNION ALL
SELECT 112,'ODK - group_msd_chd_msd_poldiffsf','group_msd_chd_msd_poldiffsf','ODK - group_msd_chd_msd_poldiffsf' UNION ALL
SELECT 42,'Number of vaccinators operating in HRDs trained on professional IPC package in last 6 months','Number of vaccinators operating in HRDs trained on professional IPC package in last 6 months','# vaccinators in HRDs trained on IPC in last 6 mo' UNION ALL
SELECT 1,'Polio Cases YTDYYYY','Polio Cases YTDYYYY','Polio Cases YTDYYYY' UNION ALL
SELECT 53,'Number of districts having NO stockouts of OPV','Number of districts having NO stockouts of OPV','Number of districts having NO stockouts of OPV' UNION ALL
SELECT 463,'number of social mobilisers participating the telephone survey','number of social mobilisers participating the telephone survey','number of social mobilisers participating the telephone survey' UNION ALL
SELECT 56,'Number of sub-regional units','Number of sub-regional units','Number of sub-regional units' UNION ALL
SELECT 57,'Number of sub-regional units where OPV arrived in sufficient time','Number of sub-regional units where OPV arrived in sufficient time','Number of sub-regional units where OPV arrived in time' UNION ALL
SELECT 70,'Number of WPV1 cases','Number of WPV1 cases','Number of WPV1 cases' UNION ALL
SELECT 62,'Number of health facilities w/ capacity','Number of health facilities w/ capacity','Number of health facilities w/ capacity' UNION ALL
SELECT 66,'Number of health facilities having NO stock-outs of OPV','Number of health facilities having NO stock-outs of OPV','Number of health facilities having NO stock-outs of OPV' UNION ALL
SELECT 176,'Number of established LT vaccination transit points with a dedicated social mobilizer','Number of established LT vaccination transit points with a dedicated social mobilizer','LT Transit Points with SM' UNION ALL
SELECT 67,'Percentage of States/Regions with OPV supply arriving at state/region level in sufficient time before campaign','Percentage of States/Regions with OPV supply arriving at state/region level in sufficient time before campaign','Percentage of States/Regions with OPV supply arriving a' UNION ALL
SELECT 470,'Number of children missed due to all access issues (TEMP)','TEMPORARY INDICATOR','Inaccessible Children (TEMP)' UNION ALL
SELECT 69,'Number of cVDPV2 cases','Number of cVDPV2 cases','Number of cVDPV2 cases' UNION ALL
SELECT 160,'Number of WPV3 cases','Number of WPV3 cases','Number of WPV3 cases' UNION ALL
SELECT 161,'Number of WPV1WPV3 cases','Number of WPV1WPV3 cases','Number of WPV1WPV3 cases' UNION ALL
SELECT 51,'Number of children vaccinated in HRD','Number of children vaccinated in HRD','Number of children vaccinated in HRD' UNION ALL
SELECT 177,'Number of children vaccinated at transit points last month','Number of children vaccinated at transit points last month','# of children vaccinated at transit points last month' UNION ALL
SELECT 167,'% missed children due to team did not visit','% missed children due to team did not visit','Not visited' UNION ALL
SELECT 165,'% missed children due to other reasons','% missed children due to other reasons','Other reasons' UNION ALL
SELECT 166,'% missed children due to refusal','% missed children due to refusal','Refused' UNION ALL
SELECT 164,'% missed children due to child not available','% missed children due to child not available','Child absent' UNION ALL
SELECT 175,'Number of established LT vaccination transit points','Number of established LT vaccination transit points ','LT Transit Points' UNION ALL
SELECT 95,'ODK - spec_grp_choice','spec_grp_choice','ODK - spec_grp_choice' UNION ALL
SELECT 169,'FRR Funding Level','Percent of FRR funded for the next 6 months','Funding' UNION ALL
SELECT 192,'Routine Immunization Defaulter Tracking','Number of routine immunization defaulters tracked and mobilized by SMs last month','RI Defaulter Tracking' UNION ALL
SELECT 179,'Social Mobilisers and Their Supervisors in Place','Proportion of target social mobilisers and their supervisors in place','Mobilisers in Place' UNION ALL
SELECT 172,'District OPV Stock Balance Reporting','Percent of high risk districts which reported on balance of SIA vaccine stock after last SIA round','Stock Balance Reporting' UNION ALL
SELECT 196,'HR district did NOT receive polio vaccine supply at least 3 days before the planned start date of campaign (1 = yes, 0 = no)','HR district did NOT receive polio vaccine supply at least 3 days before the planned start date of campaign (1 = yes, 0 = no)','HRD did NOT receive OPV supply at least 3d before campa' UNION ALL
SELECT 199,'Total number of all active cold chain equipment in district','Total number of all active cold chain equipment in district','Total # of all active cold chain equipment in district' UNION ALL
SELECT 206,'Number of social mobilizers and supervisors in place','Number of social mobilizers and supervisors in place','# of SMs and supervisors in place' UNION ALL
SELECT 37,'Number of vaccination teams with at least one female','Number of vaccination teams with at least one female','Number of vaccination teams with at least one female' UNION ALL
SELECT 210,'Number of social mobilizers who received on-the-job supervision during their last working week','Number of social mobilizers who received on-the-job supervision during their last working week','# of SMs who received supervision last work week' UNION ALL
SELECT 211,'Number of refusals resolved','Number of refusals resolved','Number of refusals resolved' UNION ALL
SELECT 197,'District reported on balance of SIA vaccine stocks after last SIA round? (1=yes, 0=no)','District reported on balance of SIA vaccine stocks after last SIA round? (1=yes, 0=no)','On balance of SIA vaccine stocks' UNION ALL
SELECT 213,'Number of absences before re-visit','Number of absences before re-visit','# of absences before re-visit' UNION ALL
SELECT 214,'Number of absences after re-visit','Number of absences after re-visit','# of absences after re-visit' UNION ALL
SELECT 216,'Number of RI sessions monitored having stockouts of any vaccine in the last month','Number of RI sessions monitored having stockouts of any vaccine in the last month','# of RI sessions monitored having stockouts of any vacc' UNION ALL
SELECT 217,'Number of RI sessions monitored','Number of RI sessions monitored','Number of RI sessions monitored' UNION ALL
SELECT 218,'Number of high risk districts with locations where OPV is delivered together with any other polio-funded services demanded by community','Number of high risk districts with locations where OPV is delivered together with any other polio-funded services demanded by community','# HRDs with locations where OPV is delivered w services' UNION ALL
SELECT 174,'Proportion of access-challenged districts that have had a specific access approach identified','Proportion of access-challenged districts that have had a specific access approach identified','% of access-challenged districts with access approach' UNION ALL
SELECT 189,'Percent of absences resolved during the previous month (both during campaigns and between rounds)','Percent of absences resolved during the previous month (both during campaigns and between rounds)','Absences Conversion' UNION ALL
SELECT 134,'ODK - group_spec_events_spec_mslscase','group_spec_events_spec_mslscase','ODK - group_spec_events_spec_mslscase' UNION ALL
SELECT 202,'Is an access-challenged district that has a specific access approach identified (1=yes, 0=no)','Is an access-challenged district that has a specific access approach identified (1=yes, 0=no)','Is an access-challenged district w access approach' UNION ALL
SELECT 203,'Is an access-challenged district (1=yes, 0=no)','Is an access-challenged district (1=yes, 0=no)','Is an access-challenged districts' UNION ALL
SELECT 472,'Is a high risk district where at least 90% of active cold chain equipment are functional (1=yes, 0=no)','Is a high risk district where at least 90% of active cold chain equipment are functional (1=yes, 0=no)','HRD with 90% functional cold chain' UNION ALL
SELECT 219,'OPV Wastage between 5% and 15%','Proportion of high risk districts where polio vaccine wastage rate in SIAs is between 5 and 15%, calculated from the number of children vaccinated and the number of vaccines used, as recorded in vaccinator tally sheet','Acceptable OPV Wastage' UNION ALL
SELECT 193,'OPV Delivery with Other Polio-Funded Services','Percent of HRDs where OPV is delivered together with any other polio-funded services demanded by the community','Polio-Plus Activities' UNION ALL
SELECT 475,'Missed Children','Proportion of children missed among all target children, according to independent monitoring (post campaign assessment) data or polio control room data','% Missed Children' UNION ALL
SELECT 180,'Vaccinator from Local Community','Percent of vaccination teams in which at least 1 member is from local community in high risk areas','Local Vaccinators' UNION ALL
SELECT 204,'Total number of LT vaccination transit points planned by the programme','Total number of LT vaccination transit points planned by the programme','Total # of LT transit points planned by the programme' UNION ALL
SELECT 207,'Target number of social mobilizers and supervisors','Target number of social mobilizers and supervisors','Target # of SMs and supervisors' UNION ALL
SELECT 215,'Number of absences resolved','Number of absences resolved','Number of absences resolved' UNION ALL
SELECT 81,'ODK - Total Vaccinated Newborns Male','vaxnewbornsm','ODK - Total Vaccinated Newborns Male' UNION ALL
SELECT 82,'ODK - Total Vaccinated Newborns Female','vaxnewbornsf','ODK - Total Vaccinated Newborns Female' UNION ALL
SELECT 89,'ODK - Total Vaccinated Newborns','tot_vaxnewborn','ODK - Total Vaccinated Newborns' UNION ALL
SELECT 86,'ODK - Total Vaccinated 2 to 11 months','tot_vax2_11mo','ODK - Total Vaccinated 2 to 11 months' UNION ALL
SELECT 88,'ODK - Total Vaccinated 12 to 59 months','tot_vax12_59mo','ODK - Total Vaccinated 12 to 59 months' UNION ALL
SELECT 78,'ODK - Total Vaccinated 2-11 months male','vax2_11mom','ODK - Total Vaccinated 2-11 months male' UNION ALL
SELECT 77,'ODK - Total Vaccinated 2-11 months Female','vax2_11mof','ODK - Total Vaccinated 2-11 months Female' UNION ALL
SELECT 80,'ODK - Total Vaccinated 12-59 months male','vax12_59mof','ODK - Total Vaccinated 12-59 months male' UNION ALL
SELECT 79,'ODK - Total Vaccinated 12-59 months Female','vax12_59mom','ODK - Total Vaccinated 12-59 months Female' UNION ALL
SELECT 74,'ODK - Total Vaccinated','tot_vax','ODK - Total Vaccinated' UNION ALL
SELECT 83,'ODK - Total Newborns','tot_newborns','ODK - Total Newborns' UNION ALL
SELECT 136,'ODK - Total Missed Due to Unhappy with Team Male','group_msd_chd_msd_unhappywteamm','ODK - Total Missed Due to Unhappy with Team Male' UNION ALL
SELECT 137,'ODK - Total Missed Due to Unhappy with Team Female','group_msd_chd_msd_unhappywteamf','ODK - Total Missed Due to Unhappy with Team Female' UNION ALL
SELECT 141,'ODK - Total Missed Due to Too Many Rounds Male','group_msd_chd_msd_toomanyroundsm','ODK - Total Missed Due to Too Many Rounds Male' UNION ALL
SELECT 140,'ODK - Total Missed Due to Too Many Rounds Female','group_msd_chd_msd_toomanyroundsf','ODK - Total Missed Due to Too Many Rounds Female' UNION ALL
SELECT 109,'ODK - Total Missed Due to Social Event Male','group_msd_chd_msd_soceventm','ODK - Total Missed Due to Social Event Male' UNION ALL
SELECT 110,'ODK - Total Missed Due to Social Event Female','group_msd_chd_msd_soceventf','ODK - Total Missed Due to Social Event Female' UNION ALL
SELECT 129,'ODK - Total Missed Due to Side Effects Male','group_msd_chd_msd_sideeffectsm','ODK - Total Missed Due to Side Effects Male' UNION ALL
SELECT 128,'ODK - Total Missed Due to Side Effects Female','group_msd_chd_msd_sideeffectsf','ODK - Total Missed Due to Side Effects Female' UNION ALL
SELECT 107,'ODK - Total Missed Due to Security Male','group_msd_chd_msd_securitym','ODK - Total Missed Due to Security Male' UNION ALL
SELECT 108,'ODK - Total Missed Due to Security Female','group_msd_chd_msd_securityf','ODK - Total Missed Due to Security Female' UNION ALL
SELECT 122,'ODK - Total Missed Due to Religions Beliefs Male','group_msd_chd_msd_relbeliefsm','ODK - Total Missed Due to Religions Beliefs Male' UNION ALL
SELECT 119,'ODK - Total Missed Due to Religions Beliefs Female','group_msd_chd_msd_relbeliefsf','ODK - Total Missed Due to Religions Beliefs Female' UNION ALL
SELECT 142,'ODK - Total Missed Due to Polio uncommon Male','group_msd_chd_msd_poliouncommonm','ODK - Total Missed Due to Polio uncommon Male' UNION ALL
SELECT 143,'ODK - Total Missed Due to Polio uncommon Female','group_msd_chd_msd_poliouncommonf','ODK - Total Missed Due to Polio uncommon Female' UNION ALL
SELECT 133,'ODK - Total Missed Due to Polio Has Cure Male','group_msd_chd_msd_poliohascurem','ODK - Total Missed Due to Polio Has Cure Male' UNION ALL
SELECT 132,'ODK - Total Missed Due to Polio Has Cure Female','group_msd_chd_msd_poliohascuref','ODK - Total Missed Due to Polio Has Cure Female' UNION ALL
SELECT 148,'ODK - Total Missed Due to Other Protection Male','group_msd_chd_msd_otherprotectionm','ODK - Total Missed Due to Other Protection Male' UNION ALL
SELECT 147,'ODK - Total Missed Due to Other Protection Female','group_msd_chd_msd_otherprotectionf','ODK - Total Missed Due to Other Protection Female' UNION ALL
SELECT 105,'ODK - Total Missed Due to No Plusses Male','group_msd_chd_msd_noplusesm','ODK - Total Missed Due to No Plusses Male' UNION ALL
SELECT 106,'ODK - Total Missed Due to No Plusses Female','group_msd_chd_msd_noplusesf','ODK - Total Missed Due to No Plusses Female' UNION ALL
SELECT 145,'ODK - Total Missed Due to No Government Services Male','group_msd_chd_msd_nogovtservicesm','ODK - Total Missed Due to No Government Services Male' UNION ALL
SELECT 146,'ODK - Total Missed Due to No Government Services Female','group_msd_chd_msd_nogovtservicesf','ODK - Total Missed Due to No Government Services Female' UNION ALL
SELECT 194,'District Had Delayed OPV Supply','Percent of High Risk Districts that did not receive polio vaccine supply at least 3 days before the planned starting date of the campaign','Delayed OPV Supply' UNION ALL
SELECT 117,'ODK - Total Missed Due to No Consent Male','group_msd_chd_msd_noconsentm','ODK - Total Missed Due to No Consent Male' UNION ALL
SELECT 118,'ODK - Total Missed Due to No Consent Female','group_msd_chd_msd_noconsentf','ODK - Total Missed Due to No Consent Female' UNION ALL
SELECT 139,'ODK - Total Missed Due to Household not visited Male','group_msd_chd_msd_hhnotvisitedm','ODK - Total Missed Due to Household not visited Male' UNION ALL
SELECT 138,'ODK - Total Missed Due to Household not visited Female','group_msd_chd_msd_hhnotvisitedf','ODK - Total Missed Due to Household not visited Female' UNION ALL
SELECT 121,'ODK - Total Missed Due to Felt Not Needed Male','group_msd_chd_msd_nofeltneedm','ODK - Total Missed Due to Felt Not Needed Male' UNION ALL
SELECT 120,'ODK - Total Missed Due to Felt Not Needed Female','group_msd_chd_msd_nofeltneedf','ODK - Total Missed Due to Felt Not Needed Female' UNION ALL
SELECT 131,'ODK - Total Missed Due to Family Moved Male','group_msd_chd_msd_familymovedm','ODK - Total Missed Due to Family Moved Male' UNION ALL
SELECT 130,'ODK - Total Missed Due to Family Moved Female','group_msd_chd_msd_familymovedf','ODK - Total Missed Due to Family Moved Female' UNION ALL
SELECT 101,'ODK - Total Missed Due to Children at Schools Male','group_msd_chd_msd_schoolm','ODK - Total Missed Due to Children at Schools Male' UNION ALL
SELECT 100,'ODK - Total Missed Due to Children at Schools Female','group_msd_chd_msd_schoolf','ODK - Total Missed Due to Children at Schools Female' UNION ALL
SELECT 115,'ODK - Total Missed Due to Child Sick Male','group_msd_chd_msd_childsickm','ODK - Total Missed Due to Child Sick Male' UNION ALL
SELECT 116,'ODK - Total Missed Due to Child Sick Female','group_msd_chd_msd_childsickf','ODK - Total Missed Due to Child Sick Female' UNION ALL
SELECT 96,'ODK - Total Missed Due to Child on Farm Male','group_msd_chd_msd_farmm','ODK - Total Missed Due to Child on Farm Male' UNION ALL
SELECT 97,'ODK - Total Missed Due to Child on Farm Female','group_msd_chd_msd_farmf','ODK - Total Missed Due to Child on Farm Female' UNION ALL
SELECT 114,'ODK - Total Missed Due to Child Died Male','group_msd_chd_msd_childdiedm','ODK - Total Missed Due to Child Died Male' UNION ALL
SELECT 123,'ODK - Total Missed Due to Child at Playground Male','group_msd_chd_msd_playgroundm','ODK - Total Missed Due to Child at Playground Male' UNION ALL
SELECT 124,'ODK - Total Missed Due to Child at Playground Female','group_msd_chd_msd_playgroundf','ODK - Total Missed Due to Child at Playground Female' UNION ALL
SELECT 98,'ODK - Total Missed Due to Child at Market Male','group_msd_chd_msd_marketm','ODK - Total Missed Due to Child at Market Male' UNION ALL
SELECT 99,'ODK - Total Missed Due to Child at Market Female','group_msd_chd_msd_marketf','ODK - Total Missed Due to Child at Market Female' UNION ALL
SELECT 102,'ODK - Total missed due to Aged Out Male','group_msd_chd_msd_agedoutm','ODK - Total missed due to Aged Out Male' UNION ALL
SELECT 103,'ODK - Total missed due to Aged Out Female','group_msd_chd_msd_agedoutf','ODK - Total missed due to Aged Out Female' UNION ALL
SELECT 113,'ODK - Total Missed Due to Child Died Female','group_msd_chd_msd_childdiedf','ODK - Total Missed Due to Child Died Female' UNION ALL
SELECT 76,'ODK - Total Missed','tot_missed','ODK - Total Missed' UNION ALL
SELECT 90,'ODK - Total Children 2 to 11 months','tot_2_11months','ODK - Total Children 2 to 11 months' UNION ALL
SELECT 92,'ODK - Total children 12 to 59 Months','tot_12_59months','ODK - Total children 12 to 59 Months' UNION ALL
SELECT 75,'ODK - Total Census','tot_census','ODK - Total Census' UNION ALL
SELECT 127,'ODK - group_msd_chd_tot_missed_check','group_msd_chd_tot_missed_check','ODK - group_msd_chd_tot_missed_check' UNION ALL
SELECT 84,'ODK - Census 2 to 11 months Male','census2_11mom','ODK - Census 2 to 11 months Male' UNION ALL
SELECT 85,'ODK - Census 2 to 11 months Female','census2_11mof','ODK - Census 2 to 11 months Female' UNION ALL
SELECT 87,'ODK - Census 12 to 59 months Male','census12_59mom','ODK - Census 12 to 59 months Male' UNION ALL
SELECT 91,'ODK - Census 12 to 59 months Female','census12_59mof','ODK - Census 12 to 59 months Female' UNION ALL
SELECT 94,'ODK - Census Newborns Male','censusnewbornsm','ODK - Census Newborns Male' UNION ALL
SELECT 111,'ODK - group_msd_chd_msd_poldiffsm','group_msd_chd_msd_poldiffsm','ODK - group_msd_chd_msd_poldiffsm' UNION ALL
SELECT 126,'ODK - group_spec_events_spec_afpcase','group_spec_events_spec_afpcase','ODK - group_spec_events_spec_afpcase' UNION ALL
SELECT 149,'ODK - group_spec_events_spec_cmamreferral','group_spec_events_spec_cmamreferral','ODK - group_spec_events_spec_cmamreferral' UNION ALL
SELECT 104,'ODK - group_spec_events_spec_fic','group_spec_events_spec_fic','ODK - group_spec_events_spec_fic' UNION ALL
SELECT 125,'ODK - group_spec_events_spec_newborn','group_spec_events_spec_newborn','ODK - group_spec_events_spec_newborn' UNION ALL
SELECT 150,'ODK - group_spec_events_spec_otherdisease','group_spec_events_spec_otherdisease','ODK - group_spec_events_spec_otherdisease' UNION ALL
SELECT 151,'ODK - group_spec_events_spec_pregnantmother','group_spec_events_spec_pregnantmother','ODK - group_spec_events_spec_pregnantmother' UNION ALL
SELECT 144,'ODK - group_spec_events_spec_rireferral','group_spec_events_spec_rireferral','ODK - group_spec_events_spec_rireferral' UNION ALL
SELECT 152,'ODK - group_spec_events_spec_vcmattendedncer','group_spec_events_spec_vcmattendedncer','ODK - group_spec_events_spec_vcmattendedncer' UNION ALL
SELECT 135,'ODK - group_spec_events_spec_zerodose','group_spec_events_spec_zerodose','ODK - group_spec_events_spec_zerodose' UNION ALL
SELECT 274,'Outside_Percent missed children','Outside_Percent missed children','Not Marked' UNION ALL
SELECT 198,'Number of functional active cold chain equipment in the district','Number of functional active cold chain equipment in the district','# functional active cold chain equipment in district' UNION ALL
SELECT 201,'District has specific access approach identified (1=yes, 0=no)','District has specific access approach identified (1=yes, 0=no)','District has specific access approach identified' UNION ALL
SELECT 208,'Number of vaccination teams with at least 1 member from the local community','Number of vaccination teams with at least 1 member from the local community','# of vaccination teams with at least 1 local member' UNION ALL
SELECT 209,'Number of social mobilizers trained or refreshed with the integrated health package in the last 6 months','Number of social mobilizers trained or refreshed with the integrated health package in the last 6 months','# SMs trained, refreshed w. health package in last 6 mo' UNION ALL
SELECT 55,'Number of children targeted in high-risk districts','Number of children targeted in high-risk districts','Number of children targeted in HRDs' UNION ALL
SELECT 220,'Vaccine wastage rate','Vaccine wastage rate','Vaccine wastage rate' UNION ALL
SELECT 33,'Number of high risk sub-districts','Number of high risk sub-districts ','Number of high risk sub-districts ' UNION ALL
SELECT 195,'Is a high risk district? (1=yes, 0=no)','Is a high risk district? (1=yes, 0=no)','Is high risk?' UNION ALL
SELECT 254,'Endprocess_NOimmReas10 - Fear of OPV side effects','Endprocess_NOimmReas10 - Fear of OPV side effects','Fear of Side Effects' UNION ALL
SELECT 221,'Is an HRD that has polio vaccine wastage rate in SIAs between 5 and 15% (1=yes, 0=no)','Is an HRD that has polio vaccine wastage rate in SIAs between 5 and 15% (1=yes, 0=no)','Is an that has polio vaccine wastage btwn 5-15%' UNION ALL
SELECT 224,'Percentage of established LT vaccination transit points with a dedicated social mobilizer, out of total number established by the programme','Percentage of established LT vaccination transit points with a dedicated social mobilizer, out of total number established by the programme','Percentage of established LT vaccination transit points with a dedicated social mobilizer, out of total number established by the programme' UNION ALL
SELECT 222,'Percentage of districts with microplans that passed review (out of districts sampled)','Percentage of districts with microplans that passed review (out of districts sampled)','Percentage of districts with microplans that passed review' UNION ALL
SELECT 191,'Routine Immunization Session Stockout','Proprotion of routine immunization sessions monitored having stockouts of any vaccine during last month','RI Stockouts' UNION ALL
SELECT 178,'Geographic Coverage by Social Mobilisers','Proportion of High-Risk sub-districts covered by social mobilisers','Mobiliser Coverage' UNION ALL
SELECT 184,'Social Mobilisers Training in Integrated Health Package','Proportion of social mobilisers trained or refreshed with the integrated health package in the last 6 months','Mobiliser Training on Polio+' UNION ALL
SELECT 250,'Endprocess_NOimmReas7 - Child at school','Endprocess_NOimmReas7 - Child at school','School ' UNION ALL
SELECT 273,'Outside_Total Not Marked','Outside_Total Not Marked','Missed (Outside)' UNION ALL
SELECT 272,'Endprocess_Percent missed children','Endprocess_Percent missed children','Missed Children' UNION ALL
SELECT 247,'Endprocess_NoimmReas4 - Child at social event','Endprocess_NoimmReas4 - Child at social event','Social Event ' UNION ALL
SELECT 249,'Endprocess_NOimmReas6 - Child at farm','Endprocess_NOimmReas6 - Child at farm','Farm ' UNION ALL
SELECT 242,'Number of vaccine doses wasted','Number of vaccine doses wasted','Number of vaccine doses wasted' UNION ALL
SELECT 243,'Number of children 12 months and under','Number of children 12 months and under','Number of children 12 months and under' UNION ALL
SELECT 244,'Number of children under 12 months who received DPT3 or Penta3','Number of children under 12 months who received DPT3 or Penta3','No. of children under 12 months who received RI' UNION ALL
SELECT 248,'Endprocess_NOimmReas5 - Child at market','Endprocess_NOimmReas5 - Child at market','Market ' UNION ALL
SELECT 246,'Endprocess_NOimmReas3 - Child at playground','Endprocess_NOimmReas3 - Child at playground','Playground ' UNION ALL
SELECT 253,'Endprocess_NOimmReas9 - Too many rounds','Endprocess_NOimmReas9 - Too many rounds','Too Many Rounds ' UNION ALL
SELECT 263,'Endprocess_NOimmReas19 - No caregiver consent','Endprocess_NOimmReas19 - No caregiver consent','No Consent' UNION ALL
SELECT 251,'Endprocess_Reason for missed children - child absent','Endprocess_Reason for missed children - child absent','Child Absent' UNION ALL
SELECT 252,'Endprocess_NOimmReas8 - Child sick','Endprocess_NOimmReas8 - Child sick','Child Sick' UNION ALL
SELECT 236,'Caregiver Knowledge of Routine Immunization','Proportion of caregivers in HRDs who know number of times they need to visit a RI site for routine immunization before a child reaches 1 year of age','Knowledge of RI' UNION ALL
SELECT 239,'Female Social Moblisers in Place','Proportion of female social mobilisers among social mobilisers in place','Female Mobilisers' UNION ALL
SELECT 185,'On-the-Job Supervision of Social Mobilisers','Proportion of social mobilisers who received on-the-job supervision during their last working week','Mobiliser Supervision' UNION ALL
SELECT 226,'Timely Payment to Social Mobilisers','Proportion of social mobilisers who received timely payment for last campaign/month''s salary among ALL social mobilisers involved in the campaign.','Timely Mobiliser Payment' UNION ALL
SELECT 228,'Vaccinator Training in Inter-Personal Communication Skills','Proportion of vaccinators operating in HRD who have been trained on professional Inter Personal Communication package provided by UNICEF in the last 6 months','Vaccinator Training on IPC Skills' UNION ALL
SELECT 230,'Female Vaccinator in Team','Proportion of vaccination teams in which at least one member is female in HR areas','Female Vaccinators' UNION ALL
SELECT 476,'(TW Test) Infected People','How many people infected?','tw-test-outbreak-infected-people' UNION ALL
SELECT 477,'TW Test Outbreak Infected People','TW Test Outbreak Infected People','TW Test Outbreak Infected People' UNION ALL
SELECT 279,'Endprocess_Influence5 - Radio','Endprocess_Influence5 - Radio','Radio ' UNION ALL
SELECT 258,'Endprocess_NOimmReas14 - Religious belief','Endprocess_NOimmReas14 - Religious belief','Religious Beliefs ' UNION ALL
SELECT 288,'Endprocess_Percent vaccination influencer is radio','Endprocess_Percent vaccination influencer is radio','Radio ' UNION ALL
SELECT 266,'Endprocess_NOimmReas20 - Security','Endprocess_NOimmReas20 - Security','Security ' UNION ALL
SELECT 269,'Endprocess_Number of children 0 to 59 marked','Endprocess_Number of children 0 to 59 marked','Endprocess_Marked0to59' UNION ALL
SELECT 270,'Endprocess_Number of children 0 to 59 unimmunized','Endprocess_Number of children 0 to 59 unimmunized','Endprocess_UnImmun0to59' UNION ALL
SELECT 271,'Endprocess_Number of children seen','Endprocess_Number of children seen','Endprocess_Number of children seen' UNION ALL
SELECT 275,'Outside_Total seen','Outside_Total seen','Outside_Total seen' UNION ALL
SELECT 287,'Endprocess_Percent vaccination influencer is personal decision','Endprocess_Percent vaccination influencer is personal decision','Personal Decision' UNION ALL
SELECT 277,'Endprocess_Not aware','Endprocess_Not aware','Endprocess_Not aware' UNION ALL
SELECT 285,'Endprocess_Influence8 - Vaccinator','Endprocess_Influence8 - Vaccinator','Vaccinator ' UNION ALL
SELECT 260,'Endprocess_NOimmReas16 - Unhappy with vaccination team','Endprocess_NOimmReas16 - Unhappy with vaccination team','Unhappy with Team' UNION ALL
SELECT 291,'Endprocess_Percent vaccination influencer is traditional leader','Endprocess_Percent vaccination influencer is traditional leader','Trad. Leader' UNION ALL
SELECT 282,'Endprocess_Influence3 - Traditional leader','Endprocess_Influence3 - Traditional leader','Trad. Leader ' UNION ALL
SELECT 293,'Endprocess_Percent vaccination influencer is religious leader','Endprocess_Percent vaccination influencer is religious leader','Rel. Leader' UNION ALL
SELECT 284,'Endprocess_Influence4 - Religious leader','Endprocess_Influence4 - Religious leader','Rel. Leader ' UNION ALL
SELECT 290,'Endprocess_Percent vaccination influencer is neighbour','Endprocess_Percent vaccination influencer is neighbour','Neighbour ' UNION ALL
SELECT 265,'Endprocess_Missed children - All reasons','Endprocess_Missed children - All reasons','Missed (Inside)' UNION ALL
SELECT 286,'Endprocess_All vaccination influencers','Endprocess_All vaccination influencers','Endprocess_All vaccination influencers' UNION ALL
SELECT 289,'Endprocess_Percent vaccination influencer is husband','Endprocess_Percent vaccination influencer is husband','Husband' UNION ALL
SELECT 173,'Cold Chain Functional Status','Proportion of high risk districts where at least 90% of active cold chain equipment are functional','Cold Chain Functional Status' UNION ALL
SELECT 276,'Endprocess_Percent caregiver awareness','Endprocess_Percent caregiver awareness','Caregiver Awareness' UNION ALL
SELECT 283,'Endprocess_Influence7 - Community mobiliser','Endprocess_Influence7 - Community mobiliser','Comm. Mobiliser ' UNION ALL
SELECT 292,'Endprocess_Pct vaccination influencer is community mobiliser','Endprocess_Pct vaccination influencer is community mobiliser','Comm. Mobiliser ' UNION ALL
SELECT 280,'Endprocess_Influence2 - Husband','Endprocess_Influence2 - Husband','Husband ' UNION ALL
SELECT 281,'Endprocess_Influence6 - Neighbour','Endprocess_Influence6 - Neighbour','Neighbour ' UNION ALL
SELECT 262,'Endprocess_NOimmReas18 - No felt need','Endprocess_NOimmReas18 - No felt need','No Felt Needed' UNION ALL
SELECT 264,'Endprocess_Reason for missed children - Non compliance','Endprocess_Reason for missed children - Non compliance','Non-Comp. ' UNION ALL
SELECT 261,'Endprocess_NOimmReas17 - No pluses given','Endprocess_NOimmReas17 - No pluses given','No Pluses ' UNION ALL
SELECT 267,'Endprocess_NOimmReas1 - Household not in microplan','Endprocess_NOimmReas1 - Household not in microplan','Not in Plan ' UNION ALL
SELECT 268,'Endprocess_NOimmReas2 - Household in microplan but not visited','Endprocess_NOimmReas2 - Household in microplan but not visited','Not Visited ' UNION ALL
SELECT 278,'Endprocess_Influence1 - Personal decision','Endprocess_Influence1 - Personal decision','Personal Decision ' UNION ALL
SELECT 256,'Endprocess_NOimmReas12 - Polio has cure','Endprocess_NOimmReas12 - Polio has cure','Polio has Cure ' UNION ALL
SELECT 259,'Endprocess_NOimmReas15 - Political differences','Endprocess_NOimmReas15 - Political differences','Political ' UNION ALL
SELECT 307,'Endprocess_Percent source of info is town announcer','Endprocess_Percent source of info is town announcer','Town Announcer' UNION ALL
SELECT 324,'Endprocess_Pct of children absent due to child at social event','Endprocess_Pct of children absent due to child at social event','Social Event' UNION ALL
SELECT 322,'Endprocess_Pct missed children due to security','Endprocess_Pct missed children due to security','Security' UNION ALL
SELECT 296,'Endprocess_Source of info on IPDs - Radio','Endprocess_Source of info on IPDs - Radio','Radio ' UNION ALL
SELECT 303,'Endprocess_Source of info on IPDs - Relative','Endprocess_Source of info on IPDs - Relative','Relative ' UNION ALL
SELECT 306,'Endprocess_All sources of info on IPDs','Endprocess_All sources of info on IPDs','Endprocess_All sources of info on IPDs' UNION ALL
SELECT 309,'Endprocess_Percent source of info is relative','Endprocess_Percent source of info is relative','Relative' UNION ALL
SELECT 295,'Endprocess_Source of info on IPDs - Town announcer','Endprocess_Source of info on IPDs - Town announcer','Town Announcer ' UNION ALL
SELECT 310,'Endprocess_Percent source of info is radio','Endprocess_Percent source of info is radio','Radio' UNION ALL
SELECT 314,'Endprocess_Percent source of info is poster','Endprocess_Percent source of info is poster','Poster' UNION ALL
SELECT 323,'Endprocess_Pct of children absent due to playground','Endprocess_Pct of children absent due to playground','Playground' UNION ALL
SELECT 319,'Endprocess_Pct missed children due to HH in plan but not visited','Endprocess_Pct missed children due to HH in plan but not visited','Not Visited' UNION ALL
SELECT 321,'Endprocess_Pct missed children due to non compliance','Endprocess_Pct missed children due to non compliance','Non-Comp.' UNION ALL
SELECT 311,'Endprocess_Percent source of info is newspaper','Endprocess_Percent source of info is newspaper','Newspaper' UNION ALL
SELECT 308,'Endprocess_Percent source of info is mosque announcement','Endprocess_Percent source of info is mosque announcement','Mosque' UNION ALL
SELECT 313,'Endprocess_Percent source of info is traditional leader','Endprocess_Percent source of info is traditional leader','Trad. Leader ' UNION ALL
SELECT 325,'Endprocess_Pct of children absent due to child at market','Endprocess_Pct of children absent due to child at market','Market' UNION ALL
SELECT 318,'Endprocess_Percent missed children due to HH not in plan','Endprocess_Percent missed children due to HH not in plan','Not in Plan' UNION ALL
SELECT 297,'Endprocess_Source of info on IPDs - Traditional leader','Endprocess_Source of info on IPDs - Traditional leader','Trad. Leader ' UNION ALL
SELECT 312,'Endprocess_Percent source of info is health worker','Endprocess_Percent source of info is health worker','Health Worker' UNION ALL
SELECT 316,'Endprocess_Percent source of info is religious leader','Endprocess_Percent source of info is religious leader','Rel. Leader ' UNION ALL
SELECT 315,'Endprocess_Percent source of info is community mobiliser','Endprocess_Percent source of info is community mobiliser','Comm. Mobiliser' UNION ALL
SELECT 298,'Endprocess_Source of info on IPDs - Religious leader','Endprocess_Source of info on IPDs - Religious leader','Rel. Leader ' UNION ALL
SELECT 320,'Endprocess_Pct missed children due to child absent','Endprocess_Pct missed children due to child absent','Child Absent ' UNION ALL
SELECT 302,'Endprocess_Source of info on IPDs - Banner','Endprocess_Source of info on IPDs - Banner','Banner' UNION ALL
SELECT 317,'Endprocess_Percent source of info is banner','Endprocess_Percent source of info is banner','Banner ' UNION ALL
SELECT 305,'Endprocess_Source of info on IPDs - Community mobiliser','Endprocess_Source of info on IPDs - Community mobiliser','Comm. Mobiliser ' UNION ALL
SELECT 304,'Endprocess_Source of info on IPDs - Health worker','Endprocess_Source of info on IPDs - Health worker','Health Worker ' UNION ALL
SELECT 299,'Endprocess_Source of info on IPDs - Mosque announcement','Endprocess_Source of info on IPDs - Mosque announcement','Mosque ' UNION ALL
SELECT 300,'Endprocess_Source of info on IPDs - Newspaper','Endprocess_Source of info on IPDs - Newspaper','Newspaper ' UNION ALL
SELECT 301,'Endprocess_Source of info on IPDs - Poster','Endprocess_Source of info on IPDs - Poster','Poster ' UNION ALL
SELECT 332,'Endprocess_Pct of non compliance due to too many rounds','Endprocess_Pct of non compliance due to too many rounds','Too Many Rounds' UNION ALL
SELECT 327,'Endprocess_Pct of children absent due to child at school','Endprocess_Pct of children absent due to child at school','School' UNION ALL
SELECT 330,'Endprocess_Pct of non compliance due to religious belief','Endprocess_Pct of non compliance due to religious belief','Religious Beliefs' UNION ALL
SELECT 335,'Endprocess_Pct of non compliance due to fear of OPV side effects','Endprocess_Pct of non compliance due to fear of OPV side effects','Endprocess_Pct of non compliance due to OPV side effect' UNION ALL
SELECT 336,'Endprocess_Pct of non compliance due to other remedies available','Endprocess_Pct of non compliance due to other remedies available','Endprocess_Pct of non compliance due to other remedies ' UNION ALL
SELECT 337,'Endprocess_Pct of non compliance due to unhappy with team','Endprocess_Pct of non compliance due to unhappy with team','Endprocess_Pct of non compliance due to unhappy w team' UNION ALL
SELECT 338,'Endprocess_Pct of non compliance due to no caregiver consent','Endprocess_Pct of non compliance due to no caregiver consent','Endprocess_Pct of non compliance due no caregiver conse' UNION ALL
SELECT 339,'Endprocess_Pct of non compliance due to no felt need','Endprocess_Pct of non compliance due to no felt need','Endprocess_Pct of non compliance due to no felt need' UNION ALL
SELECT 334,'Endprocess_Pct of non compliance due to political differences','Endprocess_Pct of non compliance due to political differences','Political' UNION ALL
SELECT 329,'Endprocess_Pct of non compliance due to polio is rare','Endprocess_Pct of non compliance due to polio is rare','Polio is Rare' UNION ALL
SELECT 333,'Endprocess_Pct of non compliance due to polio has cure','Endprocess_Pct of non compliance due to polio has cure','Polio has Cure' UNION ALL
SELECT 344,'Redo_Number of children 0 to 59 months missed in HH due to non compliance','Redo_Number of children 0 to 59 months missed in HH due to non compliance','Redo_No of children 0 to 59 missed in HH due to NC' UNION ALL
SELECT 348,'Redo_Percent non compliance resolved by other','Redo_Percent non compliance resolved by other','Other' UNION ALL
SELECT 331,'Endprocess_Pct of non compliance due to no pluses given','Endprocess_Pct of non compliance due to no pluses given','No Pluses' UNION ALL
SELECT 349,'Endprocess_Aware','Endprocess_Aware','Endprocess_Aware' UNION ALL
SELECT 350,'Redo_Number of children 0-59 months missed in HH due to child absence','Redo_Number of children 0-59 months missed in HH due to child absence','Redo_Child absent' UNION ALL
SELECT 351,'Redo_Number of children 0-59 months missed in other NC places','Redo_Number of children 0-59 months missed in other NC places','Redo_ChildNCOther' UNION ALL
SELECT 352,'Redo_Number of children 0-59 months missed in NC schools','Redo_Number of children 0-59 months missed in NC schools','Redo_ChildNCShool' UNION ALL
SELECT 353,'Redo_Reasons for NC - Child sick','Redo_Reasons for NC - Child sick','Redo_ChildSick' UNION ALL
SELECT 354,'Redo_No. immunised in households with community leader intervention - Child absent','Redo_No. immunised in households with community leader intervention - Child absent','Redo_COMImmRedoABSENT' UNION ALL
SELECT 355,'Redo_No. of household resolved','Redo_No. of household resolved','Redo_HHRevisited' UNION ALL
SELECT 356,'Redo_No. immunised in other NC places with intervention of community influencers','Redo_No. immunised in other NC places with intervention of community influencers','Redo_IMMOTCommNC' UNION ALL
SELECT 345,'Redo_Percent non compliance resolved by traditional leader','Redo_Percent non compliance resolved by traditional leader','Trad. Leader ' UNION ALL
SELECT 340,'Redo_Non compliance resolved by traditional leader','Redo_Non compliance resolved by traditional leader','Trad. Leader ' UNION ALL
SELECT 347,'Redo_Percent non compliance resolved by religious leader','Redo_Percent non compliance resolved by religious leader','Rel. Leader ' UNION ALL
SELECT 342,'Redo_Non compliance resolved by religious leader','Redo_Non compliance resolved by religious leader','Rel. Leader ' UNION ALL
SELECT 346,'Redo_Percent non compliance resolved by community leader','Redo_Percent non compliance resolved by community leader','Comm. Leader' UNION ALL
SELECT 328,'Endprocess_Pct of non compliance due to child sick','Endprocess_Pct of non compliance due to child sick','Child Sick ' UNION ALL
SELECT 341,'Redo_Non compliance resolved by community leader','Redo_Non compliance resolved by community leader','Comm. Leader ' UNION ALL
SELECT 343,'Redo_Non compliance resolved by other','Redo_Non compliance resolved by other','Other ' UNION ALL
SELECT 357,'Redo_No. immunised in other NC places with intervention of others','Redo_No. immunised in other NC places with intervention of others','Redo_IMMOTOtherNC' UNION ALL
SELECT 358,'Redo_No. immunised in other NC places with intervention of religious leaders','Redo_No. immunised in other NC places with intervention of religious leaders','Redo_IMMOTRelNC' UNION ALL
SELECT 359,'Redo_No. immunised in other NC places with intervention of traditional leaders','Redo_No. immunised in other NC places with intervention of traditional leaders','Redo_IMMOTTradNC' UNION ALL
SELECT 360,'Redo_No. Immunised in households with traditonal leader intervention - Child absent','Redo_No. Immunised in households with traditonal leader intervention - Child absent','Redo_ImmRedoABSENT' UNION ALL
SELECT 361,'Redo_No. immunised in NC schools with intervention of community influencer','Redo_No. immunised in NC schools with intervention of community influencer','Redo_IMMSCCommNC' UNION ALL
SELECT 362,'Redo_No. immunised in NC schools with intervention of others','Redo_No. immunised in NC schools with intervention of others','Redo_IMMSCOtherNC' UNION ALL
SELECT 363,'Redo_No. immunised in NC schools with intervention of religious leader','Redo_No. immunised in NC schools with intervention of religious leader','Redo_IMMSCRelNC' UNION ALL
SELECT 364,'Redo_No. immunised in NC schools with intervention of traditional leader','Redo_No. immunised in NC schools with intervention of traditional leader','Redo_IMMSCTradNC' UNION ALL
SELECT 365,'Redo_Reasons for NC - No caregiver consent','Redo_Reasons for NC - No caregiver consent','Redo_NoCaregiver' UNION ALL
SELECT 366,'Redo_Number of NC households','Redo_Number of NC households','Redo_NoHHRedo' UNION ALL
SELECT 370,'Redo_No. of NC children not immunised (not resolved)','Redo_No. of NC children not immunised (not resolved)','Redo_NotImmRedo' UNION ALL
SELECT 371,'Redo_No. of children absent not immunised (not resolved)','Redo_No. of children absent not immunised (not resolved)','Redo_NotImmRedoABSENT' UNION ALL
SELECT 372,'Redo_Reasons for NC - OPV safety','Redo_Reasons for NC - OPV safety','Redo_OpvSafety' UNION ALL
SELECT 368,'Redo_Reasons for NC - No need felt','Redo_Reasons for NC - No need felt','Redo_NoNeedFelt' UNION ALL
SELECT 369,'Redo_Number of other NC places','Redo_Number of other NC places','Redo_NoNOCOther' UNION ALL
SELECT 373,'Redo_No. immunised in households with intervention of others - Child absent','Redo_No. immunised in households with intervention of others - Child absent','Redo_OTHERImRedoABSENT' UNION ALL
SELECT 374,'Redo_No. of other places resolved','Redo_No. of other places resolved','Redo_OTRevisited' UNION ALL
SELECT 375,'Redo_Reasons for NC - Political differences','Redo_Reasons for NC - Political differences','Redo_PoliticalDifferences' UNION ALL
SELECT 376,'Redo_Reasons for child absent - Playground','Redo_Reasons for child absent - Playground','Redo_Reason1ABS' UNION ALL
SELECT 377,'Redo_Reasons for child absent - Market','Redo_Reasons for child absent - Market','Redo_Reason2ABS' UNION ALL
SELECT 378,'Redo_Reasons for child absent - School','Redo_Reasons for child absent - School','Redo_Reason3ABS' UNION ALL
SELECT 379,'Redo_Reasons for child absent - Farm','Redo_Reasons for child absent - Farm','Redo_Reason4ABS' UNION ALL
SELECT 380,'Redo_Reasons for child absent - Social Event','Redo_Reasons for child absent - Social Event','Redo_Reason5ABS' UNION ALL
SELECT 381,'Redo_Reasons for child absent - Other','Redo_Reasons for child absent - Other','Redo_Reason6ABS' UNION ALL
SELECT 382,'Redo_Reasons for NC - Reason not given','Redo_Reasons for NC - Reason not given','Redo_ReasonNotGiven' UNION ALL
SELECT 383,'Redo_Reasons for NC - Religious belief','Redo_Reasons for NC - Religious belief','Redo_ReligiousBelief' UNION ALL
SELECT 384,'Redo_No. immunised in households with religious leader intervention - Child absent','Redo_No. immunised in households with religious leader intervention - Child absent','Redo_RELImmRedoABSENT' UNION ALL
SELECT 385,'Redo_No. of schools resolved','Redo_No. of schools resolved','Redo_SCRevisited' UNION ALL
SELECT 386,'Redo_No. of settlements to revisit','Redo_No. of settlements to revisit','Redo_SettlementsRedo' UNION ALL
SELECT 387,'Redo_Reasons for NC - Too many rounds','Redo_Reasons for NC - Too many rounds','Redo_TooManyRounds' UNION ALL
SELECT 388,'Redo_Reasons for NC - unhappy with immunisation personnel','Redo_Reasons for NC - unhappy with immunisation personnel','Redo_UnhappyWith' UNION ALL
SELECT 367,'Redo_Number of NC schools','Redo_Number of NC schools','Redo_NoNCShools' UNION ALL
SELECT 389,'Redo_MissedRedo','Redo_MissedRedo','Redo_MissedRedo' UNION ALL
SELECT 390,'Redo_TargetRedo','Redo_TargetRedo','Redo_TargetRedo' UNION ALL
SELECT 391,'Outside_Number of settlements visited by surveyor','Outside_Number of settlements visited by surveyor','Outside_Settlementno' UNION ALL
SELECT 392,'Outside_total number of children sampled in settlement 3','Outside_total number of children sampled in settlement 3','Outside_totSeet3' UNION ALL
SELECT 393,'Outside_total children not finger marked (not immunized) in settlement 3','Outside_total children not finger marked (not immunized) in settlement 3','Outside_totMist3' UNION ALL
SELECT 394,'Outside_total number of children sampled in settlement 1','Outside_total number of children sampled in settlement 1','Outside_totSeet1' UNION ALL
SELECT 395,'Outside_total number of children sampled in settlement 2','Outside_total number of children sampled in settlement 2','Outside_totSeet2' UNION ALL
SELECT 396,'Outside_total children not finger marked (not immunized) in settlement 1','Outside_total children not finger marked (not immunized) in settlement 1','Outside_totMist1' UNION ALL
SELECT 397,'Outside_total children not finger marked (not immunized) in settlement 2','Outside_total children not finger marked (not immunized) in settlement 2','Outside_totMist2' UNION ALL
SELECT 398,'Outside_total number of locations sampled by surveyor','Outside_total number of locations sampled by surveyor','Outside_numberof Locations' UNION ALL
SELECT 399,'Outside_Unvaccinated this round','Outside_Unvaccinated this round','Outside_Unvaccinated this round' UNION ALL
SELECT 400,'Outside_0to9mth Seen','Outside_0to9mth Seen','Outside_0to9mth Seen' UNION ALL
SELECT 5,'Number of vaccine doses used in HRD','Number of vaccine doses used in HRD','Number of vaccine doses used in HRD' UNION ALL
SELECT 401,'Outside_0to9mth notMarked','Outside_0to9mth notMarked','Outside_0to9mth notMarked' UNION ALL
SELECT 404,'Outside_children 24-59mth sampled by surveyor','Outside_children 24-59mth sampled by surveyor','Outside_24to59mth Seen' UNION ALL
SELECT 402,'Outside_children 0-23mth sampled by surveyor','Outside_children 0-23mth sampled by surveyor','Outside_children 0-23mth sampled by surveyor' UNION ALL
SELECT 403,'Outside_children 0-23mth not finger marked','Outside_children 0-23mth not finger marked','Outside_children 0-23mth not finger marked' UNION ALL
SELECT 405,'Outside_children 24-59mth not finger marked (not immunized)','Outside_children 24-59mth not finger marked (not immunized)','Outside_24to59mth notMarked' UNION ALL
SELECT 406,'Outside_total number of children sampled (calculated)','Outside_total number of children sampled (calculated)','Outside_TOTAL Seen' UNION ALL
SELECT 407,'Outside_total number of children not finger marked (calculated)','Outside_total number of children not finger marked (calculated)','Outside_TOTAL Notmarked' UNION ALL
SELECT 408,'Outside_total number of children seen in settlement 4','Outside_total number of children seen in settlement 4','Outside_totSeet4' UNION ALL
SELECT 409,'Outside_total children not finger marked (not immunized) in settlement 4','Outside_total children not finger marked (not immunized) in settlement 4','Outside_totMist4' UNION ALL
SELECT 410,'Outside_total number of children sampled in settlement 5','Outside_total number of children sampled in settlement 5','Outside_totSeet5' UNION ALL
SELECT 411,'Outside_total number of children sampled in settlement 6','Outside_total number of children sampled in settlement 6','Outside_totSeet6' UNION ALL
SELECT 412,'Outside_total children not finger marked (not immunized) in settlement 5','Outside_total children not finger marked (not immunized) in settlement 5','Outside_totMist5' UNION ALL
SELECT 413,'Outside_total children not finger marked (not immunized) in settlement 6','Outside_total children not finger marked (not immunized) in settlement 6','Outside_totMist6' UNION ALL
SELECT 414,'Endprocess_HHsampled','Endprocess_HHsampled','Endprocess_HHsampled' UNION ALL
SELECT 415,'Endprocess_HHvisitedTEAMS','Endprocess_HHvisitedTEAMS','Endprocess_HHvisitedTEAMS' UNION ALL
SELECT 416,'Endprocess_ZeroDose','Endprocess_ZeroDose','Endprocess_ZeroDose' UNION ALL
SELECT 417,'Endprocess_TotalYoungest','Endprocess_TotalYoungest','Endprocess_TotalYoungest' UNION ALL
SELECT 418,'Endprocess_YoungstRI','Endprocess_YoungstRI','Endprocess_YoungstRI' UNION ALL
SELECT 419,'Endprocess_RAssessMrk','Endprocess_RAssessMrk','Endprocess_RAssessMrk' UNION ALL
SELECT 420,'Endprocess_RCorctCAT','Endprocess_RCorctCAT','Endprocess_RCorctCAT' UNION ALL
SELECT 421,'Endprocess_RIncorect','Endprocess_RIncorect','Endprocess_RIncorect' UNION ALL
SELECT 422,'Endprocess_RXAssessMrk','Endprocess_RXAssessMrk','Endprocess_RXAssessMrk' UNION ALL
SELECT 423,'Endprocess_RXCorctCAT','Endprocess_RXCorctCAT','Endprocess_RXCorctCAT' UNION ALL
SELECT 424,'Endprocess_RXIncorect','Endprocess_RXIncorect','Endprocess_RXIncorect' UNION ALL
SELECT 168,'Number of cVDPV and WPV cases','Number of cVDPV and WPV cases','Number of cVDPV and WPV cases' UNION ALL
SELECT 158,'Number of children missed due to all access issues','Number of children missed due to all access issues','Inaccessible Children' UNION ALL
SELECT 187,'Percent of refusals resolved during the previous month (both during campaigns and in between rounds)','Percent of refusals resolved during the previous month (both during campaigns and in between rounds)','Refusals Conversion' UNION ALL
SELECT 434,'Reason for inaccessible children - Perception of fear','Reason for inaccessible children - Perception of fear','Perception of fear' UNION ALL
SELECT 435,'Reason for inaccessible children - Local community not supportive','Reason for inaccessible children - Local community not supportive','Local community not supportive' UNION ALL
SELECT 436,'Reason for inaccessible children - Crime','Reason for inaccessible children - Crime','Crime' UNION ALL
SELECT 437,'Reason for inaccessible children - Militant / Anti-Govt Elements','Reason for inaccessible children - Militant / Anti-Govt Elements','Militant / Anti-Govt Elements' UNION ALL
SELECT 438,'Reason for inaccessible children - Security Operations / Incidents','Reason for inaccessible children - Security Operations / Incidents','Security Operations / Incidents' UNION ALL
SELECT 439,'Reason for inaccessible children - Management issues','Reason for inaccessible children - Management issues','Management issues' UNION ALL
SELECT 440,'Reason for inaccessible children - Environment issues','Reason for inaccessible children - Environment issues','Reason for inaccessible children - Environment issues' UNION ALL
SELECT 441,'Reason for inaccessible children - Political issues','Reason for inaccessible children - Political issues','Political issues' UNION ALL
SELECT 442,'% Reason for inaccessible children - Perception of fear','% Reason for inaccessible children - Perception of fear','% Perception of fear' UNION ALL
SELECT 443,'% Reason for inaccessible children - Local community not supportive','% Reason for inaccessible children - Local community not supportive','% Local community not supportive' UNION ALL
SELECT 444,'% Reason for inaccessible children - Crime','% Reason for inaccessible children - Crime','% Crime' UNION ALL
SELECT 445,'% Reason for inaccessible children - Militant / Anti-Govt Elements','% Reason for inaccessible children - Militant / Anti-Govt Elements','% Militant / Anti-Govt Elements' UNION ALL
SELECT 446,'% Reason for inaccessible children - Security Operations / Incidents','% Reason for inaccessible children - Security Operations / Incidents','% Security Operations / Incidents' UNION ALL
SELECT 447,'% Reason for inaccessible children - Management issues','% Reason for inaccessible children - Management issues','% Management issues' UNION ALL
SELECT 448,'% Reason for inaccessible children - Environment issues','% Reason for inaccessible children - Environment issues','% Environment issues' UNION ALL
SELECT 449,'% Reason for inaccessible children - Political issues','% Reason for inaccessible children - Political issues','% Political issues' UNION ALL
SELECT 451,'Reason for inaccessible children - No reason provided','Reason for inaccessible children - No reason provided','Reason for inaccessible children - No reason provided' UNION ALL
SELECT 450,'% Reason for inaccessible children - No reason provided','% Reason for inaccessible children - No reason provided','% No reason provided' UNION ALL
SELECT 431,'Number of non-polio AFP cases with zero doses of OPV','Number of non-polio AFP cases with zero doses of OPV','0 Doses' UNION ALL
SELECT 432,'Number of non-polio AFP cases with 1-3 doses of OPV','Number of non-polio AFP cases with 1-3 doses of OPV','1–3 Doses' UNION ALL
SELECT 433,'Number of non-polio AFP cases with 4+ doses of OPV','Number of non-polio AFP cases with 4+ doses of OPV','4+ Doses' UNION ALL
SELECT 294,'Endprocess_Percent vaccination influencer is vaccinator','Endprocess_Percent vaccination influencer is vaccinator','Vaccinator' UNION ALL
SELECT 257,'Endprocess_NOimmReas13 - There are other remedies available','Endprocess_NOimmReas13 - There are other remedies available','Other Remedies Avail.' UNION ALL
SELECT 326,'Endprocess_Pct of children absent due to child at farm','Endprocess_Pct of children absent due to child at farm','Farm' UNION ALL
SELECT 255,'Endprocess_NOimmReas11 - Polio is rare','Endprocess_NOimmReas11 - Polio is rare','Polio is Rare ' UNION ALL
SELECT 233,'UNICEF Staffing','Proportion of UNICEF Polio Positions in place at National + State / Province level','Human Resources' UNION ALL
SELECT 245,'Routine Immunization Coverage (DPT3 or PENTA3)','Proportion of children 12 months and under in HRDs who have received DPT3 or Penta3','RI Coverage'
)x;
-- INDICATOR CALCULATIONS --
INSERT INTO calculated_indicator_component
(indicator_id, indicator_component_id, calculation,created_at)
SELECT *,now() FROM (
SELECT
21 as indicator_id
,266 as indicator_component_id
,'PART_TO_BE_SUMMED' as calculation
UNION ALL
SELECT 21,268,'PART_TO_BE_SUMMED' UNION ALL
SELECT 21,267,'PART_TO_BE_SUMMED' UNION ALL
SELECT 21,251,'PART_TO_BE_SUMMED' UNION ALL
SELECT 21,264,'PART_TO_BE_SUMMED' UNION ALL
SELECT 24,266,'PART_TO_BE_SUMMED' UNION ALL
SELECT 24,267,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,438,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,434,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,435,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,436,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,437,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,439,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,440,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,441,'PART_TO_BE_SUMMED' UNION ALL
SELECT 158,451,'PART_TO_BE_SUMMED' UNION ALL
SELECT 164,55,'WHOLE' UNION ALL
SELECT 164,251,'PART' UNION ALL
SELECT 165,24,'PART' UNION ALL
SELECT 165,55,'WHOLE' UNION ALL
SELECT 166,55,'WHOLE' UNION ALL
SELECT 166,264,'PART' UNION ALL
SELECT 167,268,'PART' UNION ALL
SELECT 167,55,'WHOLE' UNION ALL
SELECT 168,69,'PART_TO_BE_SUMMED' UNION ALL
SELECT 168,160,'PART_TO_BE_SUMMED' UNION ALL
SELECT 168,70,'PART_TO_BE_SUMMED' UNION ALL
SELECT 169,43,'WHOLE' UNION ALL
SELECT 169,44,'PART' UNION ALL
SELECT 172,197,'PART' UNION ALL
SELECT 172,195,'WHOLE' UNION ALL
SELECT 173,472,'PART' UNION ALL
SELECT 173,195,'WHOLE' UNION ALL
SELECT 174,202,'PART' UNION ALL
SELECT 174,203,'WHOLE' UNION ALL
SELECT 178,34,'PART' UNION ALL
SELECT 178,33,'WHOLE' UNION ALL
SELECT 179,206,'PART' UNION ALL
SELECT 179,207,'WHOLE' UNION ALL
SELECT 180,38,'WHOLE' UNION ALL
SELECT 180,208,'PART' UNION ALL
SELECT 184,209,'PART' UNION ALL
SELECT 184,36,'WHOLE' UNION ALL
SELECT 185,36,'WHOLE' UNION ALL
SELECT 185,210,'PART' UNION ALL
SELECT 187,25,'WHOLE_OF_DIFFERENCE_DENOMINATOR' UNION ALL
SELECT 187,26,'PART_OF_DIFFERENCE' UNION ALL
SELECT 187,25,'WHOLE_OF_DIFFERENCE' UNION ALL
SELECT 189,213,'WHOLE_OF_DIFFERENCE_DENOMINATOR' UNION ALL
SELECT 189,214,'PART_OF_DIFFERENCE' UNION ALL
SELECT 189,213,'WHOLE_OF_DIFFERENCE' UNION ALL
SELECT 191,216,'PART' UNION ALL
SELECT 191,217,'WHOLE' UNION ALL
SELECT 194,196,'PART' UNION ALL
SELECT 194,195,'WHOLE' UNION ALL
SELECT 219,195,'WHOLE' UNION ALL
SELECT 219,221,'PART' UNION ALL
SELECT 222,27,'WHOLE' UNION ALL
SELECT 222,28,'PART' UNION ALL
SELECT 224,176,'PART' UNION ALL
SELECT 224,175,'WHOLE' UNION ALL
SELECT 226,463,'WHOLE' UNION ALL
SELECT 226,46,'PART' UNION ALL
SELECT 228,42,'PART' UNION ALL
SELECT 228,41,'WHOLE' UNION ALL
SELECT 230,37,'PART' UNION ALL
SELECT 230,38,'WHOLE' UNION ALL
SELECT 233,31,'WHOLE' UNION ALL
SELECT 233,32,'PART' UNION ALL
SELECT 239,40,'PART' UNION ALL
SELECT 239,36,'WHOLE' UNION ALL
SELECT 245,244,'PART' UNION ALL
SELECT 245,243,'WHOLE' UNION ALL
SELECT 251,250,'PART_TO_BE_SUMMED' UNION ALL
SELECT 251,249,'PART_TO_BE_SUMMED' UNION ALL
SELECT 251,246,'PART_TO_BE_SUMMED' UNION ALL
SELECT 251,248,'PART_TO_BE_SUMMED' UNION ALL
SELECT 251,247,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,262,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,263,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,255,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,254,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,253,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,252,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,259,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,258,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,260,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,257,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,256,'PART_TO_BE_SUMMED' UNION ALL
SELECT 264,261,'PART_TO_BE_SUMMED' UNION ALL
SELECT 265,251,'PART_TO_BE_SUMMED' UNION ALL
SELECT 265,268,'PART_TO_BE_SUMMED' UNION ALL
SELECT 265,267,'PART_TO_BE_SUMMED' UNION ALL
SELECT 265,266,'PART_TO_BE_SUMMED' UNION ALL
SELECT 265,264,'PART_TO_BE_SUMMED' UNION ALL
SELECT 271,270,'PART_TO_BE_SUMMED' UNION ALL
SELECT 271,269,'PART_TO_BE_SUMMED' UNION ALL
SELECT 272,271,'WHOLE' UNION ALL
SELECT 272,265,'PART' UNION ALL
SELECT 274,273,'PART' UNION ALL
SELECT 274,275,'WHOLE' UNION ALL
SELECT 276,414,'WHOLE_OF_DIFFERENCE_DENOMINATOR' UNION ALL
SELECT 276,414,'WHOLE_OF_DIFFERENCE' UNION ALL
SELECT 276,277,'PART_OF_DIFFERENCE' UNION ALL
SELECT 286,278,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,283,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,284,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,285,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,282,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,281,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,280,'PART_TO_BE_SUMMED' UNION ALL
SELECT 286,279,'PART_TO_BE_SUMMED' UNION ALL
SELECT 287,278,'PART' UNION ALL
SELECT 287,286,'WHOLE' UNION ALL
SELECT 288,279,'PART' UNION ALL
SELECT 288,286,'WHOLE' UNION ALL
SELECT 289,286,'WHOLE' UNION ALL
SELECT 289,280,'PART' UNION ALL
SELECT 290,286,'WHOLE' UNION ALL
SELECT 290,281,'PART' UNION ALL
SELECT 291,282,'PART' UNION ALL
SELECT 291,286,'WHOLE' UNION ALL
SELECT 292,286,'WHOLE' UNION ALL
SELECT 292,283,'PART' UNION ALL
SELECT 293,286,'WHOLE' UNION ALL
SELECT 293,284,'PART' UNION ALL
SELECT 294,285,'PART' UNION ALL
SELECT 294,286,'WHOLE' UNION ALL
SELECT 306,304,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,295,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,296,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,297,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,298,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,299,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,300,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,301,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,302,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,303,'PART_TO_BE_SUMMED' UNION ALL
SELECT 306,305,'PART_TO_BE_SUMMED' UNION ALL
SELECT 307,295,'PART' UNION ALL
SELECT 307,306,'WHOLE' UNION ALL
SELECT 308,306,'WHOLE' UNION ALL
SELECT 308,299,'PART' UNION ALL
SELECT 309,306,'WHOLE' UNION ALL
SELECT 309,303,'PART' UNION ALL
SELECT 310,296,'PART' UNION ALL
SELECT 310,306,'WHOLE' UNION ALL
SELECT 311,306,'WHOLE' UNION ALL
SELECT 311,300,'PART' UNION ALL
SELECT 312,306,'WHOLE' UNION ALL
SELECT 312,304,'PART' UNION ALL
SELECT 313,297,'PART' UNION ALL
SELECT 313,306,'WHOLE' UNION ALL
SELECT 314,301,'PART' UNION ALL
SELECT 314,306,'WHOLE' UNION ALL
SELECT 315,305,'PART' UNION ALL
SELECT 315,306,'WHOLE' UNION ALL
SELECT 316,298,'PART' UNION ALL
SELECT 316,306,'WHOLE' UNION ALL
SELECT 317,302,'PART' UNION ALL
SELECT 317,306,'WHOLE' UNION ALL
SELECT 318,267,'PART' UNION ALL
SELECT 318,265,'WHOLE' UNION ALL
SELECT 319,268,'PART' UNION ALL
SELECT 319,265,'WHOLE' UNION ALL
SELECT 320,251,'PART' UNION ALL
SELECT 320,265,'WHOLE' UNION ALL
SELECT 321,264,'PART' UNION ALL
SELECT 321,265,'WHOLE' UNION ALL
SELECT 322,266,'PART' UNION ALL
SELECT 322,265,'WHOLE' UNION ALL
SELECT 323,246,'PART' UNION ALL
SELECT 323,251,'WHOLE' UNION ALL
SELECT 324,247,'PART' UNION ALL
SELECT 324,251,'WHOLE' UNION ALL
SELECT 325,248,'PART' UNION ALL
SELECT 325,251,'WHOLE' UNION ALL
SELECT 326,249,'PART' UNION ALL
SELECT 326,251,'WHOLE' UNION ALL
SELECT 327,251,'WHOLE' UNION ALL
SELECT 327,250,'PART' UNION ALL
SELECT 328,252,'PART' UNION ALL
SELECT 328,264,'WHOLE' UNION ALL
SELECT 329,255,'PART' UNION ALL
SELECT 329,264,'WHOLE' UNION ALL
SELECT 330,264,'WHOLE' UNION ALL
SELECT 330,258,'PART' UNION ALL
SELECT 331,261,'PART' UNION ALL
SELECT 331,264,'WHOLE' UNION ALL
SELECT 332,253,'PART' UNION ALL
SELECT 332,264,'WHOLE' UNION ALL
SELECT 333,264,'WHOLE' UNION ALL
SELECT 333,256,'PART' UNION ALL
SELECT 334,264,'WHOLE' UNION ALL
SELECT 334,259,'PART' UNION ALL
SELECT 335,254,'PART' UNION ALL
SELECT 335,264,'WHOLE' UNION ALL
SELECT 336,264,'WHOLE' UNION ALL
SELECT 336,257,'PART' UNION ALL
SELECT 337,264,'WHOLE' UNION ALL
SELECT 337,260,'PART' UNION ALL
SELECT 338,264,'WHOLE' UNION ALL
SELECT 338,263,'PART' UNION ALL
SELECT 339,262,'PART' UNION ALL
SELECT 339,264,'WHOLE' UNION ALL
SELECT 345,344,'WHOLE' UNION ALL
SELECT 345,340,'PART' UNION ALL
SELECT 346,341,'PART' UNION ALL
SELECT 346,344,'WHOLE' UNION ALL
SELECT 347,342,'PART' UNION ALL
SELECT 347,344,'WHOLE' UNION ALL
SELECT 348,343,'PART' UNION ALL
SELECT 348,344,'WHOLE' UNION ALL
SELECT 442,158,'WHOLE' UNION ALL
SELECT 442,434,'PART' UNION ALL
SELECT 443,435,'PART' UNION ALL
SELECT 443,158,'WHOLE' UNION ALL
SELECT 444,158,'WHOLE' UNION ALL
SELECT 444,436,'PART' UNION ALL
SELECT 445,158,'WHOLE' UNION ALL
SELECT 445,437,'PART' UNION ALL
SELECT 446,438,'PART' UNION ALL
SELECT 446,158,'WHOLE' UNION ALL
SELECT 447,439,'PART' UNION ALL
SELECT 447,158,'WHOLE' UNION ALL
SELECT 448,440,'PART' UNION ALL
SELECT 448,158,'WHOLE' UNION ALL
SELECT 449,158,'WHOLE' UNION ALL
SELECT 449,441,'PART' UNION ALL
SELECT 450,158,'WHOLE' UNION ALL
SELECT 450,451,'PART' UNION ALL
SELECT 461,204,'WHOLE' UNION ALL
SELECT 461,176,'PART' UNION ALL
SELECT 470,439,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,441,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,451,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,438,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,437,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,436,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,435,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,434,'PART_TO_BE_SUMMED' UNION ALL
SELECT 470,440,'PART_TO_BE_SUMMED' UNION ALL
SELECT 475,55,'WHOLE' UNION ALL
SELECT 475,21,'PART'
)x;
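-- Illustrative sketch (commented out so this seed script still runs as-is): the rows
-- above describe how derived indicators are assembled from their components. For a
-- simple PART / WHOLE pair such as indicator 164 (component 251 as PART, component 55
-- as WHOLE), the calculation engine presumably divides the PART value by the WHOLE
-- value per campaign and region. The query below assumes a hypothetical raw-value table
-- datapoint(indicator_id, campaign_id, region_id, value); the real table and column
-- names in this schema may differ.
-- SELECT p.campaign_id,
--        p.region_id,
--        p.value / NULLIF(w.value, 0) AS indicator_164_value
-- FROM datapoint p
-- JOIN datapoint w
--   ON w.campaign_id = p.campaign_id
--  AND w.region_id   = p.region_id
-- WHERE p.indicator_id = 251   -- PART component of indicator 164
--   AND w.indicator_id = 55;   -- WHOLE component of indicator 164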
INSERT INTO indicator_tag
(id, tag_name)
SELECT 34,'Management Indicators' UNION ALL
SELECT 1,'WHO Independent Monitoring' UNION ALL
SELECT 2,'Nigeria ODK Data';
INSERT INTO indicator_tag
(id, tag_name, parent_tag_id)
SELECT 65,'Cold Chain',42 UNION ALL
SELECT 66,'Vaccine Management',42 UNION ALL
SELECT 63,'Funding',41 UNION ALL
SELECT 64,'Human Resources',41 UNION ALL
SELECT 58,'Convergence',40 UNION ALL
SELECT 59,'RI Coverage',40 UNION ALL
SELECT 60,'RI Defaulter',40 UNION ALL
SELECT 61,'RI Knowledge',40 UNION ALL
SELECT 62,'RI Stock',40 UNION ALL
SELECT 56,'Epidemiology',39 UNION ALL
SELECT 57,'Under Immunity',39 UNION ALL
SELECT 52,'Absences',38 UNION ALL
SELECT 53,'Refusals',38 UNION ALL
SELECT 54,'Awareness',38 UNION ALL
SELECT 55,'Microplan',38 UNION ALL
SELECT 47,'Motivation',37 UNION ALL
SELECT 48,'Presence',37 UNION ALL
SELECT 49,'Profile',37 UNION ALL
SELECT 50,'Skills',37 UNION ALL
SELECT 51,'Supervision',37 UNION ALL
SELECT 46,'Missed Children by Reason',36 UNION ALL
SELECT 43,'Access Approach',35 UNION ALL
SELECT 44,'Inaccessible Children',35 UNION ALL
SELECT 45,'Transit Point',35 UNION ALL
SELECT 35,'Access',34 UNION ALL
SELECT 36,'Coverage',34 UNION ALL
SELECT 37,'FLW Capacity',34 UNION ALL
SELECT 38,'FLW Performance',34 UNION ALL
SELECT 39,'Impact',34 UNION ALL
SELECT 40,'Polio+',34 UNION ALL
SELECT 41,'Resources',34 UNION ALL
SELECT 42,'Supply',34 UNION ALL
SELECT 11,'KAP: Campaign Awareness',7 UNION ALL
SELECT 12,'Missed Children: Missed',7 UNION ALL
SELECT 13,'Missed Children: Seen',7 UNION ALL
SELECT 14,'Missed Children: Unimmunized',7 UNION ALL
SELECT 15,'Missed Children: Operations',7 UNION ALL
SELECT 16,'Missed Children: Immunized',7 UNION ALL
SELECT 17,'KAP: Source',7 UNION ALL
SELECT 18,'KAP: Influence',7 UNION ALL
SELECT 19,'Survey',7 UNION ALL
SELECT 20,'Missed Children: Absence',7 UNION ALL
SELECT 21,'Missed Children: Reasons',7 UNION ALL
SELECT 22,'Missed Children: Refusal',7 UNION ALL
SELECT 23,'Missed Children: Security',7 UNION ALL
SELECT 3,'Newborn',2 UNION ALL
SELECT 4,'Census',2 UNION ALL
SELECT 8,'Missed Children',2 UNION ALL
SELECT 9,'Campaign',2 UNION ALL
SELECT 10,'Group',2 UNION ALL
SELECT 5,'Outside',1 UNION ALL
SELECT 6,'Redo',1 UNION ALL
SELECT 7,'End Process',1;
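-- Illustrative sketch (commented out so this seed script still runs as-is): the tags
-- above form a hierarchy through parent_tag_id, e.g. 'Coverage' (36) sits under
-- 'Management Indicators' (34). A recursive CTE can list every tag underneath a given
-- root; WITH RECURSIVE is PostgreSQL syntax and is an assumption about the target
-- database, not something stated in this script.
-- WITH RECURSIVE tag_tree AS (
--     SELECT id, tag_name, parent_tag_id
--     FROM indicator_tag
--     WHERE id = 34                       -- root tag: 'Management Indicators'
--   UNION ALL
--     SELECT child.id, child.tag_name, child.parent_tag_id
--     FROM indicator_tag child
--     JOIN tag_tree parent ON child.parent_tag_id = parent.id
-- )
-- SELECT id, tag_name FROM tag_tree;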
INSERT INTO indicator_to_tag
(indicator_id, indicator_tag_id)
SELECT 174,43 UNION ALL
SELECT 201,43 UNION ALL
SELECT 202,43 UNION ALL
SELECT 203,43 UNION ALL
SELECT 158,44 UNION ALL
SELECT 434,44 UNION ALL
SELECT 435,44 UNION ALL
SELECT 436,44 UNION ALL
SELECT 437,44 UNION ALL
SELECT 438,44 UNION ALL
SELECT 439,44 UNION ALL
SELECT 440,44 UNION ALL
SELECT 441,44 UNION ALL
SELECT 442,44 UNION ALL
SELECT 443,44 UNION ALL
SELECT 444,44 UNION ALL
SELECT 445,44 UNION ALL
SELECT 446,44 UNION ALL
SELECT 447,44 UNION ALL
SELECT 448,44 UNION ALL
SELECT 449,44 UNION ALL
SELECT 450,44 UNION ALL
SELECT 451,44 UNION ALL
SELECT 175,45 UNION ALL
SELECT 176,45 UNION ALL
SELECT 177,45 UNION ALL
SELECT 204,45 UNION ALL
SELECT 224,45 UNION ALL
SELECT 461,45 UNION ALL
SELECT 21,46 UNION ALL
SELECT 24,46 UNION ALL
SELECT 55,46 UNION ALL
SELECT 164,46 UNION ALL
SELECT 165,46 UNION ALL
SELECT 166,46 UNION ALL
SELECT 167,46 UNION ALL
SELECT 475,46 UNION ALL
SELECT 46,47 UNION ALL
SELECT 226,47 UNION ALL
SELECT 463,47 UNION ALL
SELECT 33,48 UNION ALL
SELECT 34,48 UNION ALL
SELECT 35,48 UNION ALL
SELECT 178,48 UNION ALL
SELECT 179,48 UNION ALL
SELECT 206,48 UNION ALL
SELECT 207,48 UNION ALL
SELECT 37,49 UNION ALL
SELECT 38,49 UNION ALL
SELECT 40,49 UNION ALL
SELECT 180,49 UNION ALL
SELECT 208,49 UNION ALL
SELECT 230,49 UNION ALL
SELECT 239,49 UNION ALL
SELECT 36,50 UNION ALL
SELECT 41,50 UNION ALL
SELECT 42,50 UNION ALL
SELECT 49,50 UNION ALL
SELECT 184,50 UNION ALL
SELECT 209,50 UNION ALL
SELECT 228,50 UNION ALL
SELECT 185,51 UNION ALL
SELECT 210,51 UNION ALL
SELECT 189,52 UNION ALL
SELECT 213,52 UNION ALL
SELECT 214,52 UNION ALL
SELECT 215,52 UNION ALL
SELECT 25,53 UNION ALL
SELECT 26,53 UNION ALL
SELECT 187,53 UNION ALL
SELECT 211,53 UNION ALL
SELECT 29,54 UNION ALL
SELECT 30,54 UNION ALL
SELECT 27,55 UNION ALL
SELECT 28,55 UNION ALL
SELECT 222,55 UNION ALL
SELECT 1,56 UNION ALL
SELECT 69,56 UNION ALL
SELECT 70,56 UNION ALL
SELECT 159,56 UNION ALL
SELECT 160,56 UNION ALL
SELECT 431,57 UNION ALL
SELECT 432,57 UNION ALL
SELECT 433,57 UNION ALL
SELECT 193,58 UNION ALL
SELECT 218,58 UNION ALL
SELECT 243,59 UNION ALL
SELECT 244,59 UNION ALL
SELECT 245,59 UNION ALL
SELECT 192,60 UNION ALL
SELECT 236,61 UNION ALL
SELECT 191,62 UNION ALL
SELECT 216,62 UNION ALL
SELECT 217,62 UNION ALL
SELECT 43,63 UNION ALL
SELECT 44,63 UNION ALL
SELECT 169,63 UNION ALL
SELECT 31,64 UNION ALL
SELECT 32,64 UNION ALL
SELECT 233,64 UNION ALL
SELECT 173,65 UNION ALL
SELECT 198,65 UNION ALL
SELECT 199,65 UNION ALL
SELECT 472,65 UNION ALL
SELECT 5,66 UNION ALL
SELECT 51,66 UNION ALL
SELECT 172,66 UNION ALL
SELECT 194,66 UNION ALL
SELECT 195,66 UNION ALL
SELECT 196,66 UNION ALL
SELECT 197,66 UNION ALL
SELECT 219,66 UNION ALL
SELECT 220,66 UNION ALL
SELECT 221,66 UNION ALL
SELECT 264,46 UNION ALL
SELECT 251,46 UNION ALL
SELECT 268,46 UNION ALL
SELECT 349,11 UNION ALL
SELECT 414,11 UNION ALL
SELECT 277,11 UNION ALL
SELECT 276,11 UNION ALL
SELECT 286,18 UNION ALL
SELECT 278,18 UNION ALL
SELECT 280,18 UNION ALL
SELECT 282,18 UNION ALL
SELECT 284,18 UNION ALL
SELECT 279,18 UNION ALL
SELECT 281,18 UNION ALL
SELECT 283,18 UNION ALL
SELECT 285,18 UNION ALL
SELECT 292,18 UNION ALL
SELECT 289,18 UNION ALL
SELECT 290,18 UNION ALL
SELECT 287,18 UNION ALL
SELECT 288,18 UNION ALL
SELECT 293,18 UNION ALL
SELECT 291,18 UNION ALL
SELECT 294,18 UNION ALL
SELECT 306,17 UNION ALL
SELECT 317,17 UNION ALL
SELECT 315,17 UNION ALL
SELECT 312,17 UNION ALL
SELECT 308,17 UNION ALL
SELECT 311,17 UNION ALL
SELECT 314,17 UNION ALL
SELECT 310,17 UNION ALL
SELECT 309,17 UNION ALL
SELECT 316,17 UNION ALL
SELECT 307,17 UNION ALL
SELECT 313,17 UNION ALL
SELECT 302,17 UNION ALL
SELECT 305,17 UNION ALL
SELECT 304,17 UNION ALL
SELECT 299,17 UNION ALL
SELECT 300,17 UNION ALL
SELECT 301,17 UNION ALL
SELECT 296,17 UNION ALL
SELECT 303,17 UNION ALL
SELECT 298,17 UNION ALL
SELECT 295,17 UNION ALL
SELECT 297,17 UNION ALL
SELECT 246,20 UNION ALL
SELECT 247,20 UNION ALL
SELECT 248,20 UNION ALL
SELECT 249,20 UNION ALL
SELECT 250,20 UNION ALL
SELECT 320,20 UNION ALL
SELECT 326,20 UNION ALL
SELECT 325,20 UNION ALL
SELECT 327,20 UNION ALL
SELECT 324,20 UNION ALL
SELECT 323,20 UNION ALL
SELECT 251,20 UNION ALL
SELECT 269,16 UNION ALL
SELECT 272,12 UNION ALL
SELECT 267,15 UNION ALL
SELECT 268,15 UNION ALL
SELECT 319,15 UNION ALL
SELECT 318,15 UNION ALL
SELECT 265,21 UNION ALL
SELECT 254,22 UNION ALL
SELECT 255,22 UNION ALL
SELECT 256,22 UNION ALL
SELECT 257,22 UNION ALL
SELECT 258,22 UNION ALL
SELECT 259,22 UNION ALL
SELECT 260,22 UNION ALL
SELECT 261,22 UNION ALL
SELECT 262,22 UNION ALL
SELECT 263,22 UNION ALL
SELECT 252,22 UNION ALL
SELECT 253,22 UNION ALL
SELECT 321,22 UNION ALL
SELECT 328,22 UNION ALL
SELECT 335,22 UNION ALL
SELECT 338,22 UNION ALL
SELECT 339,22 UNION ALL
SELECT 331,22 UNION ALL
SELECT 336,22 UNION ALL
SELECT 333,22 UNION ALL
SELECT 329,22 UNION ALL
SELECT 334,22 UNION ALL
SELECT 330,22 UNION ALL
SELECT 332,22 UNION ALL
SELECT 337,22 UNION ALL
SELECT 264,22 UNION ALL
SELECT 266,23 UNION ALL
SELECT 322,23 UNION ALL
SELECT 271,13 UNION ALL
SELECT 270,14 UNION ALL
SELECT 415,19 UNION ALL
SELECT 416,19 UNION ALL
SELECT 401,5 UNION ALL
SELECT 400,5 UNION ALL
SELECT 403,5 UNION ALL
SELECT 402,5 UNION ALL
SELECT 405,5 UNION ALL
SELECT 404,5 UNION ALL
SELECT 391,5 UNION ALL
SELECT 274,5 UNION ALL
SELECT 396,5 UNION ALL
SELECT 397,5 UNION ALL
SELECT 393,5 UNION ALL
SELECT 409,5 UNION ALL
SELECT 412,5 UNION ALL
SELECT 413,5 UNION ALL
SELECT 273,5 UNION ALL
SELECT 407,5 UNION ALL
SELECT 406,5 UNION ALL
SELECT 394,5 UNION ALL
SELECT 395,5 UNION ALL
SELECT 392,5 UNION ALL
SELECT 410,5 UNION ALL
SELECT 411,5 UNION ALL
SELECT 408,5 UNION ALL
SELECT 398,5 UNION ALL
SELECT 275,5 UNION ALL
SELECT 399,5 UNION ALL
SELECT 354,6 UNION ALL
SELECT 373,6 UNION ALL
SELECT 384,6 UNION ALL
SELECT 360,6 UNION ALL
SELECT 361,6 UNION ALL
SELECT 362,6 UNION ALL
SELECT 363,6 UNION ALL
SELECT 364,6 UNION ALL
SELECT 356,6 UNION ALL
SELECT 357,6 UNION ALL
SELECT 358,6 UNION ALL
SELECT 359,6 UNION ALL
SELECT 371,6 UNION ALL
SELECT 355,6 UNION ALL
SELECT 370,6 UNION ALL
SELECT 374,6 UNION ALL
SELECT 385,6 UNION ALL
SELECT 386,6 UNION ALL
SELECT 341,6 UNION ALL
SELECT 343,6 UNION ALL
SELECT 342,6 UNION ALL
SELECT 340,6 UNION ALL
SELECT 344,6 UNION ALL
SELECT 350,6 UNION ALL
SELECT 352,6 UNION ALL
SELECT 351,6 UNION ALL
SELECT 366,6 UNION ALL
SELECT 367,6 UNION ALL
SELECT 369,6 UNION ALL
SELECT 346,6 UNION ALL
SELECT 348,6 UNION ALL
SELECT 347,6 UNION ALL
SELECT 345,6 UNION ALL
SELECT 379,6 UNION ALL
SELECT 377,6 UNION ALL
SELECT 381,6 UNION ALL
SELECT 376,6 UNION ALL
SELECT 378,6 UNION ALL
SELECT 380,6 UNION ALL
SELECT 353,6 UNION ALL
SELECT 365,6 UNION ALL
SELECT 368,6 UNION ALL
SELECT 372,6 UNION ALL
SELECT 375,6 UNION ALL
SELECT 382,6 UNION ALL
SELECT 383,6 UNION ALL
SELECT 387,6 UNION ALL
SELECT 388,6 UNION ALL
SELECT 390,6 UNION ALL
SELECT 91,4 UNION ALL
SELECT 74,9 UNION ALL
SELECT 88,9 UNION ALL
SELECT 79,9 UNION ALL
SELECT 80,9 UNION ALL
SELECT 86,9 UNION ALL
SELECT 77,9 UNION ALL
SELECT 78,9 UNION ALL
SELECT 87,4 UNION ALL
SELECT 85,4 UNION ALL
SELECT 84,4 UNION ALL
SELECT 93,4 UNION ALL
SELECT 94,4 UNION ALL
SELECT 75,4 UNION ALL
SELECT 92,4 UNION ALL
SELECT 90,4 UNION ALL
SELECT 76,8 UNION ALL
SELECT 103,8 UNION ALL
SELECT 102,8 UNION ALL
SELECT 99,8 UNION ALL
SELECT 98,8 UNION ALL
SELECT 124,8 UNION ALL
SELECT 123,8 UNION ALL
SELECT 113,8 UNION ALL
SELECT 114,8 UNION ALL
SELECT 97,8 UNION ALL
SELECT 96,8 UNION ALL
SELECT 116,8 UNION ALL
SELECT 115,8 UNION ALL
SELECT 100,8 UNION ALL
SELECT 101,8 UNION ALL
SELECT 130,8 UNION ALL
SELECT 131,8 UNION ALL
SELECT 120,8 UNION ALL
SELECT 121,8 UNION ALL
SELECT 138,8 UNION ALL
SELECT 139,8 UNION ALL
SELECT 118,8 UNION ALL
SELECT 117,8 UNION ALL
SELECT 146,8 UNION ALL
SELECT 145,8 UNION ALL
SELECT 106,8 UNION ALL
SELECT 105,8 UNION ALL
SELECT 147,8 UNION ALL
SELECT 148,8 UNION ALL
SELECT 132,8 UNION ALL
SELECT 133,8 UNION ALL
SELECT 143,8 UNION ALL
SELECT 142,8 UNION ALL
SELECT 119,8 UNION ALL
SELECT 122,8 UNION ALL
SELECT 108,8 UNION ALL
SELECT 107,8 UNION ALL
SELECT 128,8 UNION ALL
SELECT 129,8 UNION ALL
SELECT 110,8 UNION ALL
SELECT 109,8 UNION ALL
SELECT 140,8 UNION ALL
SELECT 141,8 UNION ALL
SELECT 137,8 UNION ALL
SELECT 136,8 UNION ALL
SELECT 83,3 UNION ALL
SELECT 89,3 UNION ALL
SELECT 82,3 UNION ALL
SELECT 81,3 UNION ALL
SELECT 112,10 UNION ALL
SELECT 111,10 UNION ALL
SELECT 127,10 UNION ALL
SELECT 126,10 UNION ALL
SELECT 149,10 UNION ALL
SELECT 104,10 UNION ALL
SELECT 134,10 UNION ALL
SELECT 125,10 UNION ALL
SELECT 150,10 UNION ALL
SELECT 151,10 UNION ALL
SELECT 144,10 UNION ALL
SELECT 152,10 UNION ALL
SELECT 135,10 UNION ALL
SELECT 95,10;
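-- Illustrative sketch (commented out so this seed script still runs as-is):
-- indicator_to_tag is a plain many-to-many join table, so grouping indicators by tag is
-- a single join against indicator_tag. Both tables are populated in this script; no
-- other schema is assumed.
-- SELECT it.tag_name, COUNT(*) AS n_indicators
-- FROM indicator_to_tag itt
-- JOIN indicator_tag it ON it.id = itt.indicator_tag_id
-- GROUP BY it.tag_name
-- ORDER BY n_indicators DESC;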
-- CAMPAIGNS --
INSERT INTO campaign
(id, start_date,end_date,slug,campaign_type_id, office_id, created_at)
-- to do this in excel... :) --
-- CONCATENATE("SELECT ",A1,",'",TEXT(B1,"mm/dd/yy"),"','",TEXT(B1,"mm/dd/yy"),"','",D1,"' UNION ALL")
SELECT id, CAST(x.start_date AS DATE),CAST(x.end_date as date),x.slug, 1,1,now() FROM (
SELECT 99 as id,'11/01/13' as start_date,'11/01/13' as end_date,'nigeria-november-2013' as slug UNION ALL
SELECT 101,'09/01/13','09/01/13','nigeria-september-2013' UNION ALL
SELECT 102,'03/01/12','03/01/12','nigeria-march-2012' UNION ALL
SELECT 103,'06/01/12','06/01/12','nigeria-june-2012' UNION ALL
SELECT 104,'08/01/14','08/01/14','nigeria-august-2014' UNION ALL
SELECT 105,'03/01/13','03/01/13','nigeria-march-2013' UNION ALL
SELECT 106,'10/01/13','10/01/13','nigeria-october-2013' UNION ALL
SELECT 107,'05/01/13','05/01/13','nigeria-may-2013' UNION ALL
SELECT 108,'03/01/14','03/01/14','nigeria-march-2014' UNION ALL
SELECT 109,'02/01/13','02/01/13','nigeria-february-2013' UNION ALL
SELECT 110,'12/01/12','12/01/12','nigeria-december-2012' UNION ALL
SELECT 111,'06/01/14','06/01/14','nigeria-june-2014' UNION ALL
SELECT 112,'12/01/13','12/01/13','nigeria-december-2013' UNION ALL
SELECT 113,'09/01/12','09/01/12','nigeria-september-2012' UNION ALL
SELECT 114,'04/01/13','04/01/13','nigeria-april-2013' UNION ALL
SELECT 115,'04/01/14','04/01/14','nigeria-april-2014' UNION ALL
SELECT 116,'06/01/13','06/01/13','nigeria-june-2013' UNION ALL
SELECT 117,'01/01/14','01/01/14','nigeria-january-2014' UNION ALL
SELECT 118,'01/01/12','01/01/12','nigeria-january-2012' UNION ALL
SELECT 119,'08/01/12','08/01/12','nigeria-august-2012' UNION ALL
SELECT 120,'01/01/13','01/01/13','nigeria-january-2013' UNION ALL
SELECT 121,'10/01/12','10/01/12','nigeria-october-2012' UNION ALL
SELECT 122,'10/01/14','10/01/14','nigeria-october-2014' UNION ALL
SELECT 123,'08/01/13','08/01/13','nigeria-august-2013' UNION ALL
SELECT 124,'07/01/14','07/01/14','nigeria-july-2014' UNION ALL
SELECT 125,'05/01/14','05/01/14','nigeria-may-2014' UNION ALL
SELECT 126,'11/01/12','11/01/12','nigeria-november-2012' UNION ALL
SELECT 127,'07/01/12','07/01/12','nigeria-july-2012' UNION ALL
SELECT 128,'09/01/14','09/01/14','nigeria-september-2014' UNION ALL
SELECT 129,'04/01/12','04/01/12','nigeria-april-2012' UNION ALL
SELECT 130,'02/01/14','02/01/14','nigeria-february-2014' UNION ALL
SELECT 131,'07/01/13','07/01/13','nigeria-july-2013' UNION ALL
SELECT 132,'05/01/12','05/01/12','nigeria-may-2012' UNION ALL
SELECT 100,'11/01/14','11/01/14','nigeria-november-2014' UNION ALL
SELECT 201,'12/01/14','12/01/14','nigeria-2014-12-01' UNION ALL
SELECT 210,'01/01/15','01/01/15','nigeria-2015-01-01' UNION ALL
SELECT 211,'02/01/15','02/01/15','nigeria-2015-02-01' UNION ALL
SELECT 212,'03/01/15','03/01/15','nigeria-2015-03-01' UNION ALL
SELECT 216,'02/01/12','02/01/12','nigeria-2012-02-01' UNION ALL
SELECT 221,'04/01/15','04/01/15','nigeria-2015-04-01' UNION ALL
SELECT 222,'05/01/15','05/01/15','nigeria-2015-05-01' UNION ALL
SELECT 223,'06/01/15','06/01/15','nigeria-2015-06-01'
)x;
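-- Illustrative sketch (commented out so this seed script still runs as-is): the campaign
-- rows above are maintained by hand (see the Excel CONCATENATE note), so a quick check
-- for duplicate slugs is a cheap safeguard before other tables reference them.
-- SELECT slug, COUNT(*) AS n
-- FROM campaign
-- GROUP BY slug
-- HAVING COUNT(*) > 1;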
-- source_object_map --
INSERT INTO source_object_map
(master_object_id, source_object_code, content_type, mapped_by_id)
SELECT *,1 FROM (
SELECT * FROM (
SELECT 32 as master_object_id ,'Number of Unicef polio positions in their posts in PBR-approved structures' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target number of Unicef polio positions in PBR-approved structures' as source_object_code,'indicator' as content_type UNION ALL
SELECT 24 as master_object_id ,'Number of children missed due to other reasons' as source_object_code,'indicator' as content_type UNION ALL
SELECT 25 as master_object_id ,'Number of refusals before re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 26 as master_object_id ,'Number of refusals after re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,'Number of target social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 40 as master_object_id ,'Number of female social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 67 as master_object_id ,'Percentage of States/Regions with OPV supply arriving at state/region level in sufficient time before campaign' as source_object_code,'indicator' as content_type UNION ALL
SELECT 418 as master_object_id ,'YoungstRI' as source_object_code,'indicator' as content_type UNION ALL
SELECT 420 as master_object_id ,'RCorctCAT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 417 as master_object_id ,'TotalYoungest' as source_object_code,'indicator' as content_type UNION ALL
SELECT 421 as master_object_id ,'RIncorect' as source_object_code,'indicator' as content_type UNION ALL
SELECT 415 as master_object_id ,'HHvisitedTEAMS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 423 as master_object_id ,'RXCorctCAT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 422 as master_object_id ,'RXAssessMrk' as source_object_code,'indicator' as content_type UNION ALL
SELECT 424 as master_object_id ,'RXIncorect' as source_object_code,'indicator' as content_type UNION ALL
SELECT 269 as master_object_id ,'Sum of Marked0to59' as source_object_code,'indicator' as content_type UNION ALL
SELECT 270 as master_object_id ,'Sum of UnImmun0to59' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,'TOTAL teams Checked' as source_object_code,'indicator' as content_type UNION ALL
SELECT 34 as master_object_id ,'# HR areas (Clusters) with social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,' # target social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 29 as master_object_id ,'# caregivers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 25 as master_object_id ,'# refusals before re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 28 as master_object_id ,'# Microplans incoroporating social data' as source_object_code,'indicator' as content_type UNION ALL
SELECT 44 as master_object_id ,'Amount committed' as source_object_code,'indicator' as content_type UNION ALL
SELECT 53 as master_object_id ,'# districts having NO stock-outs of OPV' as source_object_code,'indicator' as content_type UNION ALL
SELECT 37 as master_object_id ,'# teams w/ at least one female worker' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,'# teams' as source_object_code,'indicator' as content_type UNION ALL
SELECT 62 as master_object_id ,'# w/ capacity' as source_object_code,'indicator' as content_type UNION ALL
SELECT 33 as master_object_id ,' # of HR areas (clusters) targeted ' as source_object_code,'indicator' as content_type UNION ALL
SELECT 27 as master_object_id ,'# Microplans in LPD' as source_object_code,'indicator' as content_type UNION ALL
SELECT 24 as master_object_id ,'Other reasons' as source_object_code,'indicator' as content_type UNION ALL
SELECT 26 as master_object_id ,'# refusals after re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 40 as master_object_id ,'# female social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 55 as master_object_id ,'# of targeted under-five children' as source_object_code,'indicator' as content_type UNION ALL
SELECT 43 as master_object_id ,'Amount TOTAL FRR funds' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'# in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 5 as master_object_id ,'# vaccine doses used' as source_object_code,'indicator' as content_type UNION ALL
SELECT 34 as master_object_id ,'# HR areas with social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 32 as master_object_id ,'Number of core polio communication personnel in place in a country programme' as source_object_code,'indicator' as content_type UNION ALL
SELECT 27 as master_object_id ,'# Microplans in High Risk District' as source_object_code,'indicator' as content_type UNION ALL
SELECT 41 as master_object_id ,'# front line workers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target number of core polio personnel in place in a country programme' as source_object_code,'indicator' as content_type UNION ALL
SELECT 93 as master_object_id ,'CensusNewBornsF' as source_object_code,'indicator' as content_type UNION ALL
SELECT 49 as master_object_id ,'# trained on RI in past 6 mos.' as source_object_code,'indicator' as content_type UNION ALL
SELECT 51 as master_object_id ,'# children vaccined' as source_object_code,'indicator' as content_type UNION ALL
SELECT 41 as master_object_id ,' # front line workers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target core polio personnel in place in a country programme' as source_object_code,'indicator' as content_type UNION ALL
SELECT 30 as master_object_id ,'# aware' as source_object_code,'indicator' as content_type UNION ALL
SELECT 33 as master_object_id ,' # of HR areas targeted ' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,'Total # of targetted social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,'# target social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 49 as master_object_id ,'# trained on RI in past 6 months.' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target core polio communication personnel in place in a country programme' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,' # teams' as source_object_code,'indicator' as content_type UNION ALL
SELECT 42 as master_object_id ,'# workers w/ IPC skills' as source_object_code,'indicator' as content_type UNION ALL
SELECT 32 as master_object_id ,'Number of core polio personnel in place in a country programme' as source_object_code,'indicator' as content_type UNION ALL
SELECT 94 as master_object_id ,'CensusNewBornsM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 62 as master_object_id ,'# health facilities w/ capacity' as source_object_code,'indicator' as content_type UNION ALL
SELECT 83 as master_object_id ,'Tot_Newborns' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'# received payment timely' as source_object_code,'indicator' as content_type UNION ALL
SELECT 62 as master_object_id ,'% w/ capacity' as source_object_code,'indicator' as content_type UNION ALL
SELECT 85 as master_object_id ,'Census2_11MoF' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target # of core polio communication' as source_object_code,'indicator' as content_type UNION ALL
SELECT 84 as master_object_id ,'Census2_11MoM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 32 as master_object_id ,'# of core polio communication personnel in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 33 as master_object_id ,'# of HR areas targeted' as source_object_code,'indicator' as content_type UNION ALL
SELECT 69 as master_object_id ,'Number of cases of cVDPV2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 162 as master_object_id ,'Number of cases of iVDPV2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 70 as master_object_id ,'Number of cases of WPV1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 160 as master_object_id ,'Number of cases of WPV3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 159 as master_object_id ,'Number of cases of aVDPV2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 161 as master_object_id ,'Number of cases of WPV1WPV3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target number of core polio communications in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 40 as master_object_id ,'Number of social mobilisers who are female' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,'# of teams' as source_object_code,'indicator' as content_type UNION ALL
SELECT 90 as master_object_id ,'Tot_2_11Months' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target number of core polio communication positions' as source_object_code,'indicator' as content_type UNION ALL
SELECT 28 as master_object_id ,'# Microplans incorporating social data' as source_object_code,'indicator' as content_type UNION ALL
SELECT 32 as master_object_id ,'# of core polio communication in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,'Target number of social mobilisers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 21 as master_object_id ,'All missed children' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'Number of social mobilisers who were paid on time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 91 as master_object_id ,'Census12_59MoF' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'Number of social mobilisers in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 56 as master_object_id ,'# of subregional units' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target # of core polio communication ' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'Number of social mobilizers who were paid on time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'Number of social mobilizers paid on time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'Number of social mobilizers in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 43 as master_object_id ,'Amount FRR' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'# of social mobilizers who received payment on time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 67 as master_object_id ,'% with OPV arriving in sufficient time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 51 as master_object_id ,'# of children vaccined' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,'Target # of social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'# of social mobilizers in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 34 as master_object_id ,'# of HR areas with social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 57 as master_object_id ,'# of subregional units where OPV arrived in sufficient time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 87 as master_object_id ,'Census12_59MoM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'# of social mobilizers paid on time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 40 as master_object_id ,'# of female social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 35 as master_object_id ,'Target number of social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 37 as master_object_id ,'# polio teams w/ at least one female worker' as source_object_code,'indicator' as content_type UNION ALL
SELECT 263 as master_object_id ,'NOimmReas19' as source_object_code,'indicator' as content_type UNION ALL
SELECT 92 as master_object_id ,'Tot_12_59Months' as source_object_code,'indicator' as content_type UNION ALL
SELECT 416 as master_object_id ,'ZeroDose' as source_object_code,'indicator' as content_type UNION ALL
SELECT 257 as master_object_id ,'NOimmReas13' as source_object_code,'indicator' as content_type UNION ALL
SELECT 256 as master_object_id ,'NOimmReas12' as source_object_code,'indicator' as content_type UNION ALL
SELECT 255 as master_object_id ,'NOimmReas11' as source_object_code,'indicator' as content_type UNION ALL
SELECT 254 as master_object_id ,'NOimmReas10' as source_object_code,'indicator' as content_type UNION ALL
SELECT 261 as master_object_id ,'NOimmReas17' as source_object_code,'indicator' as content_type UNION ALL
SELECT 260 as master_object_id ,'NOimmReas16' as source_object_code,'indicator' as content_type UNION ALL
SELECT 295 as master_object_id ,'STannounc' as source_object_code,'indicator' as content_type UNION ALL
SELECT 270 as master_object_id ,'UnImmun0to59' as source_object_code,'indicator' as content_type UNION ALL
SELECT 246 as master_object_id ,'NOimmReas3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 268 as master_object_id ,'NOimmReas2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 267 as master_object_id ,'NOimmReas1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 250 as master_object_id ,'NOimmReas7' as source_object_code,'indicator' as content_type UNION ALL
SELECT 249 as master_object_id ,'NOimmReas6' as source_object_code,'indicator' as content_type UNION ALL
SELECT 75 as master_object_id ,'Tot_Census' as source_object_code,'indicator' as content_type UNION ALL
SELECT 247 as master_object_id ,'NOimmReas4' as source_object_code,'indicator' as content_type UNION ALL
SELECT 262 as master_object_id ,'NOimmReas18' as source_object_code,'indicator' as content_type UNION ALL
SELECT 82 as master_object_id ,'VaxNewBornsF' as source_object_code,'indicator' as content_type UNION ALL
SELECT 81 as master_object_id ,'VaxNewBornsM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 296 as master_object_id ,'SRadio' as source_object_code,'indicator' as content_type UNION ALL
SELECT 89 as master_object_id ,'Tot_VaxNewBorn' as source_object_code,'indicator' as content_type UNION ALL
SELECT 299 as master_object_id ,'SMosque' as source_object_code,'indicator' as content_type UNION ALL
SELECT 298 as master_object_id ,'SReiliglead' as source_object_code,'indicator' as content_type UNION ALL
SELECT 303 as master_object_id ,'SRelative' as source_object_code,'indicator' as content_type UNION ALL
SELECT 297 as master_object_id ,'STradlead' as source_object_code,'indicator' as content_type UNION ALL
SELECT 77 as master_object_id ,'Vax2_11MoF' as source_object_code,'indicator' as content_type UNION ALL
SELECT 302 as master_object_id ,'Sbanner' as source_object_code,'indicator' as content_type UNION ALL
SELECT 305 as master_object_id ,'Scommmob' as source_object_code,'indicator' as content_type UNION ALL
SELECT 277 as master_object_id ,'SNOTAWARE' as source_object_code,'indicator' as content_type UNION ALL
SELECT 266 as master_object_id ,'NOimmReas20' as source_object_code,'indicator' as content_type UNION ALL
SELECT 248 as master_object_id ,'NOimmReas5' as source_object_code,'indicator' as content_type UNION ALL
SELECT 300 as master_object_id ,'SNewspaper' as source_object_code,'indicator' as content_type UNION ALL
SELECT 78 as master_object_id ,'Vax2_11MoM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 86 as master_object_id ,'Tot_Vax2_11Mo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 279 as master_object_id ,'Influence5' as source_object_code,'indicator' as content_type UNION ALL
SELECT 284 as master_object_id ,'Influence4' as source_object_code,'indicator' as content_type UNION ALL
SELECT 283 as master_object_id ,'Influence7' as source_object_code,'indicator' as content_type UNION ALL
SELECT 281 as master_object_id ,'Influence6' as source_object_code,'indicator' as content_type UNION ALL
SELECT 278 as master_object_id ,'Influence1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 80 as master_object_id ,'Vax12_59MoF' as source_object_code,'indicator' as content_type UNION ALL
SELECT 282 as master_object_id ,'Influence3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 280 as master_object_id ,'Influence2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 79 as master_object_id ,'Vax12_59MoM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 88 as master_object_id ,'Tot_Vax12_59Mo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 258 as master_object_id ,'NOimmReas14' as source_object_code,'indicator' as content_type UNION ALL
SELECT 285 as master_object_id ,'Influence8' as source_object_code,'indicator' as content_type UNION ALL
SELECT 414 as master_object_id ,'HHsampled' as source_object_code,'indicator' as content_type UNION ALL
SELECT 74 as master_object_id ,'Tot_Vax' as source_object_code,'indicator' as content_type UNION ALL
SELECT 76 as master_object_id ,'Tot_Missed' as source_object_code,'indicator' as content_type UNION ALL
SELECT 259 as master_object_id ,'NOimmReas15' as source_object_code,'indicator' as content_type UNION ALL
SELECT 253 as master_object_id ,'NOimmReas9' as source_object_code,'indicator' as content_type UNION ALL
SELECT 269 as master_object_id ,'Marked0to59' as source_object_code,'indicator' as content_type UNION ALL
SELECT 252 as master_object_id ,'NOimmReas8' as source_object_code,'indicator' as content_type UNION ALL
SELECT 304 as master_object_id ,'SHworker' as source_object_code,'indicator' as content_type UNION ALL
SELECT 301 as master_object_id ,'SPoster' as source_object_code,'indicator' as content_type UNION ALL
SELECT 382 as master_object_id ,'ReasonNotGiven' as source_object_code,'indicator' as content_type UNION ALL
SELECT 375 as master_object_id ,'PoliticalDifferences' as source_object_code,'indicator' as content_type UNION ALL
SELECT 370 as master_object_id ,'NotImmRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 386 as master_object_id ,'SettlementsRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 390 as master_object_id ,'TargetRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 352 as master_object_id ,'ChildNCShool' as source_object_code,'indicator' as content_type UNION ALL
SELECT 341 as master_object_id ,'COMImmRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 368 as master_object_id ,'NoNeedFelt' as source_object_code,'indicator' as content_type UNION ALL
SELECT 363 as master_object_id ,'IMMSCRelNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 350 as master_object_id ,'ChildAbsent' as source_object_code,'indicator' as content_type UNION ALL
SELECT 354 as master_object_id ,'COMImmRedoABSENT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 362 as master_object_id ,'IMMSCOtherNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 376 as master_object_id ,'Reason1ABS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 351 as master_object_id ,'ChildNCOther' as source_object_code,'indicator' as content_type UNION ALL
SELECT 359 as master_object_id ,'IMMOTTradNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 367 as master_object_id ,'NoNCShools' as source_object_code,'indicator' as content_type UNION ALL
SELECT 377 as master_object_id ,'Reason2ABS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 355 as master_object_id ,'HHRevisited' as source_object_code,'indicator' as content_type UNION ALL
SELECT 342 as master_object_id ,'RELImmRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 374 as master_object_id ,'OTRevisited' as source_object_code,'indicator' as content_type UNION ALL
SELECT 385 as master_object_id ,'SCRevisited' as source_object_code,'indicator' as content_type UNION ALL
SELECT 360 as master_object_id ,'ImmRedoABSENT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 369 as master_object_id ,'NoNOCOther' as source_object_code,'indicator' as content_type UNION ALL
SELECT 373 as master_object_id ,'OTHERImRedoABSENT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 379 as master_object_id ,'Reason4ABS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 389 as master_object_id ,'MissedRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 353 as master_object_id ,'ChildSick' as source_object_code,'indicator' as content_type UNION ALL
SELECT 371 as master_object_id ,'NotImmRedoABSENT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 381 as master_object_id ,'Reason6ABS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 388 as master_object_id ,'UnhappyWith' as source_object_code,'indicator' as content_type UNION ALL
SELECT 361 as master_object_id ,'IMMSCCommNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 419 as master_object_id ,'RAssessMrk' as source_object_code,'indicator' as content_type UNION ALL
SELECT 380 as master_object_id ,'Reason5ABS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 365 as master_object_id ,'NoCaregiver' as source_object_code,'indicator' as content_type UNION ALL
SELECT 384 as master_object_id ,'RELImmRedoABSENT' as source_object_code,'indicator' as content_type UNION ALL
SELECT 357 as master_object_id ,'IMMOTOtherNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 364 as master_object_id ,'IMMSCTradNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 358 as master_object_id ,'IMMOTRelNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 366 as master_object_id ,'NoHHRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 378 as master_object_id ,'Reason3ABS' as source_object_code,'indicator' as content_type UNION ALL
SELECT 340 as master_object_id ,'ImmRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 356 as master_object_id ,'IMMOTCommNC' as source_object_code,'indicator' as content_type UNION ALL
SELECT 343 as master_object_id ,'OTHERImRedo' as source_object_code,'indicator' as content_type UNION ALL
SELECT 383 as master_object_id ,'ReligiousBelief' as source_object_code,'indicator' as content_type UNION ALL
SELECT 344 as master_object_id ,'NonCompliance' as source_object_code,'indicator' as content_type UNION ALL
SELECT 372 as master_object_id ,'OpvSafety' as source_object_code,'indicator' as content_type UNION ALL
SELECT 387 as master_object_id ,'TooManyRounds' as source_object_code,'indicator' as content_type UNION ALL
SELECT 402 as master_object_id ,'0to23mth Seen' as source_object_code,'indicator' as content_type UNION ALL
SELECT 403 as master_object_id ,'0to23mth notMarked' as source_object_code,'indicator' as content_type UNION ALL
SELECT 405 as master_object_id ,'24to59mth notMarked' as source_object_code,'indicator' as content_type UNION ALL
SELECT 275 as master_object_id ,'TOTAL Seen' as source_object_code,'indicator' as content_type UNION ALL
SELECT 395 as master_object_id ,'totSeet2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 392 as master_object_id ,'totSeet3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 394 as master_object_id ,'totSeet1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 411 as master_object_id ,'totSeet6' as source_object_code,'indicator' as content_type UNION ALL
SELECT 408 as master_object_id ,'totSeet4' as source_object_code,'indicator' as content_type UNION ALL
SELECT 410 as master_object_id ,'totSeet5' as source_object_code,'indicator' as content_type UNION ALL
SELECT 400 as master_object_id ,'0to9mth Seen' as source_object_code,'indicator' as content_type UNION ALL
SELECT 397 as master_object_id ,'totMist2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 393 as master_object_id ,'totMist3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 396 as master_object_id ,'totMist1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 413 as master_object_id ,'totMist6' as source_object_code,'indicator' as content_type UNION ALL
SELECT 273 as master_object_id ,'TOTAL Notmarked' as source_object_code,'indicator' as content_type UNION ALL
SELECT 409 as master_object_id ,'totMist4' as source_object_code,'indicator' as content_type UNION ALL
SELECT 412 as master_object_id ,'totMist5' as source_object_code,'indicator' as content_type UNION ALL
SELECT 399 as master_object_id ,'Unvaccinated this round' as source_object_code,'indicator' as content_type UNION ALL
SELECT 404 as master_object_id ,'24to59mth Seen' as source_object_code,'indicator' as content_type UNION ALL
SELECT 398 as master_object_id ,'numberof Locations' as source_object_code,'indicator' as content_type UNION ALL
SELECT 401 as master_object_id ,'0to9mth notMarked' as source_object_code,'indicator' as content_type UNION ALL
SELECT 161 as master_object_id ,'Number of cases of W1W3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 451 as master_object_id ,'Reason for inaccessible children - No reason provided' as source_object_code,'indicator' as content_type UNION ALL
SELECT 441 as master_object_id ,'Political issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 440 as master_object_id ,'Environment issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 439 as master_object_id ,'Management issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 438 as master_object_id ,'Security Operations / Incidents' as source_object_code,'indicator' as content_type UNION ALL
SELECT 437 as master_object_id ,'Militant / Anti-Govt Elements' as source_object_code,'indicator' as content_type UNION ALL
SELECT 436 as master_object_id ,'Crime' as source_object_code,'indicator' as content_type UNION ALL
SELECT 435 as master_object_id ,'Local community not supportive' as source_object_code,'indicator' as content_type UNION ALL
SELECT 434 as master_object_id ,'Perception of fear' as source_object_code,'indicator' as content_type UNION ALL
SELECT 198 as master_object_id ,'Number of functional active cold chain equipment in the district' as source_object_code,'indicator' as content_type UNION ALL
SELECT 221 as master_object_id ,'Number of HRDs that have polio vaccine wastage rate in SIAs between 5 and 15%' as source_object_code,'indicator' as content_type UNION ALL
SELECT 220 as master_object_id ,'Vaccine wastage rate' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'Number of social mobilizers in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 433 as master_object_id ,'d4' as source_object_code,'indicator' as content_type UNION ALL
SELECT 432 as master_object_id ,'d1_3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 431 as master_object_id ,'d0' as source_object_code,'indicator' as content_type UNION ALL
SELECT 69 as master_object_id ,'cVDPV2' as source_object_code,'indicator' as content_type UNION ALL
SELECT 70 as master_object_id ,'WPV1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 70 as master_object_id ,'Number of cases of WPV 1' as source_object_code,'indicator' as content_type UNION ALL
SELECT 243 as master_object_id ,'Number of children 12 months and under' as source_object_code,'indicator' as content_type UNION ALL
SELECT 244 as master_object_id ,'Number of children under 12 months who received DPT3 or Penta3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 177 as master_object_id ,'# of children vaccinated at transit points last month' as source_object_code,'indicator' as content_type UNION ALL
SELECT 158 as master_object_id ,'Number of children missed due to all access issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 43 as master_object_id ,'Amount total requested FRR funds' as source_object_code,'indicator' as content_type UNION ALL
SELECT 44 as master_object_id ,'Amount FRR funds committed' as source_object_code,'indicator' as content_type UNION ALL
SELECT 217 as master_object_id ,'Number of RI sessions monitored' as source_object_code,'indicator' as content_type UNION ALL
SELECT 216 as master_object_id ,'Number of RI sessions monitored having stockouts of any vaccine in the last month' as source_object_code,'indicator' as content_type UNION ALL
SELECT 195 as master_object_id ,'Number of high risk districts' as source_object_code,'indicator' as content_type UNION ALL
SELECT 218 as master_object_id ,'Number of HR districts with locations where OPV is delivered together with any other polio-funded services demanded by the community' as source_object_code,'indicator' as content_type UNION ALL
SELECT 192 as master_object_id ,'Number of RI defaulters mobilized by social mobilizers last month (with accessibility breakdown)' as source_object_code,'indicator' as content_type UNION ALL
SELECT 29 as master_object_id ,'Number of caregivers in HR districts' as source_object_code,'indicator' as content_type UNION ALL
SELECT 197 as master_object_id ,'# of HRD which reported on balance SIA vaccine stocks after last SIA round' as source_object_code,'indicator' as content_type UNION ALL
SELECT 199 as master_object_id ,'Total number of all active cold chain equipment in the district' as source_object_code,'indicator' as content_type UNION ALL
SELECT 51 as master_object_id ,'Number of children vaccinated in HRD' as source_object_code,'indicator' as content_type UNION ALL
SELECT 5 as master_object_id ,'Number of vaccine doses used in HRD' as source_object_code,'indicator' as content_type UNION ALL
SELECT 196 as master_object_id ,'HR District did not receive polio vaccine supply at least 3 days before the planned start date of campaign (yes/no)' as source_object_code,'indicator' as content_type UNION ALL
SELECT 210 as master_object_id ,'Number of social mobilizers who received on-the-job supervision during their last working week' as source_object_code,'indicator' as content_type UNION ALL
SELECT 209 as master_object_id ,'Number of SMs trained or refreshed with the integrated health package in the last 6 months' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,'Number of vaccination teams' as source_object_code,'indicator' as content_type UNION ALL
SELECT 208 as master_object_id ,'Number of vaccination teams with at least 1 member from the local community' as source_object_code,'indicator' as content_type UNION ALL
SELECT 207 as master_object_id ,'Target # of social mobilizers and supervisors' as source_object_code,'indicator' as content_type UNION ALL
SELECT 206 as master_object_id ,'Number of SMs and supervisors in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 33 as master_object_id ,'Number of HR sub-districts' as source_object_code,'indicator' as content_type UNION ALL
SELECT 34 as master_object_id ,'Number of HR sub-districts with at least 1 permanent SM' as source_object_code,'indicator' as content_type UNION ALL
SELECT 66 as master_object_id ,'# health fcilities having NO stock-outs of OPV' as source_object_code,'indicator' as content_type UNION ALL
SELECT 244 as master_object_id ,'# children received Penta3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 45 as master_object_id ,'Amount FRR updated amount' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'# received payment on time' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,' # polio teams' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'# social mobilizers in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 31 as master_object_id ,'Target # of core polio communication personnel' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'Number of social mobilizers who received timely payment for last campaign/month''s salary' as source_object_code,'indicator' as content_type UNION ALL
SELECT 42 as master_object_id ,'Number of vaccinators and SMs operating in HRDs trained on professional IPC package in last 6 months' as source_object_code,'indicator' as content_type UNION ALL
SELECT 37 as master_object_id ,'Number of vaccination teams with at least 1 female member' as source_object_code,'indicator' as content_type UNION ALL
SELECT 42 as master_object_id ,'Number of vaccinators and SMs operating in HRD who have been trained on professional Inter Personal Communication packaged in the last 6 months' as source_object_code,'indicator' as content_type UNION ALL
SELECT 41 as master_object_id ,'Number of vaccinators and SMs' as source_object_code,'indicator' as content_type UNION ALL
SELECT 55 as master_object_id ,'Number of children targeted in high-risk districts' as source_object_code,'indicator' as content_type UNION ALL
SELECT 37 as master_object_id ,'Number of vaccination teams with at least one female' as source_object_code,'indicator' as content_type UNION ALL
SELECT 41 as master_object_id ,'Number of vaccinators and social mobilizers' as source_object_code,'indicator' as content_type UNION ALL
SELECT 95 as master_object_id ,'spec_grp_choice' as source_object_code,'indicator' as content_type UNION ALL
SELECT 208 as master_object_id ,'TSettle' as source_object_code,'indicator' as content_type UNION ALL
SELECT 214 as master_object_id ,'Number of absences after re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 213 as master_object_id ,'Number of absences before re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 432 as master_object_id ,'Number of non-polio AFP cases with 1-3 doses of OPV' as source_object_code,'indicator' as content_type UNION ALL
SELECT 431 as master_object_id ,'Number of non-polio AFP cases with zero doses of OPV' as source_object_code,'indicator' as content_type UNION ALL
SELECT 433 as master_object_id ,'Number of non-polio AFP cases with 4+ doses of OPV' as source_object_code,'indicator' as content_type UNION ALL
SELECT 70 as master_object_id ,'Number of WPV1 cases' as source_object_code,'indicator' as content_type UNION ALL
SELECT 69 as master_object_id ,'Number of cVDPV2 cases' as source_object_code,'indicator' as content_type UNION ALL
SELECT 203 as master_object_id ,'Is an access-challenged district' as source_object_code,'indicator' as content_type UNION ALL
SELECT 204 as master_object_id ,'Total number of LT vaccination transit points planned by the programme' as source_object_code,'indicator' as content_type UNION ALL
SELECT 51 as master_object_id ,'Number of children vaccined in HRD' as source_object_code,'indicator' as content_type UNION ALL
SELECT 33 as master_object_id ,'Number of high risk sub-districts' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'Number of social mobilizers receiving timely payment for last campaign' as source_object_code,'indicator' as content_type UNION ALL
SELECT 207 as master_object_id ,'Target number of social mobilizers and supervisors' as source_object_code,'indicator' as content_type UNION ALL
SELECT 34 as master_object_id ,'Number of high risk sub-districts covered by at least 1 social mobilizer' as source_object_code,'indicator' as content_type UNION ALL
SELECT 177 as master_object_id ,'Number of children vaccinated at transit points last month' as source_object_code,'indicator' as content_type UNION ALL
SELECT 38 as master_object_id ,'# of vaccination teams in HRA' as source_object_code,'indicator' as content_type UNION ALL
SELECT 202 as master_object_id ,'Is an access-challenged district that has a specific access approach identified' as source_object_code,'indicator' as content_type UNION ALL
SELECT 175 as master_object_id ,'Number of established LT vaccination transit points' as source_object_code,'indicator' as content_type UNION ALL
SELECT 463 as master_object_id ,'number of social mobilisers participating the telephone survey' as source_object_code,'indicator' as content_type UNION ALL
SELECT 209 as master_object_id ,'Number of social mobilizers trained or refreshed with the integrated health package in the last 6 months' as source_object_code,'indicator' as content_type UNION ALL
SELECT 221 as master_object_id ,'Is an HRD that has polio vaccine wastage rate in SIAs between 5 and 15%' as source_object_code,'indicator' as content_type UNION ALL
SELECT 196 as master_object_id ,'HR district did NOT receive polio vaccine supply at least 3 days before the planned start date of campaign' as source_object_code,'indicator' as content_type UNION ALL
SELECT 206 as master_object_id ,'Number of social mobilizers and supervisors in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 176 as master_object_id ,'Number of established LT vaccination transit points with a dedicated social mobilizer' as source_object_code,'indicator' as content_type UNION ALL
SELECT 195 as master_object_id ,'Is a high risk district' as source_object_code,'indicator' as content_type UNION ALL
SELECT 203 as master_object_id ,'Is an access-challenged district (Yes/No)' as source_object_code,'indicator' as content_type UNION ALL
SELECT 202 as master_object_id ,'Has a specific access approach identified (Yes/No)' as source_object_code,'indicator' as content_type UNION ALL
SELECT 244 as master_object_id ,'# of children who received Penta 3' as source_object_code,'indicator' as content_type UNION ALL
SELECT 243 as master_object_id ,'# of children 7-12 months old' as source_object_code,'indicator' as content_type UNION ALL
SELECT 27 as master_object_id ,'# of micro plans reviewed' as source_object_code,'indicator' as content_type UNION ALL
SELECT 197 as master_object_id ,'District reported balance of SIA vaccine stocks (Yes/No)' as source_object_code,'indicator' as content_type UNION ALL
SELECT 208 as master_object_id ,'Number of vaccination teams with at least one member from local community' as source_object_code,'indicator' as content_type UNION ALL
SELECT 158 as master_object_id ,'Number of children missed due to all access reasons' as source_object_code,'indicator' as content_type UNION ALL
SELECT 440 as master_object_id ,'Reason for inaccessible children - Environment issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 436 as master_object_id ,'Reason for inaccessible children - Crime' as source_object_code,'indicator' as content_type UNION ALL
SELECT 437 as master_object_id ,'Reason for inaccessible children - Militant / Anti-Govt Elements' as source_object_code,'indicator' as content_type UNION ALL
SELECT 439 as master_object_id ,'Reason for inaccessible children - Management issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 435 as master_object_id ,'Reason for inaccessible children - Local community not supportive' as source_object_code,'indicator' as content_type UNION ALL
SELECT 438 as master_object_id ,'Reason for inaccessible children - Security Operations / Incidents' as source_object_code,'indicator' as content_type UNION ALL
SELECT 434 as master_object_id ,'Reason for inaccessible children - Perception of fear' as source_object_code,'indicator' as content_type UNION ALL
SELECT 199 as master_object_id ,'Total number of all active cold chain equipment in district' as source_object_code,'indicator' as content_type UNION ALL
SELECT 26 as master_object_id ,'Number of refusals afte re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 27 as master_object_id ,'Number of microplans reviewed' as source_object_code,'indicator' as content_type UNION ALL
SELECT 41 as master_object_id ,'Number of vaccinators' as source_object_code,'indicator' as content_type UNION ALL
SELECT 42 as master_object_id ,'Number of vaccinators trained on IPC skills' as source_object_code,'indicator' as content_type UNION ALL
SELECT 199 as master_object_id ,'# of health facilities' as source_object_code,'indicator' as content_type UNION ALL
SELECT 55 as master_object_id ,'Number of targeted children in HRA' as source_object_code,'indicator' as content_type UNION ALL
SELECT 198 as master_object_id ,'# of high-risk districts with 90% of active cold chain equipments functional' as source_object_code,'indicator' as content_type UNION ALL
SELECT 28 as master_object_id ,'Number of Microplans incoroporating social data' as source_object_code,'indicator' as content_type UNION ALL
SELECT 204 as master_object_id ,'# of identfied/planned target points' as source_object_code,'indicator' as content_type UNION ALL
SELECT 207 as master_object_id ,'Number of planned SM and supervisors' as source_object_code,'indicator' as content_type UNION ALL
SELECT 55 as master_object_id ,'# of target children' as source_object_code,'indicator' as content_type UNION ALL
SELECT 196 as master_object_id ,'District DID NOT receiv OPV 3 days before campaign' as source_object_code,'indicator' as content_type UNION ALL
SELECT 214 as master_object_id ,'# absences after re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 213 as master_object_id ,'# of absences before re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 25 as master_object_id ,'# of refusals before re-visit' as source_object_code,'indicator' as content_type UNION ALL
SELECT 33 as master_object_id ,'Number of clusters (sub-district units)' as source_object_code,'indicator' as content_type UNION ALL
SELECT 221 as master_object_id ,'district wastage rate between 5 - 15%' as source_object_code,'indicator' as content_type UNION ALL
SELECT 470 as master_object_id ,'# of children missed due to access issues' as source_object_code,'indicator' as content_type UNION ALL
SELECT 40 as master_object_id ,'# of female SMs in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 46 as master_object_id ,'Number of social mobilizers who received timely payment' as source_object_code,'indicator' as content_type UNION ALL
SELECT 175 as master_object_id ,'# of established transit points' as source_object_code,'indicator' as content_type UNION ALL
SELECT 177 as master_object_id ,'# of children vaccinated at TP' as source_object_code,'indicator' as content_type UNION ALL
SELECT 220 as master_object_id ,'% wastage' as source_object_code,'indicator' as content_type UNION ALL
SELECT 463 as master_object_id ,'Number of social mobilizers responding telephone survey' as source_object_code,'indicator' as content_type UNION ALL
SELECT 37 as master_object_id ,'# vaccination teams with at least one female ' as source_object_code,'indicator' as content_type UNION ALL
SELECT 195 as master_object_id ,'District is high-risk' as source_object_code,'indicator' as content_type UNION ALL
SELECT 36 as master_object_id ,'# of SMs in place' as source_object_code,'indicator' as content_type UNION ALL
SELECT 350 as master_object_id ,'# of children missed due to absence' as source_object_code,'indicator' as content_type UNION ALL
SELECT 208 as master_object_id ,'# vaccination teams with at least one member from local community' as source_object_code,'indicator' as content_type UNION ALL
SELECT 176 as master_object_id ,'# of established transit points with social mobiliser' as source_object_code,'indicator' as content_type UNION ALL
SELECT 34 as master_object_id ,'Number of clusters covered by SMs' as source_object_code,'indicator' as content_type UNION ALL
SELECT 112 as master_object_id ,'group_msd_chd-msd_poldiffsf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 125 as master_object_id ,'group_spec_events-spec_newborn' as source_object_code,'indicator' as content_type UNION ALL
SELECT 141 as master_object_id ,'group_msd_chd-msd_toomanyroundsm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 143 as master_object_id ,'group_msd_chd-msd_poliouncommonf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 132 as master_object_id ,'group_msd_chd-msd_poliohascuref' as source_object_code,'indicator' as content_type UNION ALL
SELECT 124 as master_object_id ,'group_msd_chd-msd_playgroundf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 98 as master_object_id ,'group_msd_chd-msd_marketm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 135 as master_object_id ,'group_spec_events-spec_zerodose' as source_object_code,'indicator' as content_type UNION ALL
SELECT 109 as master_object_id ,'group_msd_chd-msd_soceventm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 131 as master_object_id ,'group_msd_chd-msd_familymovedm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 106 as master_object_id ,'group_msd_chd-msd_noplusesf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 130 as master_object_id ,'group_msd_chd-msd_familymovedf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 118 as master_object_id ,'group_msd_chd-msd_noconsentf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 129 as master_object_id ,'group_msd_chd-msd_sideeffectsm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 146 as master_object_id ,'group_msd_chd-msd_nogovtservicesf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 127 as master_object_id ,'group_msd_chd-tot_missed_check' as source_object_code,'indicator' as content_type UNION ALL
SELECT 142 as master_object_id ,'group_msd_chd-msd_poliouncommonm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 119 as master_object_id ,'group_msd_chd-msd_relbeliefsf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 103 as master_object_id ,'group_msd_chd-msd_agedoutf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 136 as master_object_id ,'group_msd_chd-msd_unhappywteamm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 99 as master_object_id ,'group_msd_chd-msd_marketf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 145 as master_object_id ,'group_msd_chd-msd_nogovtservicesm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 150 as master_object_id ,'group_spec_events-spec_otherdisease' as source_object_code,'indicator' as content_type UNION ALL
SELECT 97 as master_object_id ,'group_msd_chd-msd_farmf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 152 as master_object_id ,'group_spec_events-spec_vcmattendedncer' as source_object_code,'indicator' as content_type UNION ALL
SELECT 149 as master_object_id ,'group_spec_events-spec_cmamreferral' as source_object_code,'indicator' as content_type UNION ALL
SELECT 138 as master_object_id ,'group_msd_chd-msd_hhnotvisitedf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 111 as master_object_id ,'group_msd_chd-msd_poldiffsm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 96 as master_object_id ,'group_msd_chd-msd_farmm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 102 as master_object_id ,'group_msd_chd-msd_agedoutm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 144 as master_object_id ,'group_spec_events-spec_rireferral' as source_object_code,'indicator' as content_type UNION ALL
SELECT 120 as master_object_id ,'group_msd_chd-msd_nofeltneedf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 114 as master_object_id ,'group_msd_chd-msd_childdiedm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 110 as master_object_id ,'group_msd_chd-msd_soceventf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 141 as master_object_id ,'group_msd_chd-msd_toomanyroundsf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 139 as master_object_id ,'group_msd_chd-msd_hhnotvisitedm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 134 as master_object_id ,'group_spec_events-spec_mslscase' as source_object_code,'indicator' as content_type UNION ALL
SELECT 104 as master_object_id ,'group_spec_events-spec_fic' as source_object_code,'indicator' as content_type UNION ALL
SELECT 133 as master_object_id ,'group_msd_chd-msd_poliohascurem' as source_object_code,'indicator' as content_type UNION ALL
SELECT 108 as master_object_id ,'group_msd_chd-msd_securityf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 126 as master_object_id ,'group_spec_events-spec_afpcase' as source_object_code,'indicator' as content_type UNION ALL
SELECT 122 as master_object_id ,'group_msd_chd-msd_relbeliefsm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 76 as master_object_id ,'tot_missed' as source_object_code,'indicator' as content_type UNION ALL
SELECT 113 as master_object_id ,'group_msd_chd-msd_childdiedf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 137 as master_object_id ,'group_msd_chd-msd_unhappywteamf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 101 as master_object_id ,'group_msd_chd-msd_schoolm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 147 as master_object_id ,'group_msd_chd-msd_otherprotectionf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 107 as master_object_id ,'group_msd_chd-msd_securitym' as source_object_code,'indicator' as content_type UNION ALL
SELECT 148 as master_object_id ,'group_msd_chd-msd_otherprotectionm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 123 as master_object_id ,'group_msd_chd-msd_playgroundm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 100 as master_object_id ,'group_msd_chd-msd_schoolf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 115 as master_object_id ,'group_msd_chd-msd_childsickm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 116 as master_object_id ,'group_msd_chd-msd_childsickf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 105 as master_object_id ,'group_msd_chd-msd_noplusesm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 128 as master_object_id ,'group_msd_chd-msd_sideeffectsf' as source_object_code,'indicator' as content_type UNION ALL
SELECT 121 as master_object_id ,'group_msd_chd-msd_nofeltneedm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 151 as master_object_id ,'group_spec_events-spec_pregnantmother' as source_object_code,'indicator' as content_type UNION ALL
SELECT 117 as master_object_id ,'group_msd_chd-msd_noconsentm' as source_object_code,'indicator' as content_type UNION ALL
SELECT 55 as master_object_id ,'Number of target children' as source_object_code,'indicator' as content_type
) ind
WHERE EXISTS ( SELECT 1 FROM indicator i where ind.master_object_id = i.id)
UNION ALL
SELECT * FROM (
SELECT 99 as master_object_id ,'Nigeria November 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 100 as master_object_id ,'Nigeria November 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 101 as master_object_id ,'Nigeria September 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 102 as master_object_id ,'Nigeria March 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 103 as master_object_id ,'Nigeria June 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 104 as master_object_id ,'Nigeria August 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 105 as master_object_id ,'Nigeria March 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 106 as master_object_id ,'Nigeria October 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 107 as master_object_id ,'Nigeria May 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 108 as master_object_id ,'Nigeria March 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 109 as master_object_id ,'Nigeria February 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 110 as master_object_id ,'Nigeria December 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 111 as master_object_id ,'Nigeria June 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 112 as master_object_id ,'Nigeria December 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 113 as master_object_id ,'Nigeria September 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 114 as master_object_id ,'Nigeria April 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 115 as master_object_id ,'Nigeria April 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 116 as master_object_id ,'Nigeria June 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 117 as master_object_id ,'Nigeria January 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 118 as master_object_id ,'Nigeria January 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 119 as master_object_id ,'Nigeria August 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 120 as master_object_id ,'Nigeria January 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 121 as master_object_id ,'Nigeria October 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 122 as master_object_id ,'Nigeria October 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 123 as master_object_id ,'Nigeria August 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 124 as master_object_id ,'Nigeria July 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 125 as master_object_id ,'Nigeria May 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 126 as master_object_id ,'Nigeria November 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 127 as master_object_id ,'Nigeria July 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 128 as master_object_id ,'Nigeria September 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 129 as master_object_id ,'Nigeria April 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 130 as master_object_id ,'Nigeria February 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 131 as master_object_id ,'Nigeria July 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 132 as master_object_id ,'Nigeria May 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 133 as master_object_id ,'Afghanistan July 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 134 as master_object_id ,'Afghanistan November 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 135 as master_object_id ,'Afghanistan December 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 136 as master_object_id ,'Afghanistan September 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 137 as master_object_id ,'Afghanistan February 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 138 as master_object_id ,'Afghanistan March 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 139 as master_object_id ,'Afghanistan June 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 140 as master_object_id ,'Afghanistan September 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 141 as master_object_id ,'Afghanistan October 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 143 as master_object_id ,'Afghanistan June 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 144 as master_object_id ,'Afghanistan September 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 145 as master_object_id ,'Afghanistan November 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 146 as master_object_id ,'Afghanistan May 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 147 as master_object_id ,'Afghanistan August 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 148 as master_object_id ,'Afghanistan February 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 149 as master_object_id ,'Afghanistan August 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 150 as master_object_id ,'Afghanistan May 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 151 as master_object_id ,'Afghanistan July 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 152 as master_object_id ,'Afghanistan August 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 153 as master_object_id ,'Afghanistan January 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 154 as master_object_id ,'Afghanistan March 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 156 as master_object_id ,'Afghanistan January 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 157 as master_object_id ,'Afghanistan March 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 158 as master_object_id ,'Afghanistan December 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 159 as master_object_id ,'Afghanistan October 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 160 as master_object_id ,'Afghanistan January 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 161 as master_object_id ,'Afghanistan February 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 162 as master_object_id ,'Afghanistan June 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 163 as master_object_id ,'Pakistan February 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 164 as master_object_id ,'Pakistan November 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 165 as master_object_id ,'Pakistan August 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 166 as master_object_id ,'Pakistan June 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 167 as master_object_id ,'Pakistan January 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 168 as master_object_id ,'Pakistan December 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 169 as master_object_id ,'Pakistan May 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 170 as master_object_id ,'Pakistan July 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 171 as master_object_id ,'Pakistan July 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 172 as master_object_id ,'Pakistan October 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 173 as master_object_id ,'Pakistan April 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 174 as master_object_id ,'Pakistan June 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 175 as master_object_id ,'Pakistan May 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 176 as master_object_id ,'Pakistan March 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 177 as master_object_id ,'Pakistan April 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 178 as master_object_id ,'Pakistan August 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 179 as master_object_id ,'Pakistan September 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 106 as master_object_id ,'Nigeria 2013.10' as source_object_code,'campaign' as content_type UNION ALL
SELECT 112 as master_object_id ,'Nigeria 2013.12' as source_object_code,'campaign' as content_type UNION ALL
SELECT 120 as master_object_id ,'Nigeria 2013.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 105 as master_object_id ,'Nigeria 2013.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 109 as master_object_id ,'Nigeria 2013.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 107 as master_object_id ,'Nigeria 2013.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 114 as master_object_id ,'Nigeria 2013.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 131 as master_object_id ,'Nigeria 2013.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 116 as master_object_id ,'Nigeria 2013.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 101 as master_object_id ,'Nigeria 2013.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 123 as master_object_id ,'Nigeria 2013.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 180 as master_object_id ,'Pakistan 2014.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 178 as master_object_id ,'Pakistan 2014.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 171 as master_object_id ,'Pakistan 2014.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 174 as master_object_id ,'Pakistan 2014.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 175 as master_object_id ,'Pakistan 2014.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 177 as master_object_id ,'Pakistan 2014.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 176 as master_object_id ,'Pakistan 2014.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 163 as master_object_id ,'Pakistan 2014.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 167 as master_object_id ,'Pakistan 2014.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 181 as master_object_id ,'Pakistan 2012.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 182 as master_object_id ,'Pakistan 2012.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 183 as master_object_id ,'Pakistan 2012.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 184 as master_object_id ,'Pakistan 2012.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 185 as master_object_id ,'Pakistan 2012.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 186 as master_object_id ,'Pakistan 2012.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 187 as master_object_id ,'Pakistan 2012.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 188 as master_object_id ,'Pakistan 2012.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 189 as master_object_id ,'Pakistan 2012.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 137 as master_object_id ,'Afghanistan 2014.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 144 as master_object_id ,'Afghanistan 2014.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 152 as master_object_id ,'Afghanistan 2014.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 134 as master_object_id ,'Afghanistan 2012.11' as source_object_code,'campaign' as content_type UNION ALL
SELECT 141 as master_object_id ,'Afghanistan 2012.10' as source_object_code,'campaign' as content_type UNION ALL
SELECT 153 as master_object_id ,'Afghanistan 2014.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 135 as master_object_id ,'Afghanistan 2012.12' as source_object_code,'campaign' as content_type UNION ALL
SELECT 139 as master_object_id ,'Afghanistan 2014.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 150 as master_object_id ,'Afghanistan 2014.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 190 as master_object_id ,'Afghanistan 2014.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 173 as master_object_id ,'Pakistan 2013.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 169 as master_object_id ,'Pakistan 2013.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 166 as master_object_id ,'Pakistan 2013.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 170 as master_object_id ,'Pakistan 2013.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 191 as master_object_id ,'Pakistan 2013.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 192 as master_object_id ,'Pakistan 2013.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 193 as master_object_id ,'Pakistan 2013.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 165 as master_object_id ,'Pakistan 2013.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 179 as master_object_id ,'Pakistan 2013.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 130 as master_object_id ,'Nigeria 2014.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 108 as master_object_id ,'Nigeria 2014.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 111 as master_object_id ,'Nigeria 2014.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 124 as master_object_id ,'Nigeria 2014.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 115 as master_object_id ,'Nigeria 2014.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 125 as master_object_id ,'Nigeria 2014.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 104 as master_object_id ,'Nigeria 2014.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 110 as master_object_id ,'Nigeria 2012.12' as source_object_code,'campaign' as content_type UNION ALL
SELECT 121 as master_object_id ,'Nigeria 2012.10' as source_object_code,'campaign' as content_type UNION ALL
SELECT 126 as master_object_id ,'Nigeria 2012.11' as source_object_code,'campaign' as content_type UNION ALL
SELECT 160 as master_object_id ,'Afghanistan 2013.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 161 as master_object_id ,'Afghanistan 2013.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 138 as master_object_id ,'Afghanistan 2013.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 119 as master_object_id ,'Nigeria 2012.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 113 as master_object_id ,'Nigeria 2012.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 143 as master_object_id ,'Afghanistan 2013.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 151 as master_object_id ,'Afghanistan 2013.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 129 as master_object_id ,'Nigeria 2012.4' as source_object_code,'campaign' as content_type UNION ALL
SELECT 132 as master_object_id ,'Nigeria 2012.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 103 as master_object_id ,'Nigeria 2012.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 127 as master_object_id ,'Nigeria 2012.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 118 as master_object_id ,'Nigeria 2012.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 102 as master_object_id ,'Nigeria 2012.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 194 as master_object_id ,'Pakistan 2012.11' as source_object_code,'campaign' as content_type UNION ALL
SELECT 195 as master_object_id ,'Pakistan 2012.10' as source_object_code,'campaign' as content_type UNION ALL
SELECT 196 as master_object_id ,'Pakistan 2012.12' as source_object_code,'campaign' as content_type UNION ALL
SELECT 140 as master_object_id ,'Afghanistan 2012.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 149 as master_object_id ,'Afghanistan 2012.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 146 as master_object_id ,'Afghanistan 2012.5' as source_object_code,'campaign' as content_type UNION ALL
SELECT 133 as master_object_id ,'Afghanistan 2012.7' as source_object_code,'campaign' as content_type UNION ALL
SELECT 162 as master_object_id ,'Afghanistan 2012.6' as source_object_code,'campaign' as content_type UNION ALL
SELECT 156 as master_object_id ,'Afghanistan 2012.1' as source_object_code,'campaign' as content_type UNION ALL
SELECT 154 as master_object_id ,'Afghanistan 2012.3' as source_object_code,'campaign' as content_type UNION ALL
SELECT 148 as master_object_id ,'Afghanistan 2012.2' as source_object_code,'campaign' as content_type UNION ALL
SELECT 168 as master_object_id ,'Pakistan 2013.12' as source_object_code,'campaign' as content_type UNION ALL
SELECT 172 as master_object_id ,'Pakistan 2013.10' as source_object_code,'campaign' as content_type UNION ALL
SELECT 164 as master_object_id ,'Pakistan 2013.11' as source_object_code,'campaign' as content_type UNION ALL
SELECT 147 as master_object_id ,'Afghanistan 2013.8' as source_object_code,'campaign' as content_type UNION ALL
SELECT 136 as master_object_id ,'Afghanistan 2013.9' as source_object_code,'campaign' as content_type UNION ALL
SELECT 158 as master_object_id ,'Afghanistan 2013.12' as source_object_code,'campaign' as content_type UNION ALL
SELECT 159 as master_object_id ,'Afghanistan 2013.10' as source_object_code,'campaign' as content_type UNION ALL
SELECT 145 as master_object_id ,'Afghanistan 2013.11' as source_object_code,'campaign' as content_type UNION ALL
SELECT 115 as master_object_id ,'Tue Apr 01 00:00:00 UTC 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 201 as master_object_id ,'Mon Dec 01 00:00:00 UTC 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 111 as master_object_id ,'Sun Jun 01 00:00:00 UTC 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 201 as master_object_id ,'41974' as source_object_code,'campaign' as content_type UNION ALL
SELECT 207 as master_object_id ,'Pakistan January 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 208 as master_object_id ,'Pakistan February 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 206 as master_object_id ,'Afghanistan February 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 211 as master_object_id ,'Nigeria February 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 210 as master_object_id ,'Nigeria January 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 214 as master_object_id ,'Pakistan March 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 213 as master_object_id ,'Afghanistan March 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 209 as master_object_id ,'Afghanistan January 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 212 as master_object_id ,'Nigeria March 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 142 as master_object_id ,'Afghanistan April 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 155 as master_object_id ,'Afghanistan May 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 216 as master_object_id ,'Nigeria February 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 215 as master_object_id ,'Afghanistan April 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 210 as master_object_id ,'Nigeria Jan 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 212 as master_object_id ,'Nigeria Mar 1, 2015' as source_object_code,'campaign' as content_type UNION ALL
SELECT 223 as master_object_id ,'nigeria-2015-06-01' as source_object_code,'campaign' as content_type UNION ALL
SELECT 197 as master_object_id ,'Pakistan December 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 200 as master_object_id ,'Afghanistan December 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 190 as master_object_id ,'Afghanistan April 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 198 as master_object_id ,'Afghanistan October 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 201 as master_object_id ,'Nigeria December 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 202 as master_object_id ,'Afghanistan November 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 199 as master_object_id ,'Afghanistan July 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 203 as master_object_id ,'Pakistan November 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 204 as master_object_id ,'Pakistan October 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 180 as master_object_id ,'Pakistan September 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 171 as master_object_id ,'Pakistan Jul 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 199 as master_object_id ,'Afghanistan Jul 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 124 as master_object_id ,'Nigeria Jul 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 163 as master_object_id ,'Paksitan February 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 150 as master_object_id ,'Afganistan May 2014' as source_object_code,'campaign' as content_type UNION ALL
SELECT 191 as master_object_id ,'Pakistan January 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 188 as master_object_id ,'Pakistan September 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 189 as master_object_id ,'Pakistan August 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 195 as master_object_id ,'Pakistan October 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 194 as master_object_id ,'Pakistan November 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 187 as master_object_id ,'Pakistan June 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 181 as master_object_id ,'Pakistan January 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 186 as master_object_id ,'Pakistan July 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 192 as master_object_id ,'Pakistan February 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 184 as master_object_id ,'Pakistan May 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 183 as master_object_id ,'Pakistan February 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 182 as master_object_id ,'Pakistan March 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 193 as master_object_id ,'Pakistan March 2013' as source_object_code,'campaign' as content_type UNION ALL
SELECT 196 as master_object_id ,'Pakistan December 2012' as source_object_code,'campaign' as content_type UNION ALL
SELECT 185 as master_object_id ,'Pakistan April 2012' as source_object_code,'campaign' as content_type
)c
WHERE EXISTS ( SELECT 1 FROM campaign camp where c.master_object_id = camp.id)
)x;
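-- Backfill: add a 'campaign' mapping for any campaign whose slug is not yet present in source_object_map.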
INSERT INTO source_object_map (master_object_id, source_object_code, mapped_by_id, content_type)
SELECT
c.id
, c.slug
, 1
, 'campaign'
FROM campaign c WHERE NOT EXISTS (
SELECT 1 FROM source_object_map m
WHERE m.source_object_code = c.slug
);
-- JD 9/1/2015 ODK INGEST--
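-- Map ODK form field codes (group:FieldName) to their master indicator ids; mapped_by_id = 1.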
INSERT INTO source_object_map
(source_object_code,master_object_id,content_type,mapped_by_id)
SELECT * , 'indicator',1 FROM (
SELECT 'group_msd_chd:Msd_FarmM',96 UNION ALL
SELECT 'group_msd_chd:Msd_FarmF',97 UNION ALL
SELECT 'group_msd_chd:Msd_MarketM',98 UNION ALL
SELECT 'group_msd_chd:Msd_MarketF',99 UNION ALL
SELECT 'group_msd_chd:Msd_SchoolF',100 UNION ALL
SELECT 'group_msd_chd:Msd_SchoolM',101 UNION ALL
SELECT 'group_msd_chd:Msd_AgedOutM',102 UNION ALL
SELECT 'group_msd_chd:Msd_AgedOutF',103 UNION ALL
SELECT 'group_spec_events:Spec_FIC',104 UNION ALL
SELECT 'group_msd_chd:Msd_NoPlusesM',105 UNION ALL
SELECT 'group_msd_chd:Msd_NoPlusesF',106 UNION ALL
SELECT 'group_msd_chd:Msd_SecurityM',107 UNION ALL
SELECT 'group_msd_chd:Msd_SecurityF',108 UNION ALL
SELECT 'group_msd_chd:Msd_SocEventM',109 UNION ALL
SELECT 'group_msd_chd:Msd_SocEventF',110 UNION ALL
SELECT 'group_msd_chd:Msd_PolDiffsM',111 UNION ALL
SELECT 'group_msd_chd:Msd_PolDiffsF',112 UNION ALL
SELECT 'group_msd_chd:Msd_ChildDiedF',113 UNION ALL
SELECT 'group_msd_chd:Msd_ChildDiedM',114 UNION ALL
SELECT 'group_msd_chd:Msd_ChildSickM',115 UNION ALL
SELECT 'group_msd_chd:Msd_ChildSickF',116 UNION ALL
SELECT 'group_msd_chd:Msd_NoConsentM',117 UNION ALL
SELECT 'group_msd_chd:Msd_NoConsentF',118 UNION ALL
SELECT 'group_msd_chd:Msd_RelBeliefsF',119 UNION ALL
SELECT 'group_msd_chd:Msd_NoFeltNeedF',120 UNION ALL
SELECT 'group_msd_chd:Msd_NoFeltNeedM',121 UNION ALL
SELECT 'group_msd_chd:Msd_RelBeliefsM',122 UNION ALL
SELECT 'group_msd_chd:Msd_PlaygroundM',123 UNION ALL
SELECT 'group_msd_chd:Msd_PlaygroundF',124 UNION ALL
SELECT 'group_spec_events:Spec_Newborn',125 UNION ALL
SELECT 'group_spec_events:Spec_AFPCase',126 UNION ALL
SELECT 'group_msd_chd:Tot_Missed_Check',127 UNION ALL
SELECT 'group_msd_chd:Msd_SideEffectsF',128 UNION ALL
SELECT 'group_msd_chd:Msd_SideEffectsM',129 UNION ALL
SELECT 'group_msd_chd:Msd_FamilyMovedF',130 UNION ALL
SELECT 'group_msd_chd:Msd_FamilyMovedM',131 UNION ALL
SELECT 'group_msd_chd:Msd_PolioHasCureF',132 UNION ALL
SELECT 'group_msd_chd:Msd_PolioHasCureM',133 UNION ALL
SELECT 'group_spec_events:Spec_MslsCase',134 UNION ALL
SELECT 'group_spec_events:Spec_ZeroDose',135 UNION ALL
SELECT 'group_msd_chd:Msd_UnhappyWTeamM',136 UNION ALL
SELECT 'group_msd_chd:Msd_UnhappyWTeamF',137 UNION ALL
SELECT 'group_msd_chd:Msd_HHNotVisitedF',138 UNION ALL
SELECT 'group_msd_chd:Msd_HHNotVisitedM',139 UNION ALL
SELECT 'group_msd_chd:Msd_TooManyRoundsF',140 UNION ALL
SELECT 'group_msd_chd:Msd_TooManyRoundsM',141 UNION ALL
SELECT 'group_msd_chd:Msd_PolioUncommonM',142 UNION ALL
SELECT 'group_msd_chd:Msd_PolioUncommonF',143 UNION ALL
SELECT 'group_spec_events:Spec_RIReferral',144 UNION ALL
SELECT 'group_msd_chd:Msd_NoGovtServicesM',145 UNION ALL
SELECT 'group_msd_chd:Msd_NoGovtServicesF',146 UNION ALL
SELECT 'group_msd_chd:Msd_OtherProtectionF',147 UNION ALL
SELECT 'group_msd_chd:Msd_OtherProtectionM',148 UNION ALL
SELECT 'group_spec_events:Spec_CMAMReferral',149 UNION ALL
SELECT 'group_spec_events:Spec_OtherDisease',150 UNION ALL
SELECT 'group_spec_events:Spec_PregnantMother',151 UNION ALL
SELECT 'group_spec_events:Spec_VCMAttendedNCer',152
)x;
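-- Attach every indicator/campaign mapping to the bootstrap 'initialize-db' source document.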
INSERT INTO document_to_source_object_map
(document_id,source_object_map_id)
SELECT sd.id , som.id
FROM source_data_document sd
INNER JOIN source_object_map som
ON som.content_type in ('indicator','campaign')
AND sd.docfile = 'initialize-db';
""")
]
|
unicef/polio
|
datapoints/migrations/0004_load_indicators_campaigns_permissions.py
|
Python
|
agpl-3.0
| 180,207
|
[
"VisIt"
] |
50a8951a89e09fa3b6758cabc9a70f9095a583670950d795c769d815a4b4d233
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006-2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Products report implementation """
from stoqlib.reporting.report import (ObjectListReport,
ObjectTreeReport,
TableReport)
from stoqlib.lib.formatters import format_quantity
from stoqlib.lib.translation import stoqlib_gettext as _
class ProductReport(ObjectListReport):
""" This report show a list of all products returned by a SearchBar,
listing both its description and its balance in the stock selected.
"""
title = _("Product Listing")
filter_format_string = _("on branch <u>%s</u>")
class ProductBrandReport(ObjectListReport):
""" This report show a list of all products brand returned by a SearchBar,
listing both its brand and its product quantity in the stock selected.
"""
title = _("Brand Listing")
filter_format_string = _("on branch <u>%s</u>")
summary = ['quantity']
class ProductBrandByBranchReport(ObjectTreeReport):
""" This report show a list of all products brand by branch returned by
a SearchBar, listing both its brand and its product quantity in the stock
selected.
"""
title = _("Brand by branch Listing")
filter_format_string = _("on branch <u>%s</u>")
summary = ['quantity']
def has_parent(self, obj):
return obj.company
# Overriding the method to summarize correctly
def accumulate(self, obj):
if not obj.company:
self._summary['quantity'] += obj.quantity
class SimpleProductReport(ObjectListReport):
title = _("Product Listing")
summary = ['stock']
class ProductPriceReport(TableReport):
""" This report show a list of all products returned by a SearchBar,
listing both its description and price in the selected stock.
"""
title = _("Product Listing")
filter_format_string = _("on branch <u>%s</u>")
def __init__(self, filename, products, *args, **kwargs):
branch_name = kwargs.pop('branch_name')
self.main_object_name = (_("product from branch %s") % branch_name,
_("products from branch %s") % branch_name)
TableReport.__init__(self, filename, products, *args, **kwargs)
def get_columns(self):
return [dict(title=_('Code'), align='right'),
dict(title=_('Description')),
dict(title=_('Price'), align='right')]
def get_row(self, obj):
return [obj.code, obj.description, obj.price]
class ProductQuantityReport(ObjectListReport):
""" This report show a list of all products returned by a SearchBar,
listing both its description, quantity solded and quantity received.
"""
title = _("Product Listing")
main_object_name = (_("product"), _("products"))
summary = ['quantity_sold', 'quantity_received', 'quantity_transfered',
'quantity_produced', 'quantity_lost', 'quantity_consumed']
class ProductsSoldReport(ObjectListReport):
""" This report lists all products sold with the average stock cost for
a given period of time.
"""
title = _("Products Sold Listing")
main_object_name = (_("product sold"), _("products sold"))
class ProductCountingReport(TableReport):
"""This report shows a list of all products returned by a Searchbar
and it leaves several fields in blank, like quantity, partial value
and total value. These fields must be filled out in the counting
process, manually.
"""
title = _("Product Counting")
main_object_name = (_("product"), _("products"))
def get_columns(self):
columns = [
dict(title=_('Code'), align='right'),
dict(title=_('Description')),
dict(title=_('Fiscal Class')),
dict(title=_('Quantity'), align='right'),
dict(title=_('Unit')),
]
return columns
def get_row(self, obj):
return [obj.code, obj.description, obj.tax_constant.description, '',
obj.unit_description]
def format_data(data):
# must return zero, otherwise the report shows None instead of 0
if data is None:
return 0
return format_quantity(data)
class ProductStockReport(ObjectListReport):
title = _("Product Stock Report")
main_object_name = (_("product"), _("products"))
class ProductClosedStockReport(ObjectListReport):
title = _("Closed Product Stock Report")
main_object_name = (_("product"), _("products"))
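# --- Hedged sketch (added for illustration; not part of the original module) ---
# The TableReport subclasses above share one pattern: declare a title, then
# implement get_columns() and get_row(obj). A minimal hypothetical report built
# on the same API could look like the class below. The attribute names `code`,
# `description` and `stock` are assumptions for illustration only.
class ProductStockValueReportSketch(TableReport):
    """Illustrative only: lists each product with its current stock."""
    title = _("Product Stock Listing (sketch)")
    main_object_name = (_("product"), _("products"))

    def get_columns(self):
        # Same dict-based column spec used by ProductPriceReport above.
        return [dict(title=_('Code'), align='right'),
                dict(title=_('Description')),
                dict(title=_('Stock'), align='right')]

    def get_row(self, obj):
        # format_data() keeps empty quantities rendered as 0 instead of None.
        return [obj.code, obj.description, format_data(obj.stock)]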
|
tiagocardosos/stoq
|
stoqlib/reporting/product.py
|
Python
|
gpl-2.0
| 5,347
|
[
"VisIt"
] |
1fbbf4604786102d669ca79000e7f3b8066355316ce620d1b8a0aa717a245cd5
|
# -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
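# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# NodeVisitor dispatches on the node class name: visiting a `Name` node calls
# `visit_Name` if it exists, otherwise `generic_visit` recurses into the
# children. A minimal visitor that collects every referenced name could look
# like this; the `jinja2.Environment().parse(...)` usage in the comment is an
# assumption about how an AST would be obtained.
class NameCollector(NodeVisitor):
    def __init__(self):
        self.names = []

    def visit_Name(self, node, *args, **kwargs):
        # `Name` nodes carry the identifier in their `name` field.
        self.names.append(node.name)
        self.generic_visit(node, *args, **kwargs)

# Illustrative usage:
#   from jinja2 import Environment
#   ast = Environment().parse("{{ user.name }} owes {{ amount }}")
#   collector = NameCollector()
#   collector.visit(ast)
#   collector.names  # -> ['user', 'amount']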
|
yesudeep/tisshrmlr
|
app/jinja2/jinja2/visitor.py
|
Python
|
mit
| 3,316
|
[
"VisIt"
] |
5f192b3b8c82ac6254af1099700f95260ba784ba17e19fcd7ba43cdee80218b2
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
http://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{\sqrt{z}}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
The computation uses the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to the incomplete elliptic integral: if `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)` and `cn(u|m) = cos(phi)`. Here `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt(p)
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative distribution function of the standard normal distribution is
given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - z)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
x : float
Points at which to evaluate the polynomial
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
""")
add_newdoc("scipy.special", "eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)} = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - z)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
""")
add_newdoc("scipy.special", "eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer `n` and
non-negative `x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function.
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where
`gammaincc` is the regularized upper incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
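Examples
--------
A quick check of the complementary relation stated above:
>>> import numpy as np
>>> from scipy.special import gammainc, gammaincc
>>> a, x = 2.5, 1.3
>>> np.allclose(gammainc(a, x) + gammaincc(a, x), 1.0)
True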
""")
add_newdoc("scipy.special", "gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc`
is the regularized lower incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
""")
add_newdoc("scipy.special", "_gammaln",
"""
Internal function, use ``gammaln`` instead.
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
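Examples
--------
The notes above mention the reduction to the regularized incomplete gamma
integral; one concrete form of that relation (inferred from the integral
definition, not stated in the Cephes documentation) can be checked with
`gammainc`:
>>> import numpy as np
>>> from scipy.special import gdtr, gammainc
>>> a, b, x = 1.2, 3.4, 5.6
>>> np.allclose(gdtr(a, b, x), gammainc(b, a*x))
True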
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the `p`-th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
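Examples
--------
A numerical check of the scaling relation in the definition above:
>>> import numpy as np
>>> from scipy.special import hankel1, hankel1e
>>> z = 3.0 + 4.0j
>>> np.allclose(hankel1e(1.0, z), hankel1(1.0, z) * np.exp(-1j*z))
True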
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
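Examples
--------
The two finite branches of the definition, evaluated at sample points:
>>> from scipy.special import huber
>>> print(huber(1.0, 0.5))  # |r| <= delta: quadratic branch, 0.5*r**2
0.125
>>> print(huber(1.0, 3.0))  # |r| > delta: linear branch, delta*(|r| - delta/2)
2.5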
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, x : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
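Examples
--------
For ``v = 1`` the series above reduces to the modified Bessel function
:math:`I_0(2\sqrt{x})`; this identity (derived from the series, not part of
the original documentation) makes a convenient cross-check against `i0`:
>>> import numpy as np
>>> from scipy.special import hyp0f1, i0
>>> x = 2.5
>>> np.allclose(hyp0f1(1.0, x), i0(2*np.sqrt(x)))
True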
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
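Examples
--------
The scaled form can be checked directly against `i0`:
>>> import numpy as np
>>> from scipy.special import i0, i0e
>>> x = 3.5
>>> np.allclose(i0e(x), np.exp(-abs(x)) * i0(x))
True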
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``integral(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x: array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
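Examples
--------
Checking the exponential scaling against `iv` at a real argument:
>>> import numpy as np
>>> from scipy.special import iv, ive
>>> np.allclose(ive(2.0, 5.0), iv(2.0, 5.0) * np.exp(-5.0))
True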
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
See also
--------
jv : Bessel function of real order and complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
See also
--------
jv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
See also
--------
jv
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
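Examples
--------
Evaluating the piecewise definition at a couple of points:
>>> import numpy as np
>>> from scipy.special import kl_div
>>> print(kl_div(1.0, 1.0))
0.0
>>> np.allclose(kl_div(2.0, 1.0), 2.0*np.log(2.0) - 1.0)
True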
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
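Examples
--------
The exponential scaling can be verified against `kv`:
>>> import numpy as np
>>> from scipy.special import kv, kve
>>> np.allclose(kve(2.0, 3.5), kv(2.0, 3.5) * np.exp(3.5))
True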
""")
add_newdoc("scipy.special", "_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc("scipy.special", "_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
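Examples
--------
A few values following the definition above:
>>> import numpy as np
>>> from scipy.special import logit
>>> print(logit(0.5))
0.0
>>> np.allclose(logit(0.8), np.log(0.8/0.2))
True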
""")
add_newdoc("scipy.special", "lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
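Examples
--------
For ``m = 0`` the associated Legendre function reduces to the Legendre
polynomial :math:`P_v(x)`, which gives a simple cross-check against
`eval_legendre` (used here purely as an independent reference):
>>> import numpy as np
>>> from scipy.special import lpmv, eval_legendre
>>> np.allclose(lpmv(0, 3, 0.4), eval_legendre(3, 0.4))
True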
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
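Examples
--------
The incomplete-beta formula above can be checked with `betainc` (used here
only as an independent reference):
>>> import numpy as np
>>> from scipy.special import nbdtr, betainc
>>> k, n, p = 10, 5, 0.3
>>> np.allclose(nbdtr(k, n, p), betainc(n, k + 1, p))
True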
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import numpy as np
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import numpy as np
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
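Examples
--------
As an illustrative check, the standard normal CDF evaluates to one half
at the origin:
>>> from scipy.special import ndtr
>>> float(ndtr(0.0))
0.5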
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
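Examples
--------
As an illustrative consistency check, `pdtr` and its survival-function
counterpart `pdtrc` sum to one:
>>> import numpy as np
>>> from scipy.special import pdtr, pdtrc
>>> bool(np.isclose(pdtr(2, 3.0) + pdtrc(2, 3.0), 1.0))
True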
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
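Examples
--------
As an illustrative check, the loss vanishes at ``r = 0`` and behaves like
``r**2 / 2`` when ``|r|`` is small compared to `delta`:
>>> import numpy as np
>>> from scipy.special import pseudo_huber
>>> float(pseudo_huber(1.0, 0.0))
0.0
>>> bool(np.isclose(pseudo_huber(1.0, 1e-4), (1e-4)**2 / 2))
True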
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
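Examples
--------
As an illustrative check, 180 degrees corresponds to pi radians:
>>> import numpy as np
>>> from scipy.special import radian
>>> bool(np.isclose(radian(180, 0, 0), np.pi))
True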
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
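Examples
--------
Illustrative spot checks of the three cases in the definition above:
>>> from scipy.special import rel_entr
>>> float(rel_entr(1.0, 1.0))
0.0
>>> float(rel_entr(0.0, 2.0))
0.0
>>> float(rel_entr(1.0, 0.0))
inf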
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Reciprocal of the gamma function.
Returns ``1/gamma(z)``.
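Examples
--------
As an illustrative check, ``rgamma(1) = 1`` and ``rgamma(4) = 1/3! = 1/6``:
>>> import numpy as np
>>> from scipy.special import rgamma
>>> float(rgamma(1.0))
1.0
>>> bool(np.isclose(rgamma(4.0), 1.0 / 6.0))
True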
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by the additive constant ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by the additive constant ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
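Examples
--------
As an illustrative check, the sine of 90 degrees is one:
>>> from scipy.special import sindg
>>> float(sindg(90))
1.0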
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
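Examples
--------
An illustrative round trip through `smirnov` and `smirnovi`:
>>> import numpy as np
>>> from scipy.special import smirnov, smirnovi
>>> bool(np.isclose(smirnovi(10, smirnov(10, 0.3)), 0.3))
True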
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
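Examples
--------
As an illustrative check, the distribution is symmetric about zero, so the
CDF at ``t = 0`` is one half for any number of degrees of freedom:
>>> from scipy.special import stdtr
>>> float(stdtr(3, 0.0))
0.5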
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (x/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (x/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|x| < |v| + 20`)
- asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
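Examples
--------
Illustrative checks of the convention at ``x = 0`` and of ``log(1) = 0``:
>>> from scipy.special import xlogy
>>> float(xlogy(0.0, 0.0))
0.0
>>> float(xlogy(2.0, 1.0))
0.0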
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
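Examples
--------
Illustrative checks of the convention at ``x = 0`` and of ``log1p(0) = 0``:
>>> from scipy.special import xlog1py
>>> float(xlog1py(0.0, -1.0))
0.0
>>> float(xlog1py(3.0, 0.0))
0.0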
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
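Examples
--------
As an illustrative check against the Basel problem, ``zeta(2) = pi**2 / 6``:
>>> import numpy as np
>>> from scipy.special import zetac
>>> bool(np.isclose(zetac(2) + 1, np.pi**2 / 6))
True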
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array_like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining ``loggamma`` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggamma`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
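Examples
--------
As an illustrative check, exponentiating recovers the gamma function, here
at ``z = 5`` where ``gamma(5) = 4! = 24``:
>>> import numpy as np
>>> from scipy.special import loggamma
>>> bool(np.isclose(np.exp(loggamma(5.0)), 24.0))
True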
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/scipy/special/add_newdocs.py
|
Python
|
mit
| 189,015
|
[
"Gaussian"
] |
174e0190e871aeeeb1fc7ecb574640f195d1cabdd7e6bac2249d2ac9fb64aa1b
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for MLlib Python DataFrame-based APIs.
"""
import sys
if sys.version > '3':
xrange = range
basestring = str
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from shutil import rmtree
import tempfile
import array as pyarray
import numpy as np
from numpy import abs, all, arange, array, array_equal, inf, ones, tile, zeros
import inspect
from pyspark import keyword_only, SparkContext
from pyspark.ml import Estimator, Model, Pipeline, PipelineModel, Transformer, UnaryTransformer
from pyspark.ml.classification import *
from pyspark.ml.clustering import *
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.evaluation import BinaryClassificationEvaluator, \
MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.feature import *
from pyspark.ml.fpm import FPGrowth, FPGrowthModel
from pyspark.ml.linalg import DenseMatrix, DenseVector, Matrices, MatrixUDT, \
SparseMatrix, SparseVector, Vector, VectorUDT, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import DecisionTreeRegressor, GeneralizedLinearRegression, \
LinearRegression
from pyspark.ml.stat import ChiSquareTest
from pyspark.ml.tuning import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams, JavaWrapper
from pyspark.serializers import PickleSerializer
from pyspark.sql import DataFrame, Row, SparkSession
from pyspark.sql.functions import rand
from pyspark.sql.types import DoubleType, IntegerType
from pyspark.storagelevel import *
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
ser = PickleSerializer()
class MLlibTestCase(unittest.TestCase):
def setUp(self):
self.sc = SparkContext('local[4]', "MLlib tests")
self.spark = SparkSession(self.sc)
def tearDown(self):
self.spark.stop()
class SparkSessionTestCase(PySparkTestCase):
@classmethod
def setUpClass(cls):
PySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
PySparkTestCase.tearDownClass()
cls.spark.stop()
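# The Mock*/HasFake helpers below are lightweight stand-ins for real
# Estimators and Transformers: they only record which dataset index they saw,
# which lets the Pipeline tests verify call order without running Spark jobs.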
class MockDataset(DataFrame):
def __init__(self):
self.index = 0
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_index = None
def _transform(self, dataset):
self.dataset_index = dataset.index
dataset.index += 1
return dataset
class MockUnaryTransformer(UnaryTransformer, DefaultParamsReadable, DefaultParamsWritable):
shift = Param(Params._dummy(), "shift", "The amount by which to shift " +
"data in a DataFrame",
typeConverter=TypeConverters.toFloat)
def __init__(self, shiftVal=1):
super(MockUnaryTransformer, self).__init__()
self._setDefault(shift=1)
self._set(shift=shiftVal)
def getShift(self):
return self.getOrDefault(self.shift)
def setShift(self, shift):
self._set(shift=shift)
def createTransformFunc(self):
shiftVal = self.getShift()
return lambda x: x + shiftVal
def outputDataType(self):
return DoubleType()
def validateInputType(self, inputType):
if inputType != DoubleType():
raise TypeError("Bad input type: {}. ".format(inputType) +
"Requires Double.")
class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
self.dataset_index = None
def _fit(self, dataset):
self.dataset_index = dataset.index
model = MockModel()
self._copyValues(model)
return model
class MockModel(MockTransformer, Model, HasFake):
pass
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l),
range(len(l)), l), pyarray.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
pyarray.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class PipelineTests(PySparkTestCase):
def test_pipeline(self):
dataset = MockDataset()
estimator0 = MockEstimator()
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
self.assertEqual(1, transformer1.getFake())
self.assertEqual(2, dataset.index)
self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
self.assertIsNone(transformer3.dataset_index,
"The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
self.assertEqual(4, model2.dataset_index)
self.assertEqual(5, transformer3.dataset_index)
self.assertEqual(6, dataset.index)
def test_identity_pipeline(self):
dataset = MockDataset()
def doTransform(pipeline):
pipeline_model = pipeline.fit(dataset)
return pipeline_model.transform(dataset)
# check that empty pipeline did not perform any transformation
self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index)
# check that failure to set stages param will raise KeyError for missing param
self.assertRaises(KeyError, lambda: doTransform(Pipeline()))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(PySparkTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
testParams.setMaxIter(100)
self.assertTrue(testParams.isSet(maxIter))
self.assertEqual(testParams.getMaxIter(), 100)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
otherParam = Param(Params._dummy(), "otherParam", "Parameter used to test that " +
"set raises an error for a non-member parameter.",
typeConverter=TypeConverters.toString)
with self.assertRaises(ValueError):
testParams.set(otherParam, "value")
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
testParams.setSeed(43)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
"seed: random seed. (default: 41, current: 43)"]))
def test_kmeans_param(self):
algo = KMeans()
self.assertEqual(algo.getInitMode(), "k-means||")
algo.setK(10)
self.assertEqual(algo.getK(), 10)
algo.setInitSteps(10)
self.assertEqual(algo.getInitSteps(), 10)
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
def test_param_property_error(self):
param_store = HasThrowableProperty()
self.assertRaises(RuntimeError, lambda: param_store.test_property)
params = param_store.params # should not invoke the property 'test_property'
self.assertEqual(len(params), 1)
def test_word2vec_param(self):
model = Word2Vec().setWindowSize(6)
# Check windowSize is set properly
self.assertEqual(model.getWindowSize(), 6)
def test_copy_param_extras(self):
tp = TestParams(seed=42)
extra = {tp.getParam(TestParams.inputCol.name): "copy_input"}
tp_copy = tp.copy(extra=extra)
self.assertEqual(tp.uid, tp_copy.uid)
self.assertEqual(tp.params, tp_copy.params)
for k, v in extra.items():
self.assertTrue(tp_copy.isDefined(k))
self.assertEqual(tp_copy.getOrDefault(k), v)
copied_no_extra = {}
for k, v in tp_copy._paramMap.items():
if k not in extra:
copied_no_extra[k] = v
self.assertEqual(tp._paramMap, copied_no_extra)
self.assertEqual(tp._defaultParamMap, tp_copy._defaultParamMap)
def test_logistic_regression_check_thresholds(self):
self.assertIsInstance(
LogisticRegression(threshold=0.5, thresholds=[0.5, 0.5]),
LogisticRegression
)
self.assertRaisesRegexp(
ValueError,
"Logistic Regression getThreshold found inconsistent.*$",
LogisticRegression, threshold=0.42, thresholds=[0.5, 0.5]
)
class EvaluatorTests(SparkSessionTestCase):
def test_java_params(self):
"""
This tests a bug fixed by SPARK-18274 which causes multiple copies
of a Params instance in Python to be linked to the same Java instance.
"""
evaluator = RegressionEvaluator(metricName="r2")
df = self.spark.createDataFrame([Row(label=1.0, prediction=1.1)])
evaluator.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
evaluatorCopy = evaluator.copy({evaluator.metricName: "mae"})
evaluator.evaluate(df)
evaluatorCopy.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
self.assertEqual(evaluatorCopy._java_obj.getMetricName(), "mae")
class FeatureTests(SparkSessionTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
self.assertTrue(all([~b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
dataset = self.spark.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
def test_ngram(self):
dataset = self.spark.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
dataset = self.spark.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
self.assertEqual(type(stopWordRemover.getStopWords()), list)
self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
# with language selection
stopwords = StopWordsRemover.loadDefaultStopWords("turkish")
dataset = self.spark.createDataFrame([Row(input=["acaba", "ama", "biri"])])
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, [])
def test_count_vectorizer_with_binary(self):
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
model = cv.fit(dataset)
transformedList = model.transform(dataset).select("features", "expected").collect()
for r in transformedList:
feature, expected = r
self.assertEqual(feature, expected)
def test_rformula_force_index_label(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
# Does not index label by default since it's numeric type.
rf = RFormula(formula="y ~ x + s")
model = rf.fit(df)
transformedDF = model.transform(df)
self.assertEqual(transformedDF.head().label, 1.0)
# Force to index label.
rf2 = RFormula(formula="y ~ x + s").setForceIndexLabel(True)
model2 = rf2.fit(df)
transformedDF2 = model2.transform(df)
self.assertEqual(transformedDF2.head().label, 0.0)
def test_rformula_string_indexer_order_type(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
rf = RFormula(formula="y ~ x + s", stringIndexerOrderType="alphabetDesc")
self.assertEqual(rf.getStringIndexerOrderType(), 'alphabetDesc')
transformedDF = rf.fit(df).transform(df)
observed = transformedDF.select("features").collect()
expected = [[1.0, 0.0], [2.0, 1.0], [0.0, 0.0]]
for i in range(0, len(expected)):
self.assertTrue(all(observed[i]["features"].toArray() == expected[i]))
def test_string_indexer_handle_invalid(self):
df = self.spark.createDataFrame([
(0, "a"),
(1, "d"),
(2, None)], ["id", "label"])
si1 = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="keep",
stringOrderType="alphabetAsc")
model1 = si1.fit(df)
td1 = model1.transform(df)
actual1 = td1.select("id", "indexed").collect()
expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0), Row(id=2, indexed=2.0)]
self.assertEqual(actual1, expected1)
si2 = si1.setHandleInvalid("skip")
model2 = si2.fit(df)
td2 = model2.transform(df)
actual2 = td2.select("id", "indexed").collect()
expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0)]
self.assertEqual(actual2, expected2)
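# The InducedError* helpers below define a toy estimator whose fitted model
# adds uniform noise, scaled by `inducedError`, to the 'feature' column to
# produce a 'prediction' column; the tuning tests use it to check that model
# selection picks the zero-error parameter setting.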
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
class CrossValidatorTests(SparkSessionTestCase):
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvCopied = cv.copy()
self.assertEqual(cv.getEstimator().uid, cvCopied.getEstimator().uid)
cvModel = cv.fit(dataset)
cvModelCopied = cvModel.copy()
for index in range(len(cvModel.avgMetrics)):
self.assertTrue(abs(cvModel.avgMetrics[index] - cvModelCopied.avgMetrics[index])
< 0.0001)
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for CrossValidator will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
lrModel = cvModel.bestModel
cvModelPath = temp_path + "/cvModel"
lrModel.save(cvModelPath)
loadedLrModel = LogisticRegressionModel.load(cvModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
def test_save_load_nested_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
originalParamMap = cv.getEstimatorParamMaps()
loadedParamMap = loadedCV.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
class TrainValidationSplitTests(SparkSessionTestCase):
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(0.0, min(validationMetrics))
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(1.0, max(validationMetrics))
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
lrModel = tvsModel.bestModel
tvsModelPath = temp_path + "/tvsModel"
lrModel.save(tvsModelPath)
loadedLrModel = LogisticRegressionModel.load(tvsModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
self.assertEqual(loadedTvs.getEstimatorParamMaps(), tvs.getEstimatorParamMaps())
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_save_load_nested_estimator(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
tvs = TrainValidationSplit(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
originalParamMap = tvs.getEstimatorParamMaps()
loadedParamMap = loadedTvs.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsCopied = tvs.copy()
tvsModelCopied = tvsModel.copy()
self.assertEqual(tvs.getEstimator().uid, tvsCopied.getEstimator().uid,
"Copied TrainValidationSplit has the same uid of Estimator")
self.assertEqual(tvsModel.bestModel.uid, tvsModelCopied.bestModel.uid)
self.assertEqual(len(tvsModel.validationMetrics),
len(tvsModelCopied.validationMetrics),
"Copied validationMetrics has the same size of the original")
for index in range(len(tvsModel.validationMetrics)):
self.assertEqual(tvsModel.validationMetrics[index],
tvsModelCopied.validationMetrics[index])
class PersistenceTest(SparkSessionTestCase):
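# Save/load round-trip tests for individual estimators, pipelines, OneVsRest and default read/write.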
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def _compare_params(self, m1, m2, param):
"""
Compare 2 ML Params instances for the given param, and assert both have the same param value
and parent. The param must be a parameter of m1.
"""
# Prevent a key-not-found error in case a param is in neither the paramMap nor the defaultParamMap.
if m1.isDefined(param):
paramValue1 = m1.getOrDefault(param)
paramValue2 = m2.getOrDefault(m2.getParam(param.name))
if isinstance(paramValue1, Params):
self._compare_pipelines(paramValue1, paramValue2)
else:
self.assertEqual(paramValue1, paramValue2) # for general types param
# Assert parents are equal
self.assertEqual(param.parent, m2.getParam(param.name).parent)
else:
# If the param is not defined on m1, then it should not be defined on m2 either. See SPARK-14931.
self.assertFalse(m2.isDefined(m2.getParam(param.name)))
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
- OneVsRest, OneVsRestModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams) or isinstance(m1, Transformer):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
for p in m1.params:
self._compare_params(m1, m2, p)
if isinstance(m1, OneVsRestModel):
self.assertEqual(len(m1.models), len(m2.models))
for x, y in zip(m1.models, m2.models):
self._compare_pipelines(x, y)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_python_transformer_pipeline_persistence(self):
"""
Pipeline[MockUnaryTransformer, Binarizer]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.range(0, 10).toDF('input')
tf = MockUnaryTransformer(shiftVal=2)\
.setInputCol("input").setOutputCol("shiftedInput")
tf2 = Binarizer(threshold=6, inputCol="shiftedInput", outputCol="binarized")
pl = Pipeline(stages=[tf, tf2])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_onevsrest(self):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))] * 10,
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
ovrPath = temp_path + "/ovr"
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/ovrModel"
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_default_read_write(self):
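# Round-trips a LogisticRegression through DefaultParamsWriter/DefaultParamsReadable, including overwrite().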
temp_path = tempfile.mkdtemp()
lr = LogisticRegression()
lr.setMaxIter(50)
lr.setThreshold(.75)
writer = DefaultParamsWriter(lr)
savePath = temp_path + "/lr"
writer.save(savePath)
reader = DefaultParamsReadable.read()
lr2 = reader.load(savePath)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(lr.extractParamMap(), lr2.extractParamMap())
# test overwrite
lr.setThreshold(.8)
writer.overwrite().save(savePath)
reader = DefaultParamsReadable.read()
lr3 = reader.load(savePath)
self.assertEqual(lr.uid, lr3.uid)
self.assertEqual(lr.extractParamMap(), lr3.extractParamMap())
class LDATest(SparkSessionTestCase):
def _compare(self, m1, m2):
"""
Temp method for comparing instances.
TODO: Replace with generic implementation once SPARK-14706 is merged.
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
if m1.isDefined(p):
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
if isinstance(m1, LDAModel):
self.assertEqual(m1.vocabSize(), m2.vocabSize())
self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
def test_persistence(self):
# Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
df = self.spark.createDataFrame([
[1, Vectors.dense([0.0, 1.0])],
[2, Vectors.sparse(2, {0: 1.0})],
], ["id", "features"])
# Fit model
lda = LDA(k=2, seed=1, optimizer="em")
distributedModel = lda.fit(df)
self.assertTrue(distributedModel.isDistributed())
localModel = distributedModel.toLocal()
self.assertFalse(localModel.isDistributed())
# Define paths
path = tempfile.mkdtemp()
lda_path = path + "/lda"
dist_model_path = path + "/distLDAModel"
local_model_path = path + "/localLDAModel"
# Test LDA
lda.save(lda_path)
lda2 = LDA.load(lda_path)
self._compare(lda, lda2)
# Test DistributedLDAModel
distributedModel.save(dist_model_path)
distributedModel2 = DistributedLDAModel.load(dist_model_path)
self._compare(distributedModel, distributedModel2)
# Test LocalLDAModel
localModel.save(local_model_path)
localModel2 = LocalLDAModel.load(local_model_path)
self._compare(localModel, localModel2)
# Clean up
try:
rmtree(path)
except OSError:
pass
class TrainingSummaryTest(SparkSessionTestCase):
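# Smoke tests checking that model training summaries are exposed and return the expected types.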
def test_linear_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
self.assertEqual(s.degreesOfFreedom, 1)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class LinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_glr_summary(self):
from pyspark.ml.linalg import Vectors
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
fitIntercept=False)
model = glr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.numInstances, 2)
self.assertTrue(isinstance(s.residuals(), DataFrame))
self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
self.assertEqual(s.degreesOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedomNull, 2)
self.assertEqual(s.rank, 1)
self.assertTrue(isinstance(s.solver, basestring))
self.assertTrue(isinstance(s.aic, float))
self.assertTrue(isinstance(s.deviance, float))
self.assertTrue(isinstance(s.nullDeviance, float))
self.assertTrue(isinstance(s.dispersion, float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class GeneralizedLinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.deviance, s.deviance)
def test_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_gaussian_mixture_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
gmm = GaussianMixture(k=2)
model = gmm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertTrue(isinstance(s.probability, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_bisecting_kmeans_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
bkm = BisectingKMeans(k=2)
model = bkm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_kmeans_summary(self):
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
class OneVsRestTests(SparkSessionTestCase):
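# Tests for OneVsRest: copy(), transform() output columns, and weightCol support.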
def test_copy(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "prediction"])
def test_support_for_weightCol(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),
(1.0, Vectors.sparse(2, [], []), 1.0),
(2.0, Vectors.dense(0.5, 0.5), 1.0)],
["label", "features", "weight"])
# classifier inherits hasWeightCol
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, weightCol="weight")
self.assertIsNotNone(ovr.fit(df))
# classifier doesn't inherit hasWeightCol
dt = DecisionTreeClassifier()
ovr2 = OneVsRest(classifier=dt, weightCol="weight")
self.assertIsNotNone(ovr2.fit(df))
class HashingTFTest(SparkSessionTestCase):
def test_apply_binary_term_freqs(self):
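# With setBinary(True), repeated terms contribute at most 1.0 to each hashed feature.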
df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
n = 10
hashingTF = HashingTF()
hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))
class GeneralizedLinearRegressionTest(SparkSessionTestCase):
def test_tweedie_distribution(self):
df = self.spark.createDataFrame(
[(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0)), ], ["label", "features"])
glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))
model2 = glr.setLinkPower(-1.0).fit(df)
self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))
self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))
def test_offset(self):
df = self.spark.createDataFrame(
[(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], ["label", "weight", "offset", "features"])
glr = GeneralizedLinearRegression(family="poisson", weightCol="weight", offsetCol="offset")
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],
atol=1E-4))
self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))
class LogisticRegressionTest(SparkSessionTestCase):
def test_binomial_logistic_regression_with_bound(self):
df = self.spark.createDataFrame(
[(1.0, 1.0, Vectors.dense(0.0, 5.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0)),
(1.0, 3.0, Vectors.dense(2.0, 1.0)),
(0.0, 4.0, Vectors.dense(3.0, 3.0)), ], ["label", "weight", "features"])
lor = LogisticRegression(regParam=0.01, weightCol="weight",
lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),
upperBoundsOnIntercepts=Vectors.dense(0.0))
model = lor.fit(df)
self.assertTrue(
np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))
def test_multinomial_logistic_regression_with_bound(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lor = LogisticRegression(regParam=0.01,
lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),
upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))
model = lor.fit(df)
expected = [[4.593, 4.5516, 9.0099, 12.2904],
[1.0, 8.1093, 7.0, 10.0],
[3.041, 5.0, 8.0, 11.0]]
for i in range(0, len(expected)):
self.assertTrue(
np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))
self.assertTrue(
np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))
class FPGrowthTests(SparkSessionTestCase):
def setUp(self):
super(FPGrowthTests, self).setUp()
self.data = self.spark.createDataFrame(
[([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],
["items"])
def test_association_rules(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_association_rules = self.spark.createDataFrame(
[([3], [1], 1.0), ([2], [1], 1.0)],
["antecedent", "consequent", "confidence"]
)
actual_association_rules = fpm.associationRules
self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)
self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)
def test_freq_itemsets(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_freq_itemsets = self.spark.createDataFrame(
[([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],
["items", "freq"]
)
actual_freq_itemsets = fpm.freqItemsets
self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)
self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)
def tearDown(self):
del self.data
class ALSTest(SparkSessionTestCase):
def test_storage_levels(self):
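# Storage level setters should be reflected on both the Python wrapper and the underlying Java object.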
df = self.spark.createDataFrame(
[(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
["user", "item", "rating"])
als = ALS().setMaxIter(1).setRank(1)
# test default params
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
# test non-default params
als.setIntermediateStorageLevel("MEMORY_ONLY_2")
als.setFinalStorageLevel("DISK_ONLY")
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class DefaultValuesTests(PySparkTestCase):
"""
Test :py:class:`JavaParams` classes to see if their default Param values match
those in their Scala counterparts.
"""
def check_params(self, py_stage):
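# Compares default Param values of a Python stage against its Java counterpart.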
import pyspark.ml.feature
if not hasattr(py_stage, "_to_java"):
return
java_stage = py_stage._to_java()
if java_stage is None:
return
for p in py_stage.params:
java_param = java_stage.getParam(p.name)
py_has_default = py_stage.hasDefault(p)
java_has_default = java_stage.hasDefault(java_param)
self.assertEqual(py_has_default, java_has_default,
"Default value mismatch of param %s for Params %s"
% (p.name, str(py_stage)))
if py_has_default:
if p.name == "seed":
return # Random seeds between Spark and PySpark are different
java_default =\
_java2py(self.sc, java_stage.clear(java_param).getOrDefault(java_param))
py_stage._clear(p)
py_default = py_stage.getOrDefault(p)
if isinstance(py_stage, pyspark.ml.feature.Imputer) and p.name == "missingValue":
# SPARK-15040 - default value for Imputer param 'missingValue' is NaN,
# and NaN != NaN, so handle it specially here
import math
self.assertTrue(math.isnan(java_default) and math.isnan(py_default),
"Java default %s and python default %s are not both NaN for "
"param %s for Params %s"
% (str(java_default), str(py_default), p.name, str(py_stage)))
return
self.assertEqual(java_default, py_default,
"Java default %s != python default %s of param %s for Params %s"
% (str(java_default), str(py_default), p.name, str(py_stage)))
def test_java_params(self):
import pyspark.ml.feature
import pyspark.ml.classification
import pyspark.ml.clustering
import pyspark.ml.pipeline
import pyspark.ml.recommendation
import pyspark.ml.regression
modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
pyspark.ml.pipeline, pyspark.ml.recommendation, pyspark.ml.regression]
for module in modules:
for name, cls in inspect.getmembers(module, inspect.isclass):
if not name.endswith('Model') and issubclass(cls, JavaParams)\
and not inspect.isabstract(cls):
self.check_params(cls())
def _squared_distance(a, b):
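# Helper that dispatches squared_distance so that either argument may be a plain list or array.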
if isinstance(a, Vector):
return a.squared_distance(b)
else:
return b.squared_distance(a)
class VectorTests(MLlibTestCase):
def _test_serialize(self, v):
self.assertEqual(v, ser.loads(ser.dumps(v)))
jvec = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(v)))
nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvec)))
self.assertEqual(v, nv)
vs = [v] * 100
jvecs = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(vs)))
nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvecs)))
self.assertEqual(vs, nvs)
def test_serialize(self):
self._test_serialize(DenseVector(range(10)))
self._test_serialize(DenseVector(array([1., 2., 3., 4.])))
self._test_serialize(DenseVector(pyarray.array('d', range(10))))
self._test_serialize(SparseVector(4, {1: 1, 3: 2}))
self._test_serialize(SparseVector(3, {}))
self._test_serialize(DenseMatrix(2, 3, range(6)))
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self._test_serialize(sm1)
def test_dot(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([1, 2, 3, 4])
mat = array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
arr = pyarray.array('d', [0, 1, 2, 3])
self.assertEqual(10.0, sv.dot(dv))
self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
self.assertEqual(30.0, dv.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
self.assertEqual(30.0, lst.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
self.assertEqual(7.0, sv.dot(arr))
def test_squared_distance(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([4, 3, 2, 1])
lst1 = [4, 3, 2, 1]
arr = pyarray.array('d', [0, 2, 1, 3])
narr = array([0, 2, 1, 3])
self.assertEqual(15.0, _squared_distance(sv, dv))
self.assertEqual(25.0, _squared_distance(sv, lst))
self.assertEqual(20.0, _squared_distance(dv, lst))
self.assertEqual(15.0, _squared_distance(dv, sv))
self.assertEqual(25.0, _squared_distance(lst, sv))
self.assertEqual(20.0, _squared_distance(lst, dv))
self.assertEqual(0.0, _squared_distance(sv, sv))
self.assertEqual(0.0, _squared_distance(dv, dv))
self.assertEqual(0.0, _squared_distance(lst, lst))
self.assertEqual(25.0, _squared_distance(sv, lst1))
self.assertEqual(3.0, _squared_distance(sv, arr))
self.assertEqual(3.0, _squared_distance(sv, narr))
def test_hash(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(hash(v1), hash(v2))
self.assertEqual(hash(v1), hash(v3))
self.assertEqual(hash(v2), hash(v3))
self.assertFalse(hash(v1) == hash(v4))
self.assertFalse(hash(v2) == hash(v4))
def test_eq(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v2 == v4)
self.assertFalse(v1 == v5)
self.assertFalse(v1 == v6)
def test_equals(self):
indices = [1, 2, 4]
values = [1., 3., 2.]
self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.]))
def test_conversion(self):
# numpy arrays should be automatically upcast to float64
# tests for fix of [SPARK-5089]
v = array([1, 2, 3, 4], dtype='float64')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
v = array([1, 2, 3, 4], dtype='float32')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
def test_sparse_vector_indexing(self):
sv = SparseVector(5, {1: 1, 3: 2})
self.assertEqual(sv[0], 0.)
self.assertEqual(sv[3], 2.)
self.assertEqual(sv[1], 1.)
self.assertEqual(sv[2], 0.)
self.assertEqual(sv[4], 0.)
self.assertEqual(sv[-1], 0.)
self.assertEqual(sv[-2], 2.)
self.assertEqual(sv[-3], 0.)
self.assertEqual(sv[-5], 0.)
for ind in [5, -6]:
self.assertRaises(IndexError, sv.__getitem__, ind)
for ind in [7.8, '1']:
self.assertRaises(TypeError, sv.__getitem__, ind)
zeros = SparseVector(4, {})
self.assertEqual(zeros[0], 0.0)
self.assertEqual(zeros[3], 0.0)
for ind in [4, -5]:
self.assertRaises(IndexError, zeros.__getitem__, ind)
empty = SparseVector(0, {})
for ind in [-1, 0, 1]:
self.assertRaises(IndexError, empty.__getitem__, ind)
def test_sparse_vector_iteration(self):
self.assertListEqual(list(SparseVector(3, [], [])), [0.0, 0.0, 0.0])
self.assertListEqual(list(SparseVector(5, [0, 3], [1.0, 2.0])), [1.0, 0.0, 0.0, 2.0, 0.0])
def test_matrix_indexing(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
expected = [[0, 6], [1, 8], [4, 10]]
for i in range(3):
for j in range(2):
self.assertEqual(mat[i, j], expected[i][j])
for i, j in [(-1, 0), (4, 1), (3, 4)]:
self.assertRaises(IndexError, mat.__getitem__, (i, j))
def test_repr_dense_matrix(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True)
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(6, 3, zeros(18))
self.assertTrue(
repr(mat),
'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., \
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)')
def test_repr_sparse_matrix(self):
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertTrue(
repr(sm1t),
'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], True)')
indices = tile(arange(6), 3)
values = ones(18)
sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values)
self.assertTrue(
repr(sm), "SparseMatrix(6, 3, [0, 6, 12, 18], \
[0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], \
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., \
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)")
self.assertTrue(
str(sm),
"6 X 3 CSCMatrix\n\
(0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n\
(0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n\
(0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..")
sm = SparseMatrix(1, 18, zeros(19), [], [])
self.assertTrue(
repr(sm),
'SparseMatrix(1, 18, \
[0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)')
def test_sparse_matrix(self):
# Test sparse matrix creation.
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self.assertEqual(sm1.numRows, 3)
self.assertEqual(sm1.numCols, 4)
self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
self.assertTrue(
repr(sm1),
'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)')
# Test indexing
expected = [
[0, 0, 0, 0],
[1, 0, 4, 0],
[2, 0, 5, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1[i, j])
self.assertTrue(array_equal(sm1.toArray(), expected))
for i, j in [(-1, 1), (4, 3), (3, 5)]:
self.assertRaises(IndexError, sm1.__getitem__, (i, j))
# Test conversion to dense and sparse.
smnew = sm1.toDense().toSparse()
self.assertEqual(sm1.numRows, smnew.numRows)
self.assertEqual(sm1.numCols, smnew.numCols)
self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
self.assertTrue(array_equal(sm1.values, smnew.values))
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertEqual(sm1t.numRows, 3)
self.assertEqual(sm1t.numCols, 4)
self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
expected = [
[3, 2, 0, 0],
[0, 0, 4, 0],
[9, 0, 8, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1t[i, j])
self.assertTrue(array_equal(sm1t.toArray(), expected))
def test_dense_matrix_is_transposed(self):
mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
self.assertEqual(mat1, mat)
expected = [[0, 4], [1, 6], [3, 9]]
for i in range(3):
for j in range(2):
self.assertEqual(mat1[i, j], expected[i][j])
self.assertTrue(array_equal(mat1.toArray(), expected))
sm = mat1.toSparse()
self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2]))
self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5]))
self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
def test_norms(self):
a = DenseVector([0, 2, 3, -1])
self.assertAlmostEqual(a.norm(2), 3.742, 3)
self.assertEqual(a.norm(1), 6)
self.assertEqual(a.norm(inf), 3)
a = SparseVector(4, [0, 2], [3, -4])
self.assertAlmostEqual(a.norm(2), 5)
self.assertEqual(a.norm(1), 7)
self.assertEqual(a.norm(inf), 4)
tmp = SparseVector(4, [0, 2], [3, 0])
self.assertEqual(tmp.numNonzeros(), 1)
class VectorUDTTests(MLlibTestCase):
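# Serialization and schema-inference tests for the VectorUDT SQL user-defined type.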
dv0 = DenseVector([])
dv1 = DenseVector([1.0, 2.0])
sv0 = SparseVector(2, [], [])
sv1 = SparseVector(2, [1], [2.0])
udt = VectorUDT()
def test_json_schema(self):
self.assertEqual(VectorUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for v in [self.dv0, self.dv1, self.sv0, self.sv1]:
self.assertEqual(v, self.udt.deserialize(self.udt.serialize(v)))
def test_infer_schema(self):
rdd = self.sc.parallelize([Row(label=1.0, features=self.dv1),
Row(label=0.0, features=self.sv1)])
df = rdd.toDF()
schema = df.schema
field = [f for f in schema.fields if f.name == "features"][0]
self.assertEqual(field.dataType, self.udt)
vectors = df.rdd.map(lambda p: p.features).collect()
self.assertEqual(len(vectors), 2)
for v in vectors:
if isinstance(v, SparseVector):
self.assertEqual(v, self.sv1)
elif isinstance(v, DenseVector):
self.assertEqual(v, self.dv1)
else:
raise TypeError("expecting a vector but got %r of type %r" % (v, type(v)))
class MatrixUDTTests(MLlibTestCase):
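# Serialization and schema-inference tests for the MatrixUDT SQL user-defined type.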
dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10])
dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True)
sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0])
sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True)
udt = MatrixUDT()
def test_json_schema(self):
self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for m in [self.dm1, self.dm2, self.sm1, self.sm2]:
self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m)))
def test_infer_schema(self):
rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)])
df = rdd.toDF()
schema = df.schema
self.assertEqual(schema.fields[1].dataType, self.udt)
matrices = df.rdd.map(lambda x: x._2).collect()
self.assertEqual(len(matrices), 2)
for m in matrices:
if isinstance(m, DenseMatrix):
self.assertTrue(m, self.dm1)
elif isinstance(m, SparseMatrix):
self.assertTrue(m, self.sm1)
else:
raise ValueError("Expected a matrix but got type %r" % type(m))
class WrapperTests(MLlibTestCase):
def test_new_java_array(self):
# test array of strings
str_list = ["a", "b", "c"]
java_class = self.sc._gateway.jvm.java.lang.String
java_array = JavaWrapper._new_java_array(str_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), str_list)
# test array of integers
int_list = [1, 2, 3]
java_class = self.sc._gateway.jvm.java.lang.Integer
java_array = JavaWrapper._new_java_array(int_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), int_list)
# test array of floats
float_list = [0.1, 0.2, 0.3]
java_class = self.sc._gateway.jvm.java.lang.Double
java_array = JavaWrapper._new_java_array(float_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), float_list)
# test array of bools
bool_list = [False, True, True]
java_class = self.sc._gateway.jvm.java.lang.Boolean
java_array = JavaWrapper._new_java_array(bool_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), bool_list)
# test array of Java DenseVectors
v1 = DenseVector([0.0, 1.0])
v2 = DenseVector([1.0, 0.0])
vec_java_list = [_py2java(self.sc, v1), _py2java(self.sc, v2)]
java_class = self.sc._gateway.jvm.org.apache.spark.ml.linalg.DenseVector
java_array = JavaWrapper._new_java_array(vec_java_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), [v1, v2])
# test empty array
java_class = self.sc._gateway.jvm.java.lang.Integer
java_array = JavaWrapper._new_java_array([], java_class)
self.assertEqual(_java2py(self.sc, java_array), [])
class ChiSquareTestTests(SparkSessionTestCase):
def test_chisquaretest(self):
data = [[0, Vectors.dense([0, 1, 2])],
[1, Vectors.dense([1, 1, 1])],
[2, Vectors.dense([2, 1, 0])]]
df = self.spark.createDataFrame(data, ['label', 'feat'])
res = ChiSquareTest.test(df, 'feat', 'label')
# This line is hitting the collect bug described in #17218, commented for now.
# pValues = res.select("degreesOfFreedom").collect())
self.assertIsInstance(res, DataFrame)
fieldNames = set(field.name for field in res.schema.fields)
expectedFields = ["pValues", "degreesOfFreedom", "statistics"]
self.assertTrue(all(field in fieldNames for field in expectedFields))
class UnaryTransformerTests(SparkSessionTestCase):
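# Tests for the Python-only UnaryTransformer base class via MockUnaryTransformer.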
def test_unary_transformer_validate_input_type(self):
shiftVal = 3
transformer = MockUnaryTransformer(shiftVal=shiftVal)\
.setInputCol("input").setOutputCol("output")
# should not raise any errors
transformer.validateInputType(DoubleType())
with self.assertRaises(TypeError):
# passing the wrong input type should raise an error
transformer.validateInputType(IntegerType())
def test_unary_transformer_transform(self):
shiftVal = 3
transformer = MockUnaryTransformer(shiftVal=shiftVal)\
.setInputCol("input").setOutputCol("output")
df = self.spark.range(0, 10).toDF('input')
df = df.withColumn("input", df.input.cast(dataType="double"))
transformed_df = transformer.transform(df)
results = transformed_df.select("input", "output").collect()
for res in results:
self.assertEqual(res.input + shiftVal, res.output)
if __name__ == "__main__":
from pyspark.ml.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
|
xflin/spark
|
python/pyspark/ml/tests.py
|
Python
|
apache-2.0
| 89,730
|
[
"Gaussian"
] |
45849ef59db2cd33b57b73a95f62d491acf9189712de7c269d70b740af1727d4
|
''' DIRAC.ResourceStatusSystem.Agent package
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Agent/__init__.py
|
Python
|
gpl-3.0
| 179
|
[
"DIRAC"
] |
400076fdeccf404657dac96f3b2a3831d8ae89a8596a5c8fc04b677844a7a415
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage the power state of a virtual machine (power on, power off, suspend, shutdown, reboot, restart, etc.),
modify various virtual machine components (network, disk, customization, etc.),
rename a virtual machine, and remove a virtual machine together with its associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure the user used for vmware_guest has the correct level of privileges.
- For example, the following is the list of minimum privileges required to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0 and 6.5
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
- 'If C(state) is set to C(present) and the virtual machine exists, ensure the virtual machine
configuration conforms to the task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
and the virtual machine does not exist, then the virtual machine is deployed with the given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
- 'If multiple virtual machines with the same name exist, then C(folder) is a required parameter to
uniquely identify the virtual machine.'
- This parameter is required if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
and the virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
- If multiple virtual machines match the name, use the first or last one found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
- If the virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as a template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
- This parameter is required while deploying a new virtual machine. version_added 2.5.
- 'If multiple machines are found with the same name, this parameter is used to uniquely
identify the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
- All parameters case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. Value should be multiple of C(num_cpus).'
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- ' - C(memory_reservation) (integer): Amount of memory in MB to set resource limits for memory. version_added: 2.5'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
- " virtual machine with CensOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine.
- >
Valid values are referenced here:
U(http://pubs.vmware.com/vsphere-6-5/topic/com.vmware.wssdk.apiref.doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Resizing disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
- ' - C(datastore) (string): Datastore to use for the disk. If C(autoselect_datastore) is enabled, filter datastore selection.'
- ' - C(autoselect_datastore) (bool): select the least used datastore. Specify only if C(datastore) is not specified.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM will be disconnected but present.'
- ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso). Required if type is set C(iso).'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
- Resource pool should be child of the selected host parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful when removing a virtual machine which is in the powered on state.
- 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
- Define a list of custom values to set on virtual machine.
- A custom value object takes two fields C(key) and C(value).
- Incorrect key and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
- Removing NICs is not allowed, while reconfiguring the virtual machine.
- All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(start_connected) (bool): Specifies whether the virtual network adapter starts connected when the associated virtual machine powers on. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
- Parameters for OS customization when cloning from the template or the virtual machine.
- Not all operating systems are supported for customization with every vCenter version,
please check VMware documentation for the respective OS customization.
- For the supported customization operating system matrix, see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf)
- All parameters and VMware object names are case sensitive.
- Linux based OSes require the Perl package to be installed for OS customization.
- 'Common parameters (Linux/Windows):'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
- ' - C(hostname) (string): Computer hostname (default: shortened C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
and minus; the rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologons after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join the AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join the AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
- A list of vApp properties
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation) (string): Set to C(remove) to delete a property; this attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
# This is the hostname of the particular ESXi server on which the user wants the VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation: 512
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Template and customize
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
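# Illustrative sketch only: assumes a pre-existing vCenter customization
# specification named 'spec_1'; credentials, datacenter, cluster and template
# names below are placeholders.
- name: Clone a virtual machine from a template using an existing customization specification
  vmware_guest:
    hostname: "{{ vcenter_ip }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: False
    datacenter: datacenter1
    cluster: cluster
    name: testvm-3
    template: template_windows
    customization_spec: spec_1
    wait_for_ip_address: yes
  delegate_to: localhost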
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
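# Illustrative sketch only: assumes a source virtual machine or template named
# 'testvm_base' that already has a snapshot called 'snap1'; all other values
# are placeholders.
- name: Create a linked clone from an existing snapshot
  vmware_guest:
    hostname: "{{ vcenter_ip }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: False
    datacenter: datacenter1
    folder: /DC1/vm/
    name: testvm_linked_clone
    template: testvm_base
    snapshot_src: snap1
    linked_clone: yes
    state: poweredon
  delegate_to: localhost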
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: string
value: 10.10.10.1
- id: old_property
operation: remove
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_ip }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: False
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
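# Illustrative sketch only: gracefully shuts down the guest OS (requires VMware
# Tools inside the guest) and waits up to 200 seconds for the poweredoff state;
# the UUID is a placeholder.
- name: Shut down the guest OS of a virtual machine and wait for power off
  vmware_guest:
    hostname: "{{ vcenter_ip }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: False
    uuid: "{{ vm_uuid }}"
    state: shutdownguest
    state_change_timeout: 200
  delegate_to: localhost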
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
HAS_PYVMOMI = False
try:
import pyVmomi
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi)
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMWare Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
@staticmethod
def create_scsi_controller(scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if scsi_type == 'lsilogic':
scsi_ctl.device = vim.vm.device.VirtualLsiLogicController()
elif scsi_type == 'paravirtual':
scsi_ctl.device = vim.vm.device.ParaVirtualSCSIController()
elif scsi_type == 'buslogic':
scsi_ctl.device = vim.vm.device.VirtualBusLogicController()
elif scsi_type == 'lsilogicsas':
scsi_ctl.device = vim.vm.device.VirtualLsiLogicSASController()
scsi_ctl.device.deviceInfo = vim.Description()
scsi_ctl.device.slotInfo = vim.vm.device.VirtualDevice.PciBusSlotInfo()
scsi_ctl.device.slotInfo.pciSlotNumber = 16
scsi_ctl.device.controllerKey = 100
scsi_ctl.device.unitNumber = 3
scsi_ctl.device.busNumber = 0
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
@staticmethod
def is_scsi_controller(device):
return isinstance(device, vim.vm.device.VirtualLsiLogicController) or \
isinstance(device, vim.vm.device.ParaVirtualSCSIController) or \
isinstance(device, vim.vm.device.VirtualBusLogicController) or \
isinstance(device, vim.vm.device.VirtualLsiLogicSASController)
@staticmethod
def create_ide_controller():
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
ide_ctl.device.busNumber = 0
return ide_ctl
@staticmethod
def create_cdrom(ide_ctl, cdrom_type, iso_path=None):
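# Build a new virtual CD-ROM device spec on the given IDE controller: key -1
# marks a newly added device, the backing is remote passthrough for 'none' and
# 'client' or an ISO file backing for 'iso', and startConnected is enabled for
# every type except 'none'.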
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_ctl.device.key
cdrom_spec.device.key = -1
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
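# Compare an existing CD-ROM device against the requested configuration: the
# backing class (plus the ISO path for 'iso') and the allowGuestControl and
# startConnected flags must match; the live 'connected' flag is only checked
# while the virtual machine is powered on.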
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
def create_scsi_disk(self, scsi_ctl, disk_index=None):
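# Build a new flat, persistent virtual disk spec on the given SCSI controller.
# SCSI unit number 7 is reserved for the controller itself, so it is never
# assigned to a disk and is skipped when advancing the next unit number.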
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.backing.diskMode = 'persistent'
diskspec.device.controllerKey = scsi_ctl.device.key
if self.next_disk_unit_number == 7:
raise AssertionError()
if disk_index == 7:
raise AssertionError()
"""
Configure disk unit number.
"""
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
# unit number 7 is reserved to SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and self.is_valid_mac_addr(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
@staticmethod
def is_valid_mac_addr(mac_addr):
"""
Function to validate MAC address for given string
Args:
mac_addr: string to validate as MAC address
Returns: (Boolean) True if string is valid MAC address, otherwise False
"""
mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
return bool(mac_addr_regex.match(mac_addr))
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if self.get_parent_datacenter(result).name != self.dc_name:
result = None
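# The global match belongs to a different datacenter, so search again
# restricted to objects within this cache's datacenter.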
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or obj.name == name:
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
tmpobjs = objects.copy()
for k, v in objects.items():
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
tmpobjs.pop(k, None)
objects = tmpobjs
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
def get_parent_datacenter(self, obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
if obj in self.parent_datacenters:
return self.parent_datacenters[obj]
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
self.parent_datacenters[obj] = datacenter
return datacenter
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.change_detected = False
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': False, 'failed': True, 'msg': task.info.error.msg}
else:
return {'changed': True, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template'] and not self.params['guest_id']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError as e:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware']:
mem_reservation = None
try:
mem_reservation = int(self.params['hardware'].get('mem_reservation'))
except ValueError as e:
self.module.fail_json(msg="hardware.mem_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError as e:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError as e:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError as e:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError as e:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
# num_cpu is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
self.configspec.memoryMB = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation' in self.params['hardware']:
memory_reservation_mb = 0
try:
memory_reservation_mb = int(self.params['hardware']['memory_reservation'])
except ValueError as e:
self.module.fail_json(msg="Failed to set memory_reservation value."
"Valid value for memory_reservation value in MB (integer): %s" % e)
mem_alloc = vim.ResourceAllocationInfo()
mem_alloc.reservation = memory_reservation_mb
self.configspec.memoryAllocation = mem_alloc
if vm_obj is None or self.configspec.memoryAllocation.reservation != vm_obj.config.memoryAllocation.reservation:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
def configure_cdrom(self, vm_obj):
# Configure the VM CD-ROM
if "cdrom" in self.params and self.params["cdrom"]:
if "type" not in self.params["cdrom"] or self.params["cdrom"]["type"] not in ["none", "client", "iso"]:
self.module.fail_json(msg="cdrom.type is mandatory")
if self.params["cdrom"]["type"] == "iso" and ("iso_path" not in self.params["cdrom"] or not self.params["cdrom"]["iso_path"]):
self.module.fail_json(msg="cdrom.iso_path is mandatory in case cdrom.type is iso")
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
cdrom_spec = None
cdrom_device = self.get_vm_cdrom_device(vm=vm_obj)
iso_path = self.params["cdrom"]["iso_path"] if "iso_path" in self.params["cdrom"] else None
if cdrom_device is None:
# Creating new CD-ROM
ide_device = self.get_vm_ide_device(vm=vm_obj)
if ide_device is None:
# Creating new IDE device
ide_device = self.device_helper.create_ide_controller()
self.change_detected = True
self.configspec.deviceChange.append(ide_device)
elif len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4 IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_ctl=ide_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
# Updating an existing CD-ROM
if self.params["cdrom"]["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif self.params["cdrom"]["type"] == "iso":
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (self.params["cdrom"]["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (self.params["cdrom"]["type"] != "none")
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.hardware.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 15):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
if vm_obj is not None:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state != 'error':
self.change_detected = True
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
def get_vm_cdrom_device(self, vm=None):
if vm is None:
return None
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualCdrom):
return device
return None
def get_vm_ide_device(self, vm=None):
if vm is None:
return None
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualIDEController):
return device
return None
def get_vm_network_interfaces(self, vm=None):
if vm is None:
return []
device_list = []
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualPCNet32) or \
isinstance(device, vim.vm.device.VirtualVmxnet2) or \
isinstance(device, vim.vm.device.VirtualVmxnet3) or \
isinstance(device, vim.vm.device.VirtualE1000) or \
isinstance(device, vim.vm.device.VirtualE1000e) or \
isinstance(device, vim.vm.device.VirtualSriovEthernetCard):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
Function to sanitize user provided network params
Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and find_obj(self.content, [vim.Network], network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
dvp.config.defaultPortConfig.vlan.vlanId == int(network['vlan']):
network['name'] = dvp.config.name
break
if dvp.config.name == str(network['vlan']):
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
# Type is an optional parameter; if the user provided an IP or netmask, assume
# the network type is 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not PyVmomiDeviceHelper.is_valid_mac_addr(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
def configure_network(self, vm_obj):
# Ignore an empty networks list; this permits keeping existing networks when deploying a template/cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
# List current device for Clone or Idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
# We are editing existing network devices; this is the case when we
# are cloning from a VM or a Template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
else:
# Default device type is vmxnet3, VMWare best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
# If the user specifies a distributed port group without associating it with the host system on which
# the virtual machine is going to be deployed, then we get an error. We can infer that there is no
# association between the given distributed port group and the host system.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup." % pg_obj.name)
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
if len(self.params['vapp_properties']) == 0:
return
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
self.module.fail_json(msg="Failed to set vApp property field='%s' and value='%s'. Error: %s"
% (property_name, property_value, to_text(e)))
else:
if property_spec.get('operation') == 'remove':
# attempt to delete a non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
def customize_customvalues(self, vm_obj, config_spec):
if len(self.params['customvalues']) == 0:
return
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items required both 'key' and 'value fields.")
# If kv differs from the value fetched from facts, change it
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
def customize_vm(self, vm_obj):
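# Build the guest OS customization specification: either reuse an existing
# vCenter customization specification referenced by 'customization_spec', or
# assemble per-NIC IP settings, global DNS settings and a Sysprep (Windows) /
# LinuxPrep (other guests) identity from the 'customization' parameters.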
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['dns_suffix']
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
# Setting hostName, orgName and fullName is mandatory, so we set some default when missing
ident.userData.computerName = vim.vm.customization.FixedName()
ident.userData.computerName.name = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
ident.guiUnattended.timeZone = self.params['customization']['timezone']
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumeric and minus which is allowed by RFC 952
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
ident.hostName.name = valid_hostname
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
if unit in disk_units:
unit = unit.lower()
return expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size."
" Supported units are ['%s']." % (unit,
"', '".join(disk_units.keys())))
# Disk entry present but no size found, fail
self.module.fail_json(
msg="No size, size_kb, size_mb, size_gb or size_tb attribute found in the disk configuration")
def configure_disks(self, vm_obj):
# Ignore an empty disk list; this permits keeping existing disks when deploying a template/cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
# Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
# If we are manipulating an existing object which has disks and disk_index is within the existing disks
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
if disk_type == 'thin':
diskspec.device.backing.thinProvisioned = True
elif disk_type == 'eagerzeroedthick':
diskspec.device.backing.eagerlyScrub = True
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
# increment index for next disk search
disk_index += 1
# index 7 is reserved to SCSI controller
if disk_index == 7:
disk_index += 1
kb = self.get_configured_disk_size(expected_disk_spec)
# VMware doesn't allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
self.module.fail_json(
msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
(kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in an invalid state or in maintenance mode.' % self.params)
return hostsystem
def autoselect_datastore(self):
datastore = None
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if ds.summary.freeSpace > datastore_freespace:
datastore = ds
datastore_freespace = ds.summary.freeSpace
return datastore
def get_recommended_datastore(self, datastore_cluster_obj=None):
"""
Function to return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster
"""
if datastore_cluster_obj is None:
return None
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
# We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception as e:
# There is some error so we fall back to general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
def select_datastore(self, vm_obj=None):
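# Datastore selection order: honour disk[0].autoselect_datastore (freest
# datastore in the datacenter) or disk[0].datastore (resolving a datastore
# cluster through an SDRS recommendation when one is named); otherwise fall
# back to the datastore backing the template's first disk; finally verify the
# result belongs to the requested datacenter.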
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If the user specified a datastore cluster, get the recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
if not datastore and self.params['template']:
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
if disks:
datastore = disks[0].backing.datastore
datastore_name = datastore.name
# validation
if datastore:
dc = self.cache.get_parent_datacenter(datastore)
if dc.name != self.params['datacenter']:
datastore = self.autoselect_datastore()
datastore_name = datastore.name
if not datastore:
if len(self.params['disk']) != 0 or self.params['template'] is None:
self.module.fail_json(msg="Unable to find the datastore with given parameters."
" This could mean, %s is a non-existent virtual machine and module tried to"
" deploy it as new virtual machine with no disk. Please specify disks parameter"
" or specify template to clone from." % self.params['name'])
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
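# Walk up the inventory parent chain from obj, returning True as soon as a
# parent with the same name as 'parent' is found, and False once the root
# folder ('group-d1' / 'ha-folder-root') or a missing parent is reached.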
if obj is None and parent is None:
raise AssertionError()
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
# Check if we have reached till root folder
moid = current_parent._moId
if moid in ['group-d1', 'ha-folder-root']:
return False
current_parent = current_parent.parent
if current_parent is None:
return False
def select_resource_pool_by_name(self, resource_pool_name):
resource_pool = self.cache.find_obj(self.content, [vim.ResourcePool], resource_pool_name)
if resource_pool is None:
self.module.fail_json(msg='Could not find resource_pool "%s"' % resource_pool_name)
return resource_pool
def select_resource_pool_by_host(self, host):
resource_pools = self.cache.get_all_objs(self.content, [vim.ResourcePool])
for rp in resource_pools.items():
if not rp[0]:
continue
if not hasattr(rp[0], 'parent') or not rp[0].parent:
continue
# Find resource pool on host
if self.obj_has_parent(rp[0].parent, host.parent):
# If no resource_pool selected or it's the selected pool, return it
if self.module.params['resource_pool'] is None or rp[0].name == self.module.params['resource_pool']:
return rp[0]
if self.module.params['resource_pool'] is not None:
self.module.fail_json(msg="Could not find resource_pool %s for selected host %s"
% (self.module.params['resource_pool'], host.name))
else:
self.module.fail_json(msg="Failed to find a resource group for %s" % host.name)
def get_scsi_type(self):
disk_controller_type = "paravirtual"
# determine the SCSI controller type from the hardware parameters
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def find_folder(self, searchpath):
""" Walk inventory objects one position of the searchpath at a time """
# split the searchpath so we can iterate through it
paths = [x.replace('/', '') for x in searchpath.split('/')]
paths_total = len(paths) - 1
position = 0
# recursive walk while looking for next element in searchpath
root = self.content.rootFolder
while root and position <= paths_total:
change = False
if hasattr(root, 'childEntity'):
for child in root.childEntity:
if child.name == paths[position]:
root = child
position += 1
change = True
break
elif isinstance(root, vim.Datacenter):
if hasattr(root, 'vmFolder'):
if root.vmFolder.name == paths[position]:
root = root.vmFolder
position += 1
change = True
else:
root = None
if not change:
root = None
return root
def get_resource_pool(self):
resource_pool = None
# highest priority, resource_pool given.
if self.params['resource_pool']:
resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])
# next priority, esxi hostname given.
elif self.params['esxi_hostname']:
host = self.select_host()
resource_pool = self.select_resource_pool_by_host(host)
# next priority, cluster given, take the root of the pool
elif self.params['cluster']:
cluster = self.cache.get_cluster(self.params['cluster'])
if cluster is None:
self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params)
resource_pool = cluster.resourcePool
# fallback, pick any RP
else:
resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])
if resource_pool is None:
self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
return resource_pool
def deploy_vm(self):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - static IPs
self.folder = self.params.get('folder', None)
if self.folder is None:
self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
# Prepend / if it was missing from the folder path, also strip trailing slashes
if not self.folder.startswith('/'):
self.folder = '/%(folder)s' % self.params
self.folder = self.folder.rstrip('/')
datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if datacenter is None:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
dcpath = compile_folder_path_for_object(datacenter)
# Nested folder does not have trailing /
if not dcpath.endswith('/'):
dcpath += '/'
# Check for full path first in case it was already supplied
if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
fullpath = self.folder
elif self.folder.startswith('/vm/') or self.folder == '/vm':
fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
elif self.folder.startswith('/'):
fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
else:
fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
# abort if no strategy was successful
if f_obj is None:
# Add some debugging values in failure.
details = {
'datacenter': datacenter.name,
'datacenter_path': dcpath,
'folder': self.folder,
'full_search_path': fullpath,
}
self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
details=details)
destfolder = f_obj
if self.params['template']:
vm_obj = self.get_vm_or_template(template_name=self.params['template'])
if vm_obj is None:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# always get a resource_pool
resource_pool = self.get_resource_pool()
# set the destination datastore for VM & disks
(datastore, datastore_name) = self.select_datastore(vm_obj)
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_hardware_params(vm_obj=vm_obj)
self.configure_resource_alloc_info(vm_obj=vm_obj)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
self.configure_cdrom(vm_obj=vm_obj)
# Find out whether we need network customizations (find keys in the dictionary that require customization)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan'):
network_changes = True
break
if len(self.params['customization']) > 0 or network_changes is True:
self.customize_vm(vm_obj=vm_obj)
clonespec = None
clone_method = None
try:
if self.params['template']:
# create the relocation spec
relospec = vim.vm.RelocateSpec()
# Only select specific host when ESXi hostname is provided
if self.params['esxi_hostname']:
relospec.host = self.select_host()
relospec.datastore = datastore
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# > pool: For a clone operation from a template to a virtual machine, this argument is required.
relospec.pool = resource_pool
if self.params['snapshot_src'] is not None and self.params['linked_clone']:
relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=relospec)
if self.customspec:
clonespec.customization = self.customspec
if self.params['snapshot_src'] is not None:
snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList, snapname=self.params['snapshot_src'])
if len(snapshot) != 1:
self.module.fail_json(msg='virtual machine "%(template)s" does not contain snapshot named "%(snapshot_src)s"' % self.params)
clonespec.snapshot = snapshot[0].snapshot
clonespec.config = self.configspec
clone_method = 'Clone'
try:
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
"due to permission issue: %s" % (self.params['name'],
destfolder,
to_native(e.msg)))
self.change_detected = True
else:
# ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "]")
clone_method = 'CreateVM_Task'
try:
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
except vmodl.fault.InvalidRequest as e:
self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
"parameter %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to create virtual machine due to "
"product versioning restrictions: %s" % to_native(e.msg))
self.change_detected = True
self.wait_for_task(task)
except TypeError as e:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
# provide these to the user for debugging
clonespec_json = serialize_spec(clonespec)
configspec_json = serialize_spec(self.configspec)
kwargs = {
'changed': self.change_detected,
'failed': True,
'msg': task.info.error.msg,
'clonespec': clonespec_json,
'configspec': configspec_json,
'clone_method': clone_method
}
return kwargs
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
if self.params['customvalues']:
vm_custom_spec = vim.vm.ConfigSpec()
self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
task = vm.ReconfigVM_Task(vm_custom_spec)
self.wait_for_task(task)
if self.params['wait_for_ip_address'] or self.params['state'] in ['poweredon', 'restarted']:
set_vm_power_state(self.content, vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
self.wait_for_vm_ip(vm)
vm_facts = self.gather_facts(vm)
return {'changed': self.change_detected, 'failed': False, 'instance': vm_facts}
def get_snapshots_by_name_recursively(self, snapshots, snapname):
snap_obj = []
for snapshot in snapshots:
if snapshot.name == snapname:
snap_obj.append(snapshot)
else:
snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
return snap_obj
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_hardware_params(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.configure_cdrom(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
self.configure_vapp_properties(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
change_applied = False
relospec = vim.vm.RelocateSpec()
if self.params['resource_pool']:
relospec.pool = self.select_resource_pool_by_name(self.params['resource_pool'])
if relospec.pool is None:
self.module.fail_json(msg='Unable to find resource pool "%(resource_pool)s"' % self.params)
elif relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=relospec)
self.wait_for_task(task)
change_applied = True
# Only send a VMware reconfiguration task if we detected a modification
if self.change_detected:
task = None
try:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
self.wait_for_task(task)
change_applied = True
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
return {'changed': change_applied, 'failed': True, 'msg': task.info.error.msg}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
change_applied = True
if task.info.state == 'error':
return {'changed': change_applied, 'failed': True, 'msg': task.info.error.msg}
# Mark VM as Template
if self.params['is_template'] and not self.current_vm_obj.config.template:
try:
self.current_vm_obj.MarkAsTemplate()
except vmodl.fault.NotSupported as e:
self.module.fail_json(msg="Failed to mark virtual machine [%s] "
"as template: %s" % (self.params['name'], e.msg))
change_applied = True
# Mark Template as VM
elif not self.params['is_template'] and self.current_vm_obj.config.template:
if self.params['resource_pool']:
resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])
if resource_pool is None:
self.module.fail_json(msg='Unable to find resource pool "%(resource_pool)s"' % self.params)
self.current_vm_obj.MarkAsVirtualMachine(pool=resource_pool)
# Automatically update the VMware UUID when converting a template to a VM.
# This avoids an interactive prompt during VM startup.
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
if not uuid_action:
uuid_action_opt = vim.option.OptionValue()
uuid_action_opt.key = "uuid.action"
uuid_action_opt.value = "create"
self.configspec.extraConfig.append(uuid_action_opt)
self.change_detected = True
change_applied = True
else:
self.module.fail_json(msg="Resource pool must be specified when converting template to VM!")
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': change_applied, 'failed': False, 'instance': vm_facts}
@staticmethod
def wait_for_task(task):
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
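# Poll the task until it reaches a terminal state ('error' or 'success').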
while task.info.state not in ['error', 'success']:
time.sleep(1)
def wait_for_vm_ip(self, vm, poll=100, sleep=5):
ips = None
facts = {}
thispoll = 0
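# Poll the gathered guest facts until an IPv4 or IPv6 address shows up,
# for at most poll iterations of sleep seconds each (roughly 500 s with the defaults).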
while not ips and thispoll <= poll:
newvm = self.get_vm()
facts = self.gather_facts(newvm)
if facts['ipv4'] or facts['ipv6']:
ips = True
else:
time.sleep(sleep)
thispoll += 1
return facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
template=dict(type='str', aliases=['template_src']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
folder=dict(type='str'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
cdrom=dict(type='dict', default={}),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
state_change_timeout=dict(type='int', default=0),
snapshot_src=dict(type='str'),
linked_clone=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', default={}, no_log=True),
customization_spec=dict(type='str', default=None),
vapp_properties=dict(type='list', default=[]),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['cluster', 'esxi_hostname'],
],
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='remove_vm',
)
module.exit_json(**result)
if module.params['force']:
# has to be poweredoff first
set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm)
elif module.params['state'] == 'present':
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
desired_operation='reconfigure_vm',
)
module.exit_json(**result)
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='set_vm_power_state',
)
module.exit_json(**result)
# set powerstate
tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
if tmp_result['changed']:
result["changed"] = True
if not tmp_result["failed"]:
result["failed"] = False
result['instance'] = tmp_result['instance']
else:
# This should not happen
raise AssertionError()
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
if module.check_mode:
result.update(
changed=True,
desired_operation='deploy_vm',
)
module.exit_json(**result)
result = pyv.deploy_vm()
if result['failed']:
module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg'])
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
mheap/ansible
|
lib/ansible/modules/cloud/vmware/vmware_guest.py
|
Python
|
gpl-3.0
| 111,203
|
[
"VisIt"
] |
8533bcbc52b5180dfd89e4c7d843be2bd75ad2d37d79fc5d8931462ba772f809
|
#! /usr/bin/env python
########################################################################
# $HeadURL: $
########################################################################
__RCSID__ = "$Id: $"
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Monitor the status of the given FTS request
Usage:
%s <lfn|fileOfLFN> sourceSE targetSE server GUID
""" % Script.scriptName )
Script.parseCommandLine()
from DIRAC.DataManagementSystem.Client.FTSRequest import FTSRequest
import DIRAC
import os
args = Script.getPositionalArgs()
if not len( args ) == 5:
Script.showHelp()
else:
inputFileName = args[0]
sourceSE = args[1]
targetSE = args[2]
server = args[3]
guid = args[4]
if not os.path.exists( inputFileName ):
lfns = [inputFileName]
else:
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
inputFile.close()
lfns = string.splitlines()
oFTSRequest = FTSRequest()
for lfn in lfns:
oFTSRequest.setLFN( lfn )
oFTSRequest.setFTSGUID( guid )
oFTSRequest.setSourceSE( sourceSE )
oFTSRequest.setTargetSE( targetSE )
oFTSRequest.setFTSServer( server )
result = oFTSRequest.monitor( untilTerminal = True, printOutput = True )
if not result['OK']:
DIRAC.gLogger.error( 'Failed to issue FTS Request', result['Message'] )
DIRAC.exit( -1 )
|
avedaee/DIRAC
|
DataManagementSystem/scripts/dirac-dms-fts-monitor.py
|
Python
|
gpl-3.0
| 1,304
|
[
"DIRAC"
] |
67bc468d5d7732a0b61f8714e6b549796c3a15c003c883b08b0112cac643de1f
|
"""
Script entry point
"""
from src.calrissian.particle333_network import Particle333Network
from src.calrissian.layers.particle333 import Particle333
from src.calrissian.optimizers.particle333_sgd import Particle333SGD
from multiprocessing import Pool
import numpy as np
import time
import pandas as pd
import pickle
def main():
train_X = np.asarray([[0.45, 3.33], [0.0, 2.22], [0.45, -0.54]])
train_Y = np.asarray([[1.0], [0.0], [0.0]])
net = Particle333Network(cost="mse")
net.append(Particle333(2, 5, activation="sigmoid", nr=4, nc=6))
net.append(Particle333(5, 1, activation="sigmoid", nr=4, nc=6))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
print(net.cost_gradient(train_X, train_Y))
def main2():
train_X = np.random.normal(0.0, 0.1, (3, 4*4))
train_Y = np.random.normal(0.0, 0.1, (3, 1))
nr = 3
nc = 3
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(2, 2, 3),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(2, 2, 3),
output_shape=(2, 2, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(4, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def main3():
train_X = np.random.normal(0.0, 0.1, (3, 4*4))
train_Y = np.random.normal(0.0, 0.1, (3, 1))
nr = 3
nc = 3
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(2, 2, 3),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5),
output_pool_shape=(2, 2, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(2, 2, 3),
output_shape=(2, 2, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(4, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def main4():
train_X = np.random.normal(0.0, 0.1, (1, 4*4))
train_Y = np.random.normal(0.0, 0.1, (1, 1))
nr = 3
nc = 1
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(1, 1, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(1, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def fd():
ts = time.time()
train_X = None
train_Y = None
nc = None
nr = None
net = None
if False:
train_X = np.asarray([[0.2, -0.3], [0.1, -0.9], [0.1, 0.05], [0.2, -0.3], [0.1, -0.9], [0.1, 0.05]])
train_Y = np.asarray(
[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
nc = 4
nr = 3
net = Particle333Network(cost="categorical_cross_entropy")
net.append(Particle333(2, 5, activation="sigmoid", nr=nr, nc=nc))
net.append(Particle333(5, 6, activation="sigmoid", nr=nr, nc=nc))
net.append(Particle333(6, 3, activation="softmax", nr=nr, nc=nc))
else:
train_X = np.random.normal(0.0, 1.0, (3, 4 * 4))
train_Y = np.random.choice([0.0, 1.0], (3, 1))
nr = 3
nc = 2
# working!
# net = Particle333Network(cost="mse")
# net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
# apply_convolution=True,
# input_shape=(4, 4, 1),
# output_shape=(3, 3, 2),
# input_delta=(0.5, 0.5, 0.5),
# output_delta=(0.5, 0.5, 0.5),
# output_pool_shape=(2, 3, 1),
# output_pool_delta=(0.1, 0.1, 0.1)
# ))
# net.append(Particle333(3*3*2, 1, activation="sigmoid", nr=nr, nc=nc))
# working too!
net = Particle333Network(cost="mse")
# net = Particle333Network(cost="mse", regularizer="l2", lam=0.01)
net.append(Particle333(activation="tanh", nr=nr, nc=nc,
apply_convolution=True,
rand="normal",
input_shape=(4, 4, 1),
output_shape=(3, 3, 2),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5),
output_pool_shape=(2, 3, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(activation="tanh", nr=nr, nc=nc,
apply_convolution=True,
rand="normal",
input_shape=(3, 3, 2),
output_shape=(3, 3, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.2, 0.2, 0.2),
output_pool_shape=(1, 1, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(3*3*1, 4, activation="tanh", rand="normal", nr=nr, nc=nc, p_dropout=0.5))
net.append(Particle333(4, 1, activation="sigmoid", rand="normal", nr=nr, nc=nc))
db, dq, dz, dr_inp, dr_out = net.cost_gradient(train_X, train_Y)
h = 0.0001
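# Central-difference check: each parameter is perturbed by +/- h and the
# numerical derivative (f(x+h) - f(x-h)) / (2h) is compared against the
# analytic gradients returned by cost_gradient above.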
print("analytic b")
print(db)
fd_b = []
for l in range(len(net.layers)):
lb = np.zeros_like(db[l])
for b in range(len(net.layers[l].b[0])):
orig = net.layers[l].b[0][b]
net.layers[l].b[0][b] += h
fp = net.cost(train_X, train_Y)
net.layers[l].b[0][b] -= 2*h
fm = net.cost(train_X, train_Y)
lb[0][b] = (fp - fm) / (2*h)
net.layers[l].b[0][b] = orig
fd_b.append(lb)
print("numerical b")
print(fd_b)
print("analytic q")
for x in dq:
print(x)
fd_q = []
for l in range(len(net.layers)):
lq = np.zeros_like(dq[l])
for i in range(len(net.layers[l].q)):
for c in range(nc):
orig = net.layers[l].q[i][c]
net.layers[l].q[i][c] += h
fp = net.cost(train_X, train_Y)
net.layers[l].q[i][c] -= 2*h
fm = net.cost(train_X, train_Y)
lq[i][c] = (fp - fm) / (2*h)
net.layers[l].q[i][c] = orig
fd_q.append(lq)
print("numerical q")
for x in fd_q:
print(x)
print("analytic zeta")
for x in dz:
print(x)
fd_zeta = []
for l in range(len(net.layers)):
lzeta = np.zeros_like(dz[l])
for i in range(len(net.layers[l].zeta)):
for c in range(nc):
orig = net.layers[l].zeta[i][c]
net.layers[l].zeta[i][c] += h
fp = net.cost(train_X, train_Y)
net.layers[l].zeta[i][c] -= 2*h
fm = net.cost(train_X, train_Y)
lzeta[i][c] = (fp - fm) / (2*h)
net.layers[l].zeta[i][c] = orig
fd_zeta.append(lzeta)
print("numerical zeta")
for x in fd_zeta:
print(x)
print("analytic r_inp")
for x in dr_inp:
print(x)
fd_r_inp = []
for l in range(len(dr_inp)):
lr = np.zeros_like(dr_inp[l])
layer = net.layers[l]
for i in range(len(layer.r_inp)):
for r in range(nr):
orig = layer.r_inp[i][r]
layer.r_inp[i][r] += h
fp = net.cost(train_X, train_Y)
layer.r_inp[i][r] -= 2*h
fm = net.cost(train_X, train_Y)
lr[i][r] = (fp - fm) / (2*h)
layer.r_inp[i][r] = orig
fd_r_inp.append(lr)
print("numerical r")
for x in fd_r_inp:
print(x)
print("analytic r_out")
for x in dr_out:
print(x)
fd_r_out = []
for l in range(len(net.layers)):
lr_out = np.zeros_like(dr_out[l])
for i in range(len(net.layers[l].r_out)):
for c in range(nc):
for r in range(nr):
orig = net.layers[l].r_out[i][c][r]
net.layers[l].r_out[i][c][r] += h
fp = net.cost(train_X, train_Y)
net.layers[l].r_out[i][c][r] -= 2*h
fm = net.cost(train_X, train_Y)
lr_out[i][c][r] = (fp - fm) / (2*h)
net.layers[l].r_out[i][c][r] = orig
fd_r_out.append(lr_out)
print("numerical r_out")
for x in fd_r_out:
print(x)
diff_b = np.sum([np.sum(np.abs(fd_b[i] - db[i])) for i in range(len(db))])
print("diff b: {}".format(diff_b))
diff_q = np.sum([np.sum(np.abs(fd_q[i] - dq[i])) for i in range(len(dq))])
print("diff q: {}".format(diff_q))
diff_zeta = np.sum([np.sum(np.abs(fd_zeta[i] - dz[i])) for i in range(len(dz))])
print("diff zeta: {}".format(diff_zeta))
diff_r_inp = np.sum([np.sum(np.abs(fd_r_inp[i] - dr_inp[i])) for i in range(len(dr_inp))])
print("diff r_inp: {}".format(diff_r_inp))
diff_r = np.sum([np.sum(np.abs(fd_r_out[i] - dr_out[i])) for i in range(len(dr_out))])
print("diff r_out: {}".format(diff_r))
print("time: {}".format(time.time() - ts))
def sgd(pool=None):
ts = time.time()
ndata = 500
train_X = np.random.normal(0.0, 1.0, (ndata, 12 * 12))
train_Y = np.random.choice([0.0, 1.0], (ndata, 2))
nr = 3
nc = 2
net = Particle333Network(cost="categorical_cross_entropy")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(12, 12, 1),
output_shape=(6, 6, 4),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5),
output_pool_shape=(2, 3, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(6, 6, 4),
output_shape=(3, 3, 2),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.2, 0.2, 0.2),
output_pool_shape=(1, 1, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(3 * 3 * 2, 10, activation="sigmoid", nr=nr, nc=nc))
net.append(Particle333(10, 2, activation="softmax", nr=nr, nc=nc))
mbs = 20
nt = 2
cs = mbs // nt
sgd = Particle333SGD(n_epochs=1, mini_batch_size=mbs, verbosity=1, weight_update="sd",
alpha=0.01, beta=0.8, gamma=0.95,
cost_freq=1, fixed_input=True,
n_threads=nt, chunk_size=cs
)
sgd.optimize(net, train_X, train_Y, pool=pool)
print("time: {}".format(time.time() - ts))
def mnist(pool=None):
raw_data_train = pd.read_csv("/Users/adrianlange/programming/MNIST/data/mnist_train.csv", header=None)
raw_data_test = pd.read_csv("/Users/adrianlange/programming/MNIST/data/mnist_test.csv", header=None)
# In[7]:
# Prepare data
denom = 255.0
# denom = 1.0
X = np.asarray(raw_data_train.iloc[:,1:] / denom) # scaled values in range [0:1]
X_test = np.asarray(raw_data_test.iloc[:,1:] / denom)
# Z-score centering
m = X.mean()
s = X.std()
X = (X - m) / s
X_test = (X_test - m) / s
# length ten categorical vector
Y = []
for val in raw_data_train.iloc[:,0]:
y = np.zeros(10)
y[val] = 1.0
Y.append(y)
Y = np.asarray(Y)
Y_test = []
for val in raw_data_test.iloc[:,0]:
y = np.zeros(10)
y[val] = 1.0
Y_test.append(y)
Y_test = np.asarray(Y_test)
# Data subset
# n_sub = len(X)
n_sub = 50
X_sub = X[:n_sub, :]
Y_sub = Y[:n_sub, :]
X_other = X[n_sub:, :]
Y_other = Y[n_sub:, :]
s = 0.1
q = 0.1
b = 0.1
z = 0.1
p = "gaussian"
rand = "normal"
nr = 3
nc = 3
net = Particle333Network(cost="categorical_cross_entropy")
net.append(Particle333(activation="tanh", nr=nr, nc=nc,
s=0.1, q=0.1, b=b,
z=z, zoff=4.0,
potential=p, rand=rand,
apply_convolution=True,
input_shape=(28, 28, 1),
output_shape=(10, 10, 10),
input_delta=(0.1, 0.1, 0.1),
output_delta=(0.2, 0.2, 0.2),
output_pool_shape=(2, 2, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(activation="tanh", nr=nr, nc=nc,
s=0.1, q=0.1, b=b,
z=z, zoff=4.0,
potential=p, rand=rand,
apply_convolution=True,
input_shape=(10, 10, 10),
output_shape=(7, 7, 10),
input_delta=(0.1, 0.1, 0.1),
output_delta=(0.2, 0.2, 0.2),
output_pool_shape=(2, 2, 1),
output_pool_delta=(0.1, 0.1, 0.1)
))
net.append(Particle333(7*7*10, 100, activation="tanh", potential=p, rand=rand, s=1.0, q=0.1, b=b, z=z, zoff=1.0, nr=nr, nc=nc))
net.append(Particle333(100, 10, activation="softmax", potential=p, rand=rand, s=1.0, q=0.1, b=b, z=z, zoff=1.0, nr=nr, nc=nc))
# input position to be fixed at the origin
net.layers[0].r_inp = np.zeros_like(net.layers[0].r_inp)
n = 0
mbs = 10
nt = 1
cs = mbs // nt
sgd = Particle333SGD(n_epochs=1, mini_batch_size=mbs, verbosity=1, weight_update="rmsprop",
alpha=0.01, beta=0.8, gamma=0.95,
cost_freq=1, fixed_input=True,
n_threads=nt, chunk_size=cs
)
print("Starting opt...")
ts = time.time()
sgd.optimize(net, X_sub, Y_sub, pool=pool)
print(time.time() - ts)
if __name__ == "__main__":
# Ensure same seed
np.random.seed(100)
# main()
# main2()
# main3()
# main4()
fd()
# sgd(pool=Pool(processes=2))
# mnist(pool=Pool(processes=2))
# mnist(pool=None)
|
awlange/brainsparks
|
tests/particle333_test.py
|
Python
|
mit
| 15,864
|
[
"Gaussian"
] |
090061faa44601eb12f125b64b70f1981f32b0af485907c254a7aebb49ceb5ef
|
"""
Hodgkin-Huxley Network
"""
import numpy as np
from SimulationMethods import eul, rk4
from NeuronModels import HodgekinHuxley
class HhNetwork:
"""
"""
def __init__(self, _neuronsPerLayer, _Dmax):
"""
Initialise network with given number of neurons
Inputs:
_neuronsPerLayer -- List with the number of neurons in each layer. A list
[N1, N2, ... Nk] will return a network with k layers
with the corresponding number of neurons in each.
_Dmax -- Maximum delay in all the synapses in the network. Any
longer delay will result in failing to deliver spikes.
"""
self.Dmax = _Dmax
self.Nlayers = len(_neuronsPerLayer)
self.layer = {}
for i, n in enumerate(_neuronsPerLayer):
self.layer[i] = HhLayer(n)
def Update(self, t):
"""
Run simulation of the whole network for 1 millisecond and update the
network's internal variables.
Inputs:
t -- Current timestep. Necessary to sort out the synaptic delays.
"""
for lr in xrange(self.Nlayers):
self.NeuronUpdate(lr, t)
def NeuronUpdate(self, i, t):
"""
Hodgkin-Huxley neuron update function. Update one layer for 1 millisecond
using the RK4 method.
Inputs:
i -- Number of layer to update
t -- Current timestep. Necessary to sort out the synaptic delays.
"""
# RK4 integration step size in ms
dt = 0.02
# Calculate current from incoming spikes
for j in xrange(self.Nlayers):
# LAYER I IS CONNECTED TO LAYER J IF THERE EXISTS A CONNECTIVITY MATRIX S[J] I.E. LAYER J --> LAYER I
# If layer[i].S[j] exists then layer[i].factor[j] and
# layer[i].delay[j] have to exist
if j in self.layer[i].S:
S = self.layer[i].S[j] # target neuron->rows, source neuron->columns
# Firings contains time and neuron idx of each spike.
# [t, index of the neuron in the layer j]
firings = self.layer[j].firings
# Find incoming spikes taking delays into account
delay = self.layer[i].delay[j]
F = self.layer[i].factor[j]
# Sum current from incoming spikes
k = len(firings)
while k > 0 and (firings[k - 1, 0] > (t - self.Dmax)):
idx = delay[:, firings[k - 1, 1]] == (t - firings[
k - 1, 0]) # which receiving neurons have been waiting long enough; as all elements of delay are the same, all idx will be true or false
self.layer[i].I[idx] += F * S[
idx, firings[k - 1, 1]] # firings[k-1,1] is the index of neuron that has fired,
# idx selects the receiving neurons which have been waiting long enough
k = k - 1
# Update v, m, n and h using the HH model and the RK4 method
for k in xrange(int(1 / dt)):
x = np.array([self.layer[i].v,
self.layer[i].m,
self.layer[i].n,
self.layer[i].h])
fP = np.array([self.layer[i].I,
self.layer[i].gNa,
self.layer[i].gK,
self.layer[i].gL,
self.layer[i].ENa,
self.layer[i].EK,
self.layer[i].EL,
self.layer[i].C])
self.layer[i].v, self.layer[i].m, self.layer[i].n, self.layer[i].h = rk4(x, dt, fP, HodgekinHuxley)
# print(i, k, "v:", self.layer[i].v)
# print(i, k, "m:", self.layer[i].m)
# print(i, k, "n:", self.layer[i].n)
# print(i, k, "h:", self.layer[i].h)
#
# print('\n')
# Find index of neurons that have fired this millisecond
fired = np.where(self.layer[i].v >= 50)[0] # gives the index of the fired neurons i.e the column of the array
if len(fired) > 0:
for f in fired:
# Add spikes into spike train
if len(self.layer[i].firings) != 0:
self.layer[i].firings = np.vstack([self.layer[i].firings, [t, f]])
else:
self.layer[i].firings = np.array([[t, f]])
return
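# Hypothetical usage sketch (layer sizes and delay chosen for illustration only;
# the caller is expected to populate each layer's state (v, m, n, h, I, firings),
# its conductances/reversal potentials and the S/delay/factor connectivity
# dictionaries before stepping the network):
# net = HhNetwork([4, 3], 10)
# for t in range(1000):
# net.Update(t)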
class HhLayer:
"""
Layer of Hodgkin-Huxley neurons to be used inside an HhNetwork.
"""
def __init__(self, n):
"""
Initialise layer with empty vectors.
Inputs:
n -- Number of neurons in the layer
"""
self.N = n
self.gNa = np.zeros(n)
self.gK = np.zeros(n)
self.gL = np.zeros(n)
self.ENa = np.zeros(n)
self.EK = np.zeros(n)
self.EL = np.zeros(n)
self.C = np.zeros(n)
self.S = {}
self.delay = {}
self.factor = {}
|
cpsnowden/ComputationalNeurodynamics
|
HH_Braitenberg/HhNetwork.py
|
Python
|
gpl-3.0
| 5,100
|
[
"NEURON"
] |
69cdd599100367ba5ec83cdaf441ff02e6017cdc82015ab4f50d8dde421489c3
|
import numpy as np
from .. import Variable
from ..core import indexing
from ..core.utils import Frozen, FrozenDict
from .common import AbstractDataStore, BackendArray
from .file_manager import CachingFileManager
from .locks import HDF5_LOCK, NETCDFC_LOCK, SerializableLock, combine_locks, ensure_lock
# PyNIO can invoke netCDF libraries internally
# Add a dedicated lock in case NCL itself is not thread-safe.
NCL_LOCK = SerializableLock()
PYNIO_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK, NCL_LOCK])
class NioArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
self.dtype = np.dtype(array.typecode())
def get_array(self, needs_lock=True):
ds = self.datastore._manager.acquire(needs_lock)
return ds.variables[self.variable_name]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.BASIC, self._getitem
)
def _getitem(self, key):
with self.datastore.lock:
array = self.get_array(needs_lock=False)
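# For a 0-d variable an empty-tuple index means "give me the scalar",
# which PyNIO exposes via get_value().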
if key == () and self.ndim == 0:
return array.get_value()
return array[key]
class NioDataStore(AbstractDataStore):
"""Store for accessing datasets via PyNIO
"""
def __init__(self, filename, mode="r", lock=None, **kwargs):
import Nio
if lock is None:
lock = PYNIO_LOCK
self.lock = ensure_lock(lock)
self._manager = CachingFileManager(
Nio.open_file, filename, lock=lock, mode=mode, kwargs=kwargs
)
# xarray provides its own support for FillValue,
# so turn off PyNIO's support for the same.
self.ds.set_option("MaskedArrayMode", "MaskedNever")
@property
def ds(self):
return self._manager.acquire()
def open_store_variable(self, name, var):
data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))
return Variable(var.dimensions, data, var.attributes)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
return {
"unlimited_dims": {k for k in self.ds.dimensions if self.ds.unlimited(k)}
}
def close(self):
self._manager.close()
|
jhamman/xarray
|
xarray/backends/pynio_.py
|
Python
|
apache-2.0
| 2,661
|
[
"NetCDF"
] |
bd418d97f6f608080404251763782131106b697011678e508feae8f6c4ecb349
|
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from utils import remap
from tool.bwa_aligner import bwaAlignerTool
from tool.inps import inps
# ------------------------------------------------------------------------------
class process_mnaseseq(Workflow):
"""
Functions for downloading and processing MNase-seq FastQ files. Files are
downloaded from the European Nucleotide Archive (ENA), then aligned,
filtered and analysed for peak calling
"""
def __init__(self, configuration=None):
"""
Initialise the class
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("Processing MNase-Seq")
if configuration is None:
configuration = {}
self.configuration.update(configuration)
def run(self, input_files, metadata, output_files):
"""
Main run function for processing MNase-Seq FastQ data. Pipeline aligns
the FASTQ files to the genome using BWA. iNPS is then used for peak
calling to identify nucleosome position sites within the genome.
Parameters
----------
input_files : list
List of file locations
metadata : list
Required meta data
output_files : dict
Expected locations of the output bam, bai and bed files
Returns
-------
outputfiles : list
List of locations for the output bam, bed and tsv files
"""
output_metadata = {}
if "genome_public" in input_files:
align_input_files = remap(
input_files, genome="genome_public", loc="loc", index="index_public")
align_input_file_meta = remap(
metadata, genome="genome_public", loc="loc", index="index_public")
else:
align_input_files = remap(input_files, "genome", "loc", "index")
align_input_file_meta = remap(metadata, "genome", "loc", "index")
bwa = bwaAlignerTool()
logger.progress("BWA ALN Aligner", status="RUNNING")
bwa_files, bwa_meta = bwa.run(
align_input_files, align_input_file_meta,
{"output": output_files["bam"], "bai": output_files["bai"]}
)
logger.progress("BWA ALN Aligner", status="DONE")
output_files_generated = {}
try:
output_files_generated["bam"] = bwa_files["bam"]
output_metadata["bam"] = bwa_meta["bam"]
tool_name = output_metadata['bam'].meta_data['tool']
output_metadata['bam'].meta_data['tool_description'] = tool_name
output_metadata['bam'].meta_data['tool'] = "process_mnaseseq"
output_files_generated["bai"] = bwa_files["bai"]
output_metadata["bai"] = bwa_meta["bai"]
tool_name = output_metadata['bai'].meta_data['tool']
output_metadata['bai'].meta_data['tool_description'] = tool_name
output_metadata['bai'].meta_data['tool'] = "process_mnaseseq"
except KeyError:
logger.fatal("BWA Alignment failed")
inps_tool = inps()
logger.progress("iNPS Peak Caller", status="RUNNING")
inps_files, inps_meta = inps_tool.run(
remap(bwa_files, "bam"),
remap(bwa_meta, "bam"),
{"bed": output_files["bed"]}
)
logger.progress("iNPS Peak Caller", status="DONE")
try:
output_files_generated["bed"] = inps_files["bed"]
output_metadata["bed"] = inps_meta["bed"]
tool_name = output_metadata['bed'].meta_data['tool']
output_metadata['bed'].meta_data['tool_description'] = tool_name
output_metadata['bed'].meta_data['tool'] = "process_mnaseseq"
except KeyError:
logger.fatal("BWA Alignment failed")
print("MNASESEQ RESULTS:", output_metadata)
return output_files, output_metadata
# ------------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
-------------
This function launches the app using configuration written in
two json files: config.json and input_metadata.json.
"""
# 1. Instantiate and launch the App
print("1. Instantiate and launch the App")
from apps.jsonapp import JSONApp
app = JSONApp()
result = app.launch(process_mnaseseq,
config,
in_metadata,
out_metadata)
# 2. The App has finished
print("2. Execution finished; see " + out_metadata)
print(result)
return result
# ------------------------------------------------------------------------------
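# Example invocation (file paths are placeholders):
# python process_mnaseseq.py --config config.json \
# --in_metadata input_metadata.json --out_metadata output_metadata.json --local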
if __name__ == "__main__":
# Set up the command line parameters
PARSER = argparse.ArgumentParser(description="MNase-seq peak calling")
PARSER.add_argument(
"--config", help="Configuration file")
PARSER.add_argument(
"--in_metadata", help="Location of input metadata file")
PARSER.add_argument(
"--out_metadata", help="Location of output metadata file")
PARSER.add_argument(
"--local", action="store_const", const=True, default=False)
# Get the matching parameters from the command line
ARGS = PARSER.parse_args()
CONFIG = ARGS.config
IN_METADATA = ARGS.in_metadata
OUT_METADATA = ARGS.out_metadata
LOCAL = ARGS.local
if LOCAL:
import sys
sys._run_from_cmdl = True # pylint: disable=protected-access
RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
print(RESULTS)
|
Multiscale-Genomics/mg-process-fastq
|
process_mnaseseq.py
|
Python
|
apache-2.0
| 6,459
|
[
"BWA"
] |
42cb68db2b6f42bebef4142e9155466821e9a59443b1da1c0ee0b3a102d112e0
|
#####################################################################
# $HeadURL $
# File: RequestManagerHandler.py
########################################################################
""" :mod: RequestManagerHandler
===========================
.. module: RequestManagerHandler
:synopsis: Implementation of the RequestDB service in the DISET framework
:deprecated:
"""
__RCSID__ = "$Id$"
## imports
from types import DictType, IntType, ListType, LongType, StringTypes
## from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ConfigurationSystem.Client import PathFinder
## global RequestDB instance
gRequestDB = False
def initializeRequestManagerHandler(serviceInfo):
""" initialise handler """
global gRequestDB
csSection = PathFinder.getServiceSection( "RequestManagement/RequestManager" )
backend = gConfig.getValue('%s/Backend' % csSection)
if not backend:
fatStr = "RequestManager.initializeRequestManagerHandler: Failed to get backed for RequestDB from CS."
gLogger.fatal(fatStr)
return S_ERROR(fatStr)
gLogger.info("RequestManager.initializeRequestManagerHandler: Initialising with backend", backend)
if backend == 'file':
from DIRAC.RequestManagementSystem.DB.RequestDBFile import RequestDBFile
gRequestDB = RequestDBFile()
elif backend == 'mysql':
from DIRAC.RequestManagementSystem.DB.RequestDBMySQL import RequestDBMySQL
gRequestDB = RequestDBMySQL()
else:
fatStr = "RequestManager.initializeRequestManagerHandler: Supplied backend is not supported."
gLogger.fatal(fatStr, backend)
return S_ERROR(fatStr)
return S_OK()
class RequestManagerHandler(RequestHandler):
"""
.. class:: RequestManagerHandler
"""
@staticmethod
def __getRequestID( requestName ):
""" get requestID for given :requestName: """
requestID = requestName
if type(requestName) in StringTypes:
result = gRequestDB._getRequestAttribute( "RequestID", requestName=requestName )
if not result["OK"]:
return result
requestID = result["Value"]
return S_OK( requestID )
types_setRequest = [ StringTypes, StringTypes ]
@staticmethod
def export_setRequest( requestName, requestString ):
""" Set a new request """
gLogger.info("RequestManagerHandler.setRequest: Attempting to set %s." % requestName)
try:
return gRequestDB.setRequest( requestName, requestString )
except Exception, error:
errStr = "RequestManagerHandler.setRequest: Exception while setting request."
gLogger.exception( errStr, requestName, lException=error )
return S_ERROR(errStr)
types_setRequestStatus = [ StringTypes, StringTypes ]
@staticmethod
def export_setRequestStatus( requestName, requestStatus ):
""" Set status of a request """
gLogger.info("RequestHandler.setRequestStatus: Setting status of %s to %s." % ( requestName, requestStatus ) )
try:
return gRequestDB.setRequestStatus( requestName, requestStatus )
except Exception, error:
errStr = "RequestHandler.setRequestStatus: Exception while setting request status."
gLogger.exception( errStr, requestName, lException=error )
return S_ERROR(errStr)
types_updateRequest = [ StringTypes, StringTypes ]
@staticmethod
def export_updateRequest( requestName, requestString ):
""" Update the request with the supplied string """
gLogger.info("RequestManagerHandler.updateRequest: Attempting to update %s." % requestName)
try:
return gRequestDB.updateRequest( requestName, requestString )
except Exception, error:
errStr = "RequestManagerHandler.updateRequest: Exception which updating request."
gLogger.exception( errStr, requestName, lException=error )
return S_ERROR(errStr)
types_getDBSummary = []
@staticmethod
def export_getDBSummary():
""" Get the summary of requests in the Request DB """
gLogger.info("RequestManagerHandler.getDBSummary: Attempting to obtain database summary.")
try:
return gRequestDB.getDBSummary()
except Exception, error:
errStr = "RequestManagerHandler.getDBSummary: Exception while getting database summary."
gLogger.exception( errStr, lException=error )
return S_ERROR(errStr)
types_getRequest = [StringTypes]
@staticmethod
def export_getRequest( requestType ):
""" Get a request of given type from the database """
gLogger.info("RequestHandler.getRequest: Attempting to get request type", requestType)
try:
return gRequestDB.getRequest(requestType)
except Exception, error:
errStr = "RequestManagerHandler.getRequest: Exception while getting request."
gLogger.exception( errStr, requestType, lException=error )
return S_ERROR(errStr)
types_serveRequest = []
@staticmethod
def export_serveRequest( requestType ):
""" Serve a request of a given type from the database """
gLogger.info("RequestHandler.serveRequest: Attempting to serve request type", requestType)
try:
return gRequestDB.serveRequest(requestType)
except Exception, error:
errStr = "RequestManagerHandler.serveRequest: Exception while serving request."
gLogger.exception( errStr, requestType, lException=error )
return S_ERROR(errStr)
types_getRequestSummaryWeb = [ DictType, ListType, IntType, IntType ]
@staticmethod
def export_getRequestSummaryWeb( selectDict, sortList, startItem, maxItems):
""" Get summary of the request/subrequest info in the standard form for the web """
return gRequestDB.getRequestSummaryWeb(selectDict, sortList, startItem, maxItems)
types_getDistinctValues = [ StringTypes ]
@staticmethod
def export_getDistinctValues( attribute ):
""" Get distinct values for a given (sub)request attribute """
snames = ['RequestType', 'Operation', 'Status']
rnames = ['OwnerDN', 'OwnerGroup']
if attribute in snames:
return gRequestDB.getDistinctAttributeValues('SubRequests', attribute)
elif attribute in rnames:
return gRequestDB.getDistinctAttributeValues('Requests', attribute)
return S_ERROR('Invalid attribute %s' % attribute)
types_getDigest = [ list(StringTypes) + [ IntType, LongType ] ]
def export_getDigest( self, requestName ):
""" Get the digest of the request identified by its name """
requestID = self.__getRequestID( requestName )
return gRequestDB.getDigest( requestID["Value"] ) if requestID["OK"] else requestID
types_getCurrentExecutionOrder = [ list(StringTypes) + [ IntType, LongType ] ]
def export_getCurrentExecutionOrder( self, requestName ):
""" Get the current execution order of the given request """
requestID = self.__getRequestID( requestName )
return gRequestDB.getCurrentExecutionOrder( requestID["Value"] ) if requestID["OK"] else requestID
types_getRequestFileStatus = [ list(StringTypes) + [ IntType, LongType ], ListType ]
def export_getRequestFileStatus( self, requestName, lfns ):
""" Get the current status of the provided files for the request """
requestID = self.__getRequestID( requestName )
return gRequestDB.getRequestFileStatus( requestID["Value"], lfns ) if requestID["OK"] else requestID
types_getRequestStatus = [ list(StringTypes) + [ IntType, LongType ] ]
def export_getRequestStatus( self, requestName ):
""" Get request status given :requestName: """
requestID = self.__getRequestID( requestName )
return gRequestDB.getRequestStatus( requestID["Value"] ) if requestID["OK"] else requestID
types_getRequestInfo = [ list(StringTypes) + [ IntType, LongType ] ]
def export_getRequestInfo( self, requestName ):
""" get request info for given :requestName: """
requestID = self.__getRequestID( requestName )
return gRequestDB.getRequestInfo( requestID["Value"] ) if requestID["OK"] else requestID
types_deleteRequest = [ StringTypes ]
@staticmethod
def export_deleteRequest( requestName ):
""" Delete the request with the supplied name"""
gLogger.info("RequestManagerHandler.deleteRequest: Attempting to delete %s." % requestName)
try:
return gRequestDB.deleteRequest( requestName )
except Exception, error:
errStr = "RequestManagerHandler.deleteRequest: Exception which deleting request."
gLogger.exception( errStr, requestName, lException=error )
return S_ERROR(errStr)
types_getRequestForJobs = [ ListType ]
@staticmethod
def export_getRequestForJobs( jobIDs ):
""" Select the request names for supplied jobIDs """
gLogger.info("RequestManagerHandler.getRequestForJobs: Attempting to get request names for %s jobs." % len(jobIDs))
try:
return gRequestDB.getRequestForJobs( jobIDs )
except Exception, error:
errStr = "RequestManagerHandler.getRequestForJobs: Exception which getting request names."
gLogger.exception( errStr, '', lException=error )
return S_ERROR(errStr)
types_selectRequests = [ DictType ]
@staticmethod
def export_selectRequests( selectDict, limit=100 ):
""" Select requests according to supplied criteria """
gLogger.verbose("RequestManagerHandler.selectRequests: Attempting to select requests." )
try:
return gRequestDB.selectRequests( selectDict, limit )
except Exception, error:
errStr = "RequestManagerHandler.selectRequests: Exception while selecting requests."
gLogger.exception( errStr, '', lException=error)
return S_ERROR(errStr)
types_readRequestsForJobs = [ ListType ]
@staticmethod
def export_readRequestsForJobs( jobIDs ):
""" read requests for jobs given list of jobIDs """
gLogger.verbose("RequestManagerHandler.readRequestsForJobs: Attepting to read requests associated to the jobs." )
try:
res = gRequestDB.readRequestsForJobs( jobIDs )
return res
except Exception, error:
errStr = "RequestManagerHandler.readRequestsForJobs: Exception while selecting requests."
gLogger.exception( errStr, '', lException=error )
return S_ERROR( errStr )
|
avedaee/DIRAC
|
RequestManagementSystem/Service/RequestManagerHandler.py
|
Python
|
gpl-3.0
| 10,079
|
[
"DIRAC"
] |
7aaa57cb7c1d32eda01291ad4602318114957ef3c5ea78e69c45a2456839fd18
|
# Copyright 1999-2017. Plesk International GmbH. All rights reserved.
#!@@PYTHON@@
#------------------------------------------------------------------------
# Copyright (c) 1998 by Total Control Software
# All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name: fcgi.py
#
# Description: Handles communication with the FastCGI module of the
# web server without using the FastCGI developers kit, but
# will also work in a non-FastCGI environment, (straight CGI.)
# This module was originally fetched from someplace on the
# Net (I don't remember where and I can't find it now...) and
# has been significantly modified to fix several bugs, be more
# readable, more robust at handling large CGI data and return
# document sizes, and also to fit the model that we had previously
# used for FastCGI.
#
# WARNING: If you don't know what you are doing, don't tinker with this
# module!
#
# Creation Date: 1/30/98 2:59:04PM
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
#------------------------------------------------------------------------
import os, sys, string, socket, errno
from cStringIO import StringIO
import cgi
#---------------------------------------------------------------------------
# Set various FastCGI constants
# Maximum number of requests that can be handled
FCGI_MAX_REQS=1
FCGI_MAX_CONNS = 1
# Supported version of the FastCGI protocol
FCGI_VERSION_1 = 1
# Boolean: can this application multiplex connections?
FCGI_MPXS_CONNS=0
# Record types
FCGI_BEGIN_REQUEST = 1 ; FCGI_ABORT_REQUEST = 2 ; FCGI_END_REQUEST = 3
FCGI_PARAMS = 4 ; FCGI_STDIN = 5 ; FCGI_STDOUT = 6
FCGI_STDERR = 7 ; FCGI_DATA = 8 ; FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
# Types of management records
ManagementTypes = [FCGI_GET_VALUES]
FCGI_NULL_REQUEST_ID=0
# Masks for flags component of FCGI_BEGIN_REQUEST
FCGI_KEEP_CONN = 1
# Values for role component of FCGI_BEGIN_REQUEST
FCGI_RESPONDER = 1 ; FCGI_AUTHORIZER = 2 ; FCGI_FILTER = 3
# Values for protocolStatus component of FCGI_END_REQUEST
FCGI_REQUEST_COMPLETE = 0 # Request completed nicely
FCGI_CANT_MPX_CONN = 1 # This app can't multiplex
FCGI_OVERLOADED = 2 # New request rejected; too busy
FCGI_UNKNOWN_ROLE = 3 # Role value not known
error = 'fcgi.error'
#---------------------------------------------------------------------------
# The following function is used during debugging; it isn't called
# anywhere at the moment. It is named debug_log rather than error so that it
# does not shadow the 'error' exception string defined above, which is raised further below.
def debug_log(msg):
"Append a string to /tmp/err"
errf=open('/tmp/err', 'a+')
errf.write(msg+'\n')
errf.close()
#---------------------------------------------------------------------------
class record:
"Class representing FastCGI records"
def __init__(self):
self.version = FCGI_VERSION_1
self.recType = FCGI_UNKNOWN_TYPE
self.reqId = FCGI_NULL_REQUEST_ID
self.content = ""
#----------------------------------------
def readRecord(self, sock):
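# A FastCGI record header is 8 bytes: version, record type, request id
# (2 bytes, big-endian), content length (2 bytes, big-endian), padding
# length and one reserved byte.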
s = map(ord, sock.recv(8))
self.version, self.recType, paddingLength = s[0], s[1], s[6]
self.reqId, contentLength = (s[2]<<8)+s[3], (s[4]<<8)+s[5]
self.content = ""
while len(self.content) < contentLength:
data = sock.recv(contentLength - len(self.content))
self.content = self.content + data
if paddingLength != 0:
padding = sock.recv(paddingLength)
# Parse the content information
c = self.content
if self.recType == FCGI_BEGIN_REQUEST:
self.role = (ord(c[0])<<8) + ord(c[1])
self.flags = ord(c[2])
elif self.recType == FCGI_UNKNOWN_TYPE:
self.unknownType = ord(c[0])
elif self.recType == FCGI_GET_VALUES or self.recType == FCGI_PARAMS:
self.values={}
pos=0
while pos < len(c):
name, value, pos = readPair(c, pos)
self.values[name] = value
elif self.recType == FCGI_END_REQUEST:
b = map(ord, c[0:4])
self.appStatus = (b[0]<<24) + (b[1]<<16) + (b[2]<<8) + b[3]
self.protocolStatus = ord(c[4])
#----------------------------------------
def writeRecord(self, sock):
content = self.content
if self.recType == FCGI_BEGIN_REQUEST:
content = chr(self.role>>8) + chr(self.role & 255) + chr(self.flags) + 5*'\000'
elif self.recType == FCGI_UNKNOWN_TYPE:
content = chr(self.unknownType) + 7*'\000'
elif self.recType==FCGI_GET_VALUES or self.recType==FCGI_PARAMS:
content = ""
for i in self.values.keys():
content = content + writePair(i, self.values[i])
elif self.recType==FCGI_END_REQUEST:
v = self.appStatus
content = chr((v>>24)&255) + chr((v>>16)&255) + chr((v>>8)&255) + chr(v&255)
content = content + chr(self.protocolStatus) + 3*'\000'
cLen = len(content)
eLen = (cLen + 7) & (0xFFFF - 7) # align to an 8-byte boundary
padLen = eLen - cLen
hdr = [ self.version,
self.recType,
self.reqId >> 8,
self.reqId & 255,
cLen >> 8,
cLen & 255,
padLen,
0]
hdr = string.joinfields(map(chr, hdr), '')
sock.send(hdr + content + padLen*'\000')
#---------------------------------------------------------------------------
def readPair(s, pos):
nameLen=ord(s[pos]) ; pos=pos+1
if nameLen & 128:
b=map(ord, s[pos:pos+3]) ; pos=pos+3
nameLen=((nameLen&127)<<24) + (b[0]<<16) + (b[1]<<8) + b[2]
valueLen=ord(s[pos]) ; pos=pos+1
if valueLen & 128:
b=map(ord, s[pos:pos+3]) ; pos=pos+3
valueLen=((valueLen&127)<<24) + (b[0]<<16) + (b[1]<<8) + b[2]
return ( s[pos:pos+nameLen], s[pos+nameLen:pos+nameLen+valueLen],
pos+nameLen+valueLen )
#---------------------------------------------------------------------------
def writePair(name, value):
l=len(name)
if l<128: s=chr(l)
else:
s=chr(128|(l>>24)&255) + chr((l>>16)&255) + chr((l>>8)&255) + chr(l&255)
l=len(value)
if l<128: s=s+chr(l)
else:
s=s+chr(128|(l>>24)&255) + chr((l>>16)&255) + chr((l>>8)&255) + chr(l&255)
return s + name + value
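# Illustrative sketch (assumed example, not part of the original module):
# readPair and writePair implement the FastCGI name-value pair encoding.
# Names and values shorter than 128 bytes get a one-byte length prefix;
# longer ones use four bytes with the high bit set.  A round trip:
#   >>> s = writePair('SERVER_NAME', 'example.org')
#   >>> readPair(s, 0)
#   ('SERVER_NAME', 'example.org', 24)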
#---------------------------------------------------------------------------
def HandleManTypes(r, conn):
if r.recType == FCGI_GET_VALUES:
r.recType = FCGI_GET_VALUES_RESULT
v={}
vars={'FCGI_MAX_CONNS' : FCGI_MAX_CONNS,
'FCGI_MAX_REQS' : FCGI_MAX_REQS,
'FCGI_MPXS_CONNS': FCGI_MPXS_CONNS}
for i in r.values.keys():
            if vars.has_key(i): v[i]=vars[i]
        r.values=v
r.writeRecord(conn)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
_isFCGI = 1 # assume it is until we find out for sure
def isFCGI():
global _isFCGI
return _isFCGI
#---------------------------------------------------------------------------
_init = None
_sock = None
class FCGI:
def __init__(self):
self.haveFinished = 0
if _init == None:
_startup()
if not isFCGI():
self.haveFinished = 1
self.inp, self.out, self.err, self.env = \
sys.stdin, sys.stdout, sys.stderr, os.environ
return
if os.environ.has_key('FCGI_WEB_SERVER_ADDRS'):
good_addrs=string.split(os.environ['FCGI_WEB_SERVER_ADDRS'], ',')
            good_addrs=map(string.strip, good_addrs) # Remove whitespace
else:
good_addrs=None
self.conn, addr=_sock.accept()
stdin, data="", ""
self.env = {}
self.requestId=0
remaining=1
# Check if the connection is from a legal address
if good_addrs!=None and addr not in good_addrs:
raise error, 'Connection from invalid server!'
while remaining:
r=record(); r.readRecord(self.conn)
if r.recType in ManagementTypes:
HandleManTypes(r, self.conn)
elif r.reqId==0:
# Oh, poopy. It's a management record of an unknown
# type. Signal the error.
r2=record()
r2.recType=FCGI_UNKNOWN_TYPE ; r2.unknownType=r.recType
r2.writeRecord(self.conn)
continue # Charge onwards
# Ignore requests that aren't active
elif r.reqId != self.requestId and r.recType != FCGI_BEGIN_REQUEST:
continue
# If we're already doing a request, ignore further BEGIN_REQUESTs
elif r.recType == FCGI_BEGIN_REQUEST and self.requestId != 0:
continue
# Begin a new request
if r.recType == FCGI_BEGIN_REQUEST:
self.requestId = r.reqId
if r.role == FCGI_AUTHORIZER: remaining=1
elif r.role == FCGI_RESPONDER: remaining=2
elif r.role == FCGI_FILTER: remaining=3
elif r.recType == FCGI_PARAMS:
if r.content == "":
remaining=remaining-1
else:
for i in r.values.keys():
self.env[i] = r.values[i]
elif r.recType == FCGI_STDIN:
if r.content == "":
remaining=remaining-1
else:
stdin=stdin+r.content
elif r.recType==FCGI_DATA:
if r.content == "":
remaining=remaining-1
else:
data=data+r.content
# end of while remaining:
self.inp = sys.stdin = StringIO(stdin)
self.err = sys.stderr = StringIO()
self.out = sys.stdout = StringIO()
self.data = StringIO(data)
def __del__(self):
self.Finish()
def Finish(self, status=0):
if not self.haveFinished:
self.haveFinished = 1
self.err.seek(0,0)
self.out.seek(0,0)
r=record()
r.recType = FCGI_STDERR
r.reqId = self.requestId
data = self.err.read()
if data:
while data:
chunk, data = self.getNextChunk(data)
r.content = chunk
r.writeRecord(self.conn)
r.content="" ; r.writeRecord(self.conn) # Terminate stream
r.recType = FCGI_STDOUT
data = self.out.read()
while data:
chunk, data = self.getNextChunk(data)
r.content = chunk
r.writeRecord(self.conn)
r.content="" ; r.writeRecord(self.conn) # Terminate stream
r=record()
r.recType=FCGI_END_REQUEST
r.reqId=self.requestId
r.appStatus=status
r.protocolStatus=FCGI_REQUEST_COMPLETE
r.writeRecord(self.conn)
self.conn.close()
def getFieldStorage(self):
method = 'GET'
if self.env.has_key('REQUEST_METHOD'):
method = string.upper(self.env['REQUEST_METHOD'])
if method == 'GET':
return cgi.FieldStorage(environ=self.env, keep_blank_values=1)
else:
return cgi.FieldStorage(fp=self.inp, environ=self.env, keep_blank_values=1)
def getNextChunk(self, data):
chunk = data[:8192]
data = data[8192:]
return chunk, data
Accept = FCGI # alias for backwards compatibility
#---------------------------------------------------------------------------
def _startup():
global _init
_init = 1
try:
s=socket.fromfd(sys.stdin.fileno(), socket.AF_INET,
socket.SOCK_STREAM)
s.getpeername()
except socket.error, (err, errmsg):
if err!=errno.ENOTCONN: # must be a non-fastCGI environment
global _isFCGI
_isFCGI = 0
return
global _sock
_sock = s
#---------------------------------------------------------------------------
def _test():
counter=0
try:
while isFCGI():
req = Accept()
counter=counter+1
try:
fs = req.getFieldStorage()
size = string.atoi(fs['size'].value)
doc = ['*' * size]
except:
doc = ['<HTML><HEAD><TITLE>FCGI TestApp</TITLE></HEAD>\n<BODY>\n']
doc.append('<H2>FCGI TestApp</H2><P>')
doc.append('<b>request count</b> = %d<br>' % counter)
# doc.append('<b>pid</b> = %s<br>' % os.getpid())
# if req.env.has_key('CONTENT_LENGTH'):
# cl = string.atoi(req.env['CONTENT_LENGTH'])
# doc.append('<br><b>POST data (%s):</b><br><pre>' % cl)
# keys = fs.keys()
# keys.sort()
# for k in keys:
# val = fs[k]
# if type(val) == type([]):
# doc.append(' <b>%-15s :</b> %s\n' % (k, val))
# else:
# doc.append(' <b>%-15s :</b> %s\n' % (k, val.value))
# doc.append('</pre>')
#
#
# doc.append('<P><HR><P><pre>')
# keys = req.env.keys()
# keys.sort()
# for k in keys:
# doc.append('<b>%-20s :</b> %s\n' % (k, req.env[k]))
# doc.append('\n</pre><P><HR>\n')
doc.append('</BODY></HTML>\n')
doc = string.join(doc, '')
req.out.write('Content-length: %s\r\n'
'Content-type: text/html\r\n'
'Cache-Control: no-cache\r\n'
'\r\n'
% len(doc))
req.out.write(doc)
req.Finish()
except:
import traceback
f = open('traceback', 'w')
traceback.print_exc( file = f )
# f.write('%s' % doc)
if __name__=='__main__':
#import pdb
#pdb.run('_test()')
_test()
|
wlanowski/wlanowski.github.io
|
tkalook/backup/test/fcgi/fcgi.py
|
Python
|
mit
| 15,057
|
[
"TINKER"
] |
3793bf7e1ac59f29bbaf1350eff7de475dbbbc8a4ef7bc9c3acb2d2395ee38ca
|
import warnings
from scipy.linalg import toeplitz
import scipy.sparse.linalg
import scipy.interpolate
from scipy.ndimage import zoom, gaussian_filter1d
import numpy as np
import numba
import cooler
from functools import partial
from ._numutils import (
iterative_correction_symmetric as _iterative_correction_symmetric,
observed_over_expected as _observed_over_expected,
fake_cis,
logbins,
MatVec,
)
def get_diag(arr, i=0):
"""Get the i-th diagonal of a matrix.
This solution was borrowed from
http://stackoverflow.com/questions/9958577/changing-the-values-of-the-diagonal-of-a-matrix-in-numpy
"""
return arr.ravel()[
max(i, -arr.shape[1] * i) : max(0, (arr.shape[1] - i))
* arr.shape[1] : arr.shape[1]
+ 1
]
def set_diag(arr, x, i=0, copy=False):
"""
Rewrite the i-th diagonal of a matrix with a value or an array of values.
Supports 2D arrays, square or rectangular. In-place by default.
Parameters
----------
arr : 2-D array
Array whose diagonal is to be filled.
x : scalar or 1-D vector of correct length
Values to be written on the diagonal.
i : int, optional
Which diagonal to write to. Default is 0.
Main diagonal is 0; upper diagonals are positive and
lower diagonals are negative.
copy : bool, optional
Return a copy. Diagonal is written in-place if false.
        Default is False.
Returns
-------
Array with diagonal filled.
Notes
-----
Similar to numpy.fill_diagonal, but allows for kth diagonals as well.
This solution was borrowed from
http://stackoverflow.com/questions/9958577/changing-the-values-of-the-diagonal-of-a-matrix-in-numpy
"""
if copy:
arr = arr.copy()
start = max(i, -arr.shape[1] * i)
stop = max(0, (arr.shape[1] - i)) * arr.shape[1]
step = arr.shape[1] + 1
arr.flat[start:stop:step] = x
return arr
def fill_diag(arr, x, i=0, copy=True):
"""Identical to set_diag, but returns a copy by default"""
return set_diag(arr, x, i, copy)
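# Illustrative sketch (assumed example, not part of the original module):
# writing and reading the first superdiagonal of a 3x3 array.
#   >>> a = np.zeros((3, 3))
#   >>> set_diag(a, 1, i=1)
#   array([[0., 1., 0.],
#          [0., 0., 1.],
#          [0., 0., 0.]])
#   >>> get_diag(a, i=1)
#   array([1., 1.])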
def fill_na(arr, value=0, copy=True):
"""Replaces np.nan entries in an array with the provided value.
Parameters
----------
arr : np.array
value : float
copy : bool, optional
        If True, creates a copy of the array, otherwise replaces values in-place.
By default, True.
"""
if copy:
arr = arr.copy()
arr[np.isnan(arr)] = value
return arr
def dist_to_mask(mask, side="min"):
"""
Calculate the distance to the nearest True element of an array.
Parameters
----------
mask : iterable of bool
A boolean array.
side : str
        The side of the nearest True element to measure the distance to.
        Accepted values are:
'left' : calculate the distance to the nearest True value on the left
'right' : calculate the distance to the nearest True value on the right
'min' : calculate the distance to the closest True value
'max' : calculate the distance to the furthest of the two neighbouring True values
Returns
-------
dist: array of int
Notes
-----
The solution is borrowed from https://stackoverflow.com/questions/18196811/cumsum-reset-at-nan
"""
if side not in ["left", "right", "min", "max"]:
raise ValueError("side can be `left`, `right`, `min` or `max`")
if side == "min":
return np.minimum(
dist_to_mask(mask, side="left"), dist_to_mask(mask, side="right")
)
if side == "max":
return np.maximum(
dist_to_mask(mask, side="left"), dist_to_mask(mask, side="right")
)
mask = np.asarray(mask)
if side == "right":
mask = mask[::-1]
d = np.diff(np.r_[0.0, np.cumsum(~mask)[mask]])
v = mask.astype(int).copy()
v[mask] = d
dist = (~mask).cumsum() - np.cumsum(v)
return dist[::-1] if side == "right" else dist
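# Illustrative sketch (assumed example, not part of the original module):
#   >>> m = np.array([True, False, False, True, False])
#   >>> dist_to_mask(m, side="left")
#   array([0, 1, 2, 0, 1])
#   >>> dist_to_mask(m, side="min")
#   array([0, 1, 1, 0, 1])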
def get_finite(arr):
"""
Select only finite elements of an array.
"""
return arr[np.isfinite(arr)]
def fill_inf(arr, pos_value=0, neg_value=0, copy=True):
"""Replaces positive and negative infinity entries in an array with the
provided values.
Parameters
----------
arr : np.array
pos_value : float
Fill value for np.inf
neg_value : float
Fill value for -np.inf
copy : bool, optional
        If True, creates a copy of the array, otherwise replaces values in-place.
By default, True.
"""
if copy:
arr = arr.copy()
arr[np.isposinf(arr)] = pos_value
arr[np.isneginf(arr)] = neg_value
return arr
def fill_nainf(arr, value=0, copy=True):
"""Replaces np.nan and np.inf entries in an array with the provided value.
Parameters
----------
arr : np.array
value : float
copy : bool, optional
        If True, creates a copy of the array, otherwise replaces values in-place.
By default, True.
Notes
-----
Differs from np.nan_to_num in that it replaces np.inf with the same
number as np.nan.
"""
if copy:
arr = arr.copy()
arr[~np.isfinite(arr)] = value
return arr
def interp_nan(a_init, pad_zeros=True, method="linear", verbose=False):
"""Linearly interpolate to fill NaN rows and columns in a matrix.
Also interpolates NaNs in 1D arrays.
Parameters
----------
a_init : np.array
pad_zeros : bool, optional
If True, pads the matrix with zeros to fill NaNs at the edges.
By default, True.
method : str, optional
For 2D: "linear", "nearest", or "splinef2d"
For 1D: "linear", "nearest", "zero", "slinear", "quadratic", "cubic"
Returns
-------
array with NaNs linearly interpolated
Notes
-----
1D case adapted from: https://stackoverflow.com/a/39592604
2D case assumes that entire rows or columns are masked & edges to be
NaN-free, but is much faster than griddata implementation.
"""
shape = np.shape(a_init)
if pad_zeros:
a = np.zeros(tuple(s + 2 for s in shape))
a[tuple(slice(1, -1) for _ in shape)] = a_init
else:
a = np.array(a_init)
if len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
a = a.ravel()
isnan = np.isnan(a)
if np.sum(isnan) == 0:
if verbose:
print("no nans to interpolate")
return a_init
if a.ndim == 2:
if verbose:
print("interpolating 2D matrix")
if np.any(isnan[:, 0] | isnan[:, -1]) or np.any(isnan[0, :] | isnan[-1, :]):
raise ValueError("Edges must not have NaNs")
# Rows/cols to be considered fully null may have non-NaN diagonals
# so we'll take the maximum NaN count to identify them
n_nans_by_row = np.sum(isnan, axis=1)
n_nans_by_col = np.sum(isnan, axis=0)
i_inds = np.where(n_nans_by_row < np.max(n_nans_by_row))[0]
j_inds = np.where(n_nans_by_col < np.max(n_nans_by_col))[0]
if np.sum(isnan[np.ix_(i_inds, j_inds)]) > 0:
raise AssertionError("Found additional NaNs")
interpolator = partial(
scipy.interpolate.interpn,
(i_inds, j_inds),
a[np.ix_(i_inds, j_inds)],
method=method,
bounds_error=False,
)
else:
if verbose:
print("interpolating 1D vector")
inds = np.arange(len(a))
interpolator = scipy.interpolate.interp1d(
inds[~isnan], a[~isnan], kind=method, bounds_error=False
)
loc = np.where(isnan)
a[loc] = interpolator(loc)
if pad_zeros:
a = a[tuple(slice(1, -1) for _ in shape)]
return a
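# Illustrative sketch (assumed example, not part of the original module):
# interior NaNs of a 1D vector are filled by linear interpolation.
#   >>> interp_nan(np.array([1.0, np.nan, 3.0, np.nan, 5.0]))
#   array([1., 2., 3., 4., 5.])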
def slice_sorted(arr, lo, hi):
"""Get the subset of a sorted array with values >=lo and <hi.
A faster version of arr[(arr>=lo) & (arr<hi)]
"""
return arr[np.searchsorted(arr, lo) : np.searchsorted(arr, hi)]
def MAD(arr, axis=None, has_nans=False):
"""Calculate the Median Absolute Deviation from the median.
Parameters
----------
arr : np.ndarray
Input data.
axis : int
The axis along which to calculate MAD.
has_nans : bool
If True, use the slower NaN-aware method to calculate medians.
"""
if has_nans:
return np.nanmedian(np.abs(arr - np.nanmedian(arr, axis)), axis)
else:
return np.median(np.abs(arr - np.median(arr, axis)), axis)
def COMED(xs, ys, has_nans=False):
"""Calculate the comedian - the robust median-based counterpart of
Pearson's r.
::
comedian = median((xs-median(xs))*(ys-median(ys))) / MAD(xs) / MAD(ys)
Parameters
----------
has_nans : bool
if True, mask (x,y) pairs with at least one NaN
Notes
-----
Citations: "On MAD and comedians" by Michael Falk (1997),
"Robust Estimation of the Correlation Coefficient: An Attempt of Survey"
by Georgy Shevlyakov and Pavel Smirnov (2011)
"""
if has_nans:
mask = np.isfinite(xs) & np.isfinite(ys)
xs = xs[mask]
ys = ys[mask]
med_x = np.median(xs)
med_y = np.median(ys)
comedian = np.median((xs - med_x) * (ys - med_y)) / MAD(xs) / MAD(ys)
return comedian
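# Illustrative sketch (assumed example, not part of the original module):
# MAD is robust to the outlier, and the comedian of a vector with itself is 1.
#   >>> MAD(np.array([1., 2., 3., 4., 100.]))
#   1.0
#   >>> xs = np.array([1., 2., 3., 4., 5.])
#   >>> COMED(xs, xs)
#   1.0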
def normalize_score(arr, norm="z", axis=None, has_nans=True):
"""Normalize an array by subtracting the first moment and dividing the
residual by the second.
Parameters
----------
arr : np.ndarray
Input data.
norm : str
The type of normalization.
'z' - report z-scores,
norm_arr = (arr - mean(arr)) / std(arr)
'mad' - report deviations from the median in units of MAD
(Median Absolute Deviation from the median),
norm_arr = (arr - median(arr)) / MAD(arr)
'madz' - report robust z-scores, i.e. estimate the mean as
            the median and the standard error as MAD / 0.67449,
            norm_arr = (arr - median(arr)) / MAD(arr) * 0.67449
axis : int
The axis along which to calculate the normalization parameters.
has_nans : bool
If True, use slower NaN-aware methods to calculate the
normalization parameters.
"""
norm_arr = np.copy(arr)
norm = norm.lower()
if norm == "z":
if has_nans:
norm_arr -= np.nanmean(norm_arr, axis=axis)
norm_arr /= np.nanstd(norm_arr, axis=axis)
else:
norm_arr -= np.mean(norm_arr, axis=axis)
norm_arr /= np.std(norm_arr, axis=axis)
elif norm == "mad" or norm == "madz":
if has_nans:
norm_arr -= np.nanmedian(norm_arr, axis=axis)
else:
norm_arr -= np.median(norm_arr, axis=axis)
norm_arr /= MAD(norm_arr, axis=axis, has_nans=has_nans)
if norm == "madz":
norm_arr *= 0.67449
else:
raise ValueError("Unknown norm type: {}".format(norm))
return norm_arr
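# Illustrative sketch (assumed example, not part of the original module):
#   >>> normalize_score(np.array([1., 2., 3., 4., 5.]), norm="mad")
#   array([-2., -1.,  0.,  1.,  2.])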
def stochastic_sd(arr, n=10000, seed=0):
"""Estimate the standard deviation of an array by considering only the
subset of its elements.
Parameters
----------
n : int
The number of elements to consider. If the array contains fewer elements,
use all.
seed : int
The seed for the random number generator.
"""
arr = np.asarray(arr)
if arr.size < n:
return np.sqrt(arr.var())
else:
return np.sqrt(
np.random.RandomState(seed).choice(arr.flat, n, replace=True).var()
)
def is_symmetric(mat):
"""
Check if a matrix is symmetric.
"""
maxDiff = np.abs(mat - mat.T).max()
return maxDiff < stochastic_sd(mat) * 1e-7 + 1e-5
def get_eig(mat, n=3, mask_zero_rows=False, subtract_mean=False, divide_by_mean=False):
"""Perform an eigenvector decomposition.
Parameters
----------
mat : np.ndarray
A square matrix, must not contain nans, infs or zero rows.
n : int
The number of eigenvectors to return. Output is backfilled with NaNs
when n exceeds the size of the input matrix.
mask_zero_rows : bool
If True, mask empty rows/columns before eigenvector decomposition.
Works only with symmetric matrices.
subtract_mean : bool
If True, subtract the mean from the matrix.
divide_by_mean : bool
If True, divide the matrix by its mean.
Returns
-------
eigvecs : np.ndarray
An array of eigenvectors (in rows), sorted by a decreasing absolute
eigenvalue.
eigvals : np.ndarray
An array of sorted eigenvalues.
"""
symmetric = is_symmetric(mat)
if (
symmetric
and np.sum(np.sum(np.abs(mat), axis=0) == 0) > 0
and not mask_zero_rows
):
warnings.warn(
"The matrix contains empty rows/columns and is symmetric. "
"Mask the empty rows with remove_zeros=True"
)
# size of the input matrix
n_rows = mat.shape[0]
if n > n_rows:
warnings.warn(
"Number n of requested eigenvalues is larger than the matrix size."
)
if mask_zero_rows:
if not is_symmetric(mat):
raise ValueError("The input matrix must be symmetric!")
mask = np.sum(np.abs(mat), axis=0) != 0
mat_collapsed = mat[mask, :][:, mask]
eigvecs_collapsed, eigvals = get_eig(
mat_collapsed,
n=n,
mask_zero_rows=False,
subtract_mean=subtract_mean,
divide_by_mean=divide_by_mean,
)
eigvecs = np.full((n, n_rows), np.nan)
for i in range(n):
eigvecs[i][mask] = eigvecs_collapsed[i]
return eigvecs, eigvals
else:
mat = mat.astype(np.float64, copy=True) # make a copy, ensure float
# prepare NaN-filled arrays for output in case we requested
        # more eigenvalues than can be computed (eigs/eigsh k limits)
eigvals = np.full(n, np.nan)
eigvecs = np.full((n, n_rows), np.nan)
mean = np.mean(mat)
if subtract_mean:
mat -= mean
if divide_by_mean:
mat /= mean
if symmetric:
# adjust requested number of eigvals for "eigsh"
_n = n if n < n_rows else (n_rows - 1)
_eigvals, _eigvecs = scipy.sparse.linalg.eigsh(mat, _n)
else:
# adjust requested number of eigvals for "eigs"
_n = n if n < (n_rows - 1) else (n_rows - 2)
_eigvals, _eigvecs = scipy.sparse.linalg.eigs(mat, _n)
# reorder according to eigvals and copy into output arrays
order = np.argsort(-np.abs(_eigvals))
eigvals[:_n] = _eigvals[order]
eigvecs[:_n, :] = _eigvecs.T[order]
return eigvecs, eigvals
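# Illustrative sketch (assumed example, not part of the original module):
# eigenvectors come back in rows, sorted by decreasing |eigenvalue|.
#   >>> eigvecs, eigvals = get_eig(np.array([[2., 0.], [0., 1.]]), n=1)
#   >>> eigvals
#   array([2.])
#   >>> eigvecs.shape
#   (1, 2)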
@numba.njit
def _logbins_numba(lo, hi, ratio=0, N=0, prepend_zero=False):
"""Make bins with edges evenly spaced in log-space.
Parameters
----------
lo, hi : int
The span of the bins.
ratio : float
The target ratio between the upper and the lower edge of each bin.
Either ratio or N must be specified.
N : int
The target number of bins. The resulting number of bins is not guaranteed.
Either ratio or N must be specified.
"""
lo = int(lo)
hi = int(hi)
if ratio != 0:
if N != 0:
raise ValueError("Please specify N or ratio")
N = np.log(hi / lo) / np.log(ratio)
elif N == 0:
raise ValueError("Please specify N or ratio")
data10 = 10 ** np.linspace(np.log10(lo), np.log10(hi), int(N))
data10 = np.rint(data10)
data10_int = np.sort(np.unique(data10)).astype(np.int_)
assert data10_int[0] == lo
assert data10_int[-1] == hi
if prepend_zero:
data10_int = np.concatenate((np.array([0]), data10_int))
return data10_int
@numba.njit
def observed_over_expected(
matrix, mask=np.empty(shape=(0), dtype=np.bool_), dist_bin_edge_ratio=1.03
):
"""
Normalize the contact matrix for distance-dependent contact decay.
The diagonals of the matrix, corresponding to contacts between loci pairs
with a fixed distance, are grouped into exponentially growing bins of
distances; the diagonals from each bin are normalized by their average value.
Parameters
----------
matrix : np.ndarray
A 2D symmetric matrix of contact frequencies.
mask : np.ndarray
A 1D or 2D mask of valid data.
If 1D, it is interpreted as a mask of "good" bins.
If 2D, it is interpreted as a mask of "good" pixels.
dist_bin_edge_ratio : float
The ratio of the largest and the shortest distance in each distance bin.
Returns
-------
OE : np.ndarray
The diagonal-normalized matrix of contact frequencies.
dist_bins : np.ndarray
The edges of the distance bins used to calculate average
distance-dependent contact frequency.
sum_pixels : np.ndarray
The sum of contact frequencies in each distance bin.
n_pixels : np.ndarray
The total number of valid pixels in each distance bin.
"""
N = matrix.shape[0]
mask2d = np.empty(shape=(0, 0), dtype=np.bool_)
if mask.ndim == 1:
if mask.size > 0:
mask2d = mask.reshape((1, -1)) * mask.reshape((-1, 1))
elif mask.ndim == 2:
# Numba expects mask to be a 1d array, so we need to hint
# that it is now a 2d array
mask2d = mask.reshape((int(np.sqrt(mask.size)), int(np.sqrt(mask.size))))
else:
raise ValueError("The mask must be either 1D or 2D.")
data = np.copy(matrix).astype(np.float64)
has_mask = mask2d.size > 0
dist_bins = _logbins_numba(1, N, dist_bin_edge_ratio)
dist_bins = np.concatenate((np.array([0]), dist_bins))
n_pixels_arr = np.zeros_like(dist_bins[1:])
sum_pixels_arr = np.zeros_like(dist_bins[1:], dtype=np.float64)
bin_idx, n_pixels, sum_pixels = 0, 0, 0
for bin_idx, lo, hi in zip(
range(len(dist_bins) - 1), dist_bins[:-1], dist_bins[1:]
):
sum_pixels = 0
n_pixels = 0
for offset in range(lo, hi):
for j in range(0, N - offset):
if not has_mask or mask2d[offset + j, j]:
sum_pixels += data[offset + j, j]
n_pixels += 1
n_pixels_arr[bin_idx] = n_pixels
sum_pixels_arr[bin_idx] = sum_pixels
if n_pixels == 0:
continue
mean_pixel = sum_pixels / n_pixels
if mean_pixel == 0:
continue
for offset in range(lo, hi):
for j in range(0, N - offset):
if not has_mask or mask2d[offset + j, j]:
data[offset + j, j] /= mean_pixel
if offset > 0:
data[j, offset + j] /= mean_pixel
return data, dist_bins, sum_pixels_arr, n_pixels_arr
@numba.jit # (nopython=True)
def iterative_correction_symmetric(
x, max_iter=1000, ignore_diags=0, tol=1e-5, verbose=False
):
"""The main method for correcting DS and SS read data.
By default does iterative correction, but can perform an M-time correction
Parameters
----------
x : np.ndarray
A symmetric matrix to correct.
max_iter : int
The maximal number of iterations to take.
ignore_diags : int
The number of diagonals to ignore during iterative correction.
tol : float
If less or equal to zero, will perform max_iter iterations.
Returns
-------
_x : np.ndarray
Corrected matrix
totalBias : np.ndarray
Vector with corrections biases
report : (bool, int)
A tuple that reports convergence
status and used number of iterations
"""
N = len(x)
_x = x.copy()
if ignore_diags > 0:
for d in range(0, ignore_diags):
for j in range(0, N - d):
_x[j, j + d] = 0
_x[j + d, j] = 0
totalBias = np.ones(N, np.double)
converged = False
iternum = 0
mask = np.sum(_x, axis=1) == 0
for iternum in range(max_iter):
s = np.sum(_x, axis=1)
mask = s == 0
s = s / np.mean(s[~mask])
s[mask] = 1
s -= 1
s *= 0.8
s += 1
totalBias *= s
for i in range(N):
for j in range(N):
_x[i, j] /= s[i] * s[j]
crit = np.var(s) # np.abs(s - 1).max()
if verbose:
print(crit)
if (tol > 0) and (crit < tol):
converged = True
break
corr = totalBias[~mask].mean() # mean correction factor
_x = _x * corr * corr # renormalizing everything
totalBias /= corr
report = (converged, iternum)
return _x, totalBias, report
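# Illustrative sketch (assumed example, not part of the original module):
# after correction, the rows (and columns) of a symmetric matrix sum to
# roughly the same value (up to the convergence tolerance).
#   >>> m = np.array([[1., 2.], [2., 8.]])
#   >>> corrected, bias, (converged, n_iter) = iterative_correction_symmetric(m)
#   >>> s = corrected.sum(axis=1)
#   >>> bool(np.allclose(s, s.mean(), rtol=0.05))
#   True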
@numba.jit # (nopython=True)
def iterative_correction_asymmetric(x, max_iter=1000, tol=1e-5, verbose=False):
"""Adapted from iterative_correction_symmetric
Parameters
----------
x : np.ndarray
An asymmetric matrix to correct.
max_iter : int
The maximal number of iterations to take.
tol : float
If less or equal to zero, will perform max_iter iterations.
Returns
-------
_x : np.ndarray
Corrected matrix
totalBias : np.ndarray
Vector with corrections biases for columns
totalBias2 : np.ndarray
Vector with corrections biases for rows
report : (bool, int)
A tuple that reports convergence
status and used number of iterations
"""
N2, N = x.shape
_x = x.copy()
totalBias = np.ones(N, np.double)
totalBias2 = np.ones(N2, np.double)
    converged = False
    iternum = 0
mask = np.sum(_x, axis=0) == 0
mask2 = np.sum(_x, axis=1) == 0
for iternum in range(max_iter):
s = np.sum(_x, axis=0)
mask = s == 0
s = s / np.mean(s[~mask])
s[mask] = 1.0
s -= 1.0
s *= 0.8
s += 1.0
totalBias *= s
s2 = np.sum(_x, axis=1)
mask2 = s2 == 0
s2 = s2 / np.mean(s2[~mask2])
s2[mask2] = 1.0
s2 -= 1.0
s2 *= 0.8
s2 += 1.0
totalBias2 *= s2
# _x = _x / s[:, None] / s[None,:]
# an explicit cycle is 2x faster here
for i in range(N2):
for j in range(N):
_x[i, j] /= s[j] * s2[i]
crit = np.var(s) # np.abs(s - 1).max()
crit2 = np.var(s2) # np.abs(s - 1).max()
if verbose:
print(crit)
if (tol > 0) and (crit < tol) and (crit2 < tol):
converged = True
break
corr = totalBias[~mask].mean() # mean correction factor
corr2 = totalBias2[~mask2].mean() # mean correction factor
_x = _x * corr * corr2 # renormalizing everything
totalBias /= corr
totalBias2 /= corr2
report = (converged, iternum)
return _x, totalBias, totalBias2, report
class LazyToeplitz(cooler.core._IndexingMixin):
"""
A Toeplitz matrix can be represented with one row and one column.
This lazy toeplitz object supports slice querying to construct dense
matrices on the fly.
"""
def __init__(self, c, r=None):
if r is None:
r = c
elif c[0] != r[0]:
raise ValueError("First element of `c` and `r` should match")
self._c = c
self._r = r
@property
def shape(self):
return (len(self._c), len(self._r))
def __getitem__(self, key):
slc0, slc1 = self._unpack_index(key)
i0, i1 = self._process_slice(slc0, self.shape[0])
j0, j1 = self._process_slice(slc1, self.shape[1])
C, R = self._c, self._r
# symmetric query
if (i0 == j0) and (i1 == j1):
c = C[0 : (i1 - i0)]
r = R[0 : (j1 - j0)]
# asymmetric query
else:
transpose = False
# tril
if j0 < i0 or (i0 == j0 and i1 < j1):
# tranpose the matrix, query,
# then transpose the result
i0, i1, j0, j1 = j0, j1, i0, i1
C, R = R, C
transpose = True
c = np.r_[R[(j0 - i0) : max(0, j0 - i1) : -1], C[0 : max(0, i1 - j0)]]
r = R[(j0 - i0) : (j1 - i0)]
if transpose:
c, r = r, c
return toeplitz(c, r)
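# Illustrative sketch (assumed example, not part of the original module;
# relies on the slice handling provided by cooler.core._IndexingMixin):
# a symmetric slice reproduces an ordinary dense Toeplitz block.
#   >>> L = LazyToeplitz(np.array([1, 2, 3, 4]))
#   >>> L[0:3, 0:3]
#   array([[1, 2, 3],
#          [2, 1, 2],
#          [3, 2, 1]])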
def get_kernel(w, p, ktype):
"""
    Return typical kernels given size parameters w, p, and kernel type.
Parameters
----------
w : int
Outer kernel size (actually half of it).
p : int
Inner kernel size (half of it).
ktype : str
Name of the kernel type, could be one of the following: 'donut',
'vertical', 'horizontal', 'lowleft', 'upright'.
Returns
-------
kernel : ndarray
A square matrix of int type filled with 1 and 0, according to the
kernel type.
"""
width = 2 * w + 1
kernel = np.ones((width, width), dtype=np.int64)
# mesh grid:
y, x = np.ogrid[-w : w + 1, -w : w + 1]
if ktype == "donut":
# mask inner pXp square:
mask = (((-p) <= x) & (x <= p)) & (((-p) <= y) & (y <= p))
# mask vertical and horizontal
# lines of width 1 pixel:
mask += (x == 0) | (y == 0)
# they are all 0:
kernel[mask] = 0
elif ktype == "vertical":
# mask outside of vertical line
# of width 3:
mask = ((-1 > x) | (x > 1)) & ((y >= -w))
# mask inner pXp square:
mask += ((-p <= x) & (x <= p)) & ((-p <= y) & (y <= p))
# kernel masked:
kernel[mask] = 0
elif ktype == "horizontal":
# mask outside of horizontal line
# of width 3:
mask = ((-1 > y) | (y > 1)) & ((x >= -w))
# mask inner pXp square:
mask += ((-p <= x) & (x <= p)) & ((-p <= y) & (y <= p))
# kernel masked:
kernel[mask] = 0
# ACHTUNG!!! UPRIGHT AND LOWLEFT ARE SWITCHED ...
# IT SEEMS FOR UNKNOWN REASON THAT IT SHOULD
# BE THAT WAY ...
# OR IT'S A MISTAKE IN hIccups AS WELL ...
elif ktype == "upright":
# mask inner pXp square:
mask = ((x >= -p)) & ((y <= p))
mask += x >= 0
mask += y <= 0
# kernel masked:
kernel[mask] = 0
elif ktype == "lowleft":
# mask inner pXp square:
mask = ((x >= -p)) & ((y <= p))
mask += x >= 0
mask += y <= 0
# reflect that mask to
# make it upper-right:
mask = mask[::-1, ::-1]
# kernel masked:
kernel[mask] = 0
else:
raise ValueError("Kernel-type {} has not been implemented yet".format(ktype))
return kernel
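# Illustrative sketch (assumed example, not part of the original module):
# the "donut" kernel is a (2*w+1) x (2*w+1) block of ones with the inner
# (2*p+1) x (2*p+1) square and the central row/column zeroed out.
#   >>> k = get_kernel(3, 1, "donut")
#   >>> k.shape
#   (7, 7)
#   >>> int(k[3, 3]), int(k[0, 0])
#   (0, 1)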
def coarsen(reduction, x, axes, trim_excess=False):
"""
Coarsen an array by applying reduction to fixed size neighborhoods.
Adapted from `dask.array.coarsen` to work on regular numpy arrays.
Parameters
----------
reduction : function
Function like np.sum, np.mean, etc...
x : np.ndarray
Array to be coarsened
axes : dict
Mapping of axis to coarsening factor
trim_excess : bool, optional
Remove excess elements. Default is False.
Examples
--------
Provide dictionary of scale per dimension
>>> x = np.array([1, 2, 3, 4, 5, 6])
>>> coarsen(np.sum, x, {0: 2})
array([ 3, 7, 11])
>>> coarsen(np.max, x, {0: 3})
array([3, 6])
>>> x = np.arange(24).reshape((4, 6))
>>> x
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23]])
>>> coarsen(np.min, x, {0: 2, 1: 3})
array([[ 0, 3],
[12, 15]])
See also
--------
dask.array.coarsen
"""
# Insert singleton dimensions if they don't exist already
for i in range(x.ndim):
if i not in axes:
axes[i] = 1
if trim_excess:
ind = tuple(
slice(0, -(d % axes[i])) if d % axes[i] else slice(None, None)
for i, d in enumerate(x.shape)
)
x = x[ind]
# (10, 10) -> (5, 2, 5, 2)
newdims = [(x.shape[i] // axes[i], axes[i]) for i in range(x.ndim)]
newshape = tuple(np.concatenate(newdims))
reduction_axes = tuple(range(1, x.ndim * 2, 2))
return reduction(x.reshape(newshape), axis=reduction_axes)
def smooth(y, box_pts):
try:
from astropy.convolution import convolve
except ImportError:
raise ImportError("The astropy module is required to use this function")
box = np.ones(box_pts) / box_pts
# also: None, fill, wrap, extend
y_smooth = convolve(y, box, boundary="extend")
return y_smooth
def infer_mask2D(mat):
if mat.shape[0] != mat.shape[1]:
raise ValueError("matix must be symmetric!")
fill_na(mat, value=0, copy=False)
sum0 = np.sum(mat, axis=0) > 0
mask = sum0[:, None] * sum0[None, :]
return mask
def remove_good_singletons(mat, mask=None, returnMask=False):
mat = mat.copy()
if mask is None:
mask = infer_mask2D(mat)
badBins = np.sum(mask, axis=0) == 0
goodBins = badBins == 0
good_singletons = (goodBins * smooth(goodBins, 3)) == 1 / 3
mat[good_singletons, :] = np.nan
mat[:, good_singletons] = np.nan
mask[good_singletons, :] = 0
mask[:, good_singletons] = 0
if returnMask:
return mat, mask
else:
return mat
def interpolate_bad_singletons(
mat, mask=None, fillDiagonal=True, returnMask=False, secondPass=True, verbose=False
):
"""Interpolate singleton missing bins for visualization
Examples
--------
>>> ax = plt.subplot(121)
>>> maxval = np.log(np.nanmean(np.diag(mat,3))*2 )
    >>> plt.matshow(np.log(mat), vmax=maxval, fignum=False)
>>> plt.set_cmap('fall');
>>> plt.subplot(122, sharex=ax, sharey=ax)
>>> plt.matshow(
... np.log(interpolate_bad_singletons(remove_good_singletons(mat))),
... vmax=maxval,
... fignum=False
... )
>>> plt.set_cmap('fall');
>>> plt.show()
"""
mat = mat.copy()
if mask is None:
mask = infer_mask2D(mat)
antimask = ~mask
badBins = np.sum(mask, axis=0) == 0
singletons = (badBins * smooth(badBins == 0, 3)) > 1 / 3
bb_minus_singletons = (badBins.astype("int8") - singletons.astype("int8")).astype(
"bool"
)
mat[antimask] = np.nan
locs = np.zeros(np.shape(mat))
locs[singletons, :] = 1
locs[:, singletons] = 1
locs[bb_minus_singletons, :] = 0
locs[:, bb_minus_singletons] = 0
locs = np.nonzero(locs) # np.isnan(mat))
interpvals = np.zeros(np.shape(mat))
if verbose:
print("initial pass to interpolate:", len(locs[0]))
for loc in zip(locs[0], locs[1]):
i, j = loc
if loc[0] > loc[1]:
if (
(loc[0] > 0)
and (loc[1] > 0)
and (loc[0] < len(mat) - 1)
and (loc[1] < len(mat) - 1)
):
interpvals[i, j] = np.nanmean([mat[i - 1, j - 1], mat[i + 1, j + 1]])
elif loc[0] == 0:
interpvals[i, j] = np.nanmean([mat[i, j - 1], mat[i, j + 1]])
elif loc[1] == 0:
interpvals[i, j] = np.nanmean([mat[i - 1, j], mat[i + 1, j]])
elif loc[0] == (len(mat) - 1):
interpvals[i, j] = np.nanmean([mat[i, j - 1], mat[i, j + 1]])
elif loc[1] == (len(mat) - 1):
interpvals[i, j] = np.nanmean([mat[i - 1, j], mat[i + 1, j]])
interpvals = interpvals + interpvals.T
mat[locs] = interpvals[locs]
mask[locs] = 1
if secondPass:
locs = np.nonzero(np.isnan(interpvals)) # np.isnan(mat))
interpvals2 = np.zeros(np.shape(mat))
if verbose:
print("still remaining: ", len(locs[0]))
for loc in zip(locs[0], locs[1]):
i, j = loc
if loc[0] > loc[1]:
if (
(loc[0] > 0)
and (loc[1] > 0)
and (loc[0] < len(mat) - 1)
and (loc[1] < len(mat) - 1)
):
interpvals2[i, j] = np.nanmean(
[mat[i - 1, j - 1], mat[i + 1, j + 1]]
)
elif loc[0] == 0:
interpvals2[i, j] = np.nanmean([mat[i, j - 1], mat[i, j + 1]])
elif loc[1] == 0:
interpvals2[i, j] = np.nanmean([mat[i - 1, j], mat[i + 1, j]])
elif loc[0] == (len(mat) - 1):
interpvals2[i, j] = np.nanmean([mat[i, j - 1], mat[i, j + 1]])
elif loc[1] == (len(mat) - 1):
interpvals2[i, j] = np.nanmean([mat[i - 1, j], mat[i + 1, j]])
interpvals2 = interpvals2 + interpvals2.T
mat[locs] = interpvals2[locs]
mask[locs] = 1
if fillDiagonal:
for i in range(-1, 2):
set_diag(mat, np.nan, i=i, copy=False)
for i in range(-1, 2):
set_diag(mask, 0, i=i, copy=False)
if returnMask:
return mat, mask
else:
return mat
def zoom_array(
in_array,
final_shape,
same_sum=False,
zoom_function=partial(zoom, order=1),
**zoom_kwargs
):
"""Rescale an array or image.
Normally, one can use scipy.ndimage.zoom to do array/image rescaling.
However, scipy.ndimage.zoom does not coarsegrain images well. It basically
takes nearest neighbor, rather than averaging all the pixels, when
coarsegraining arrays. This increases noise. Photoshop doesn't do that, and
performs some smart interpolation-averaging instead.
If you were to coarsegrain an array by an integer factor, e.g. 100x100 ->
25x25, you just need to do block-averaging, that's easy, and it reduces
noise. But what if you want to coarsegrain 100x100 -> 30x30?
Then my friend you are in trouble. But this function will help you. This
function will blow up your 100x100 array to a 120x120 array using
scipy.ndimage zoom Then it will coarsegrain a 120x120 array by
block-averaging in 4x4 chunks.
It will do it independently for each dimension, so if you want a 100x100
array to become a 60x120 array, it will blow up the first and the second
dimension to 120, and then block-average only the first dimension.
(Copied from mirnylib.numutils)
Parameters
----------
in_array : ndarray
n-dimensional numpy array (1D also works)
final_shape : shape tuple
resulting shape of an array
same_sum : bool, optional
Preserve a sum of the array, rather than values. By default, values
are preserved
zoom_function : callable
By default, scipy.ndimage.zoom with order=1. You can plug your own.
**zoom_kwargs :
Options to pass to zoomFunction.
Returns
-------
rescaled : ndarray
Rescaled version of in_array
"""
in_array = np.asarray(in_array, dtype=np.double)
in_shape = in_array.shape
assert len(in_shape) == len(final_shape)
mults = [] # multipliers for the final coarsegraining
for i in range(len(in_shape)):
if final_shape[i] < in_shape[i]:
mults.append(int(np.ceil(in_shape[i] / final_shape[i])))
else:
mults.append(1)
# shape to which to blow up
temp_shape = tuple([i * j for i, j in zip(final_shape, mults)])
# stupid zoom doesn't accept the final shape. Carefully crafting the
# multipliers to make sure that it will work.
zoom_multipliers = np.array(temp_shape) / np.array(in_shape) + 0.0000001
assert zoom_multipliers.min() >= 1
# applying scipy.ndimage.zoom
rescaled = zoom_function(in_array, zoom_multipliers, **zoom_kwargs)
for ind, mult in enumerate(mults):
if mult != 1:
sh = list(rescaled.shape)
assert sh[ind] % mult == 0
newshape = sh[:ind] + [sh[ind] // mult, mult] + sh[ind + 1 :]
rescaled.shape = newshape
rescaled = np.mean(rescaled, axis=ind + 1)
assert rescaled.shape == final_shape
if same_sum:
extra_size = np.prod(final_shape) / np.prod(in_shape)
rescaled /= extra_size
return rescaled
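# Illustrative sketch (assumed example, not part of the original module):
# downscaling by an integer factor reduces to plain block-averaging.
#   >>> a = np.arange(16.).reshape(4, 4)
#   >>> zoom_array(a, (2, 2)).shape
#   (2, 2)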
def adaptive_coarsegrain(ar, countar, cutoff=5, max_levels=8, min_shape=8):
"""
Adaptively coarsegrain a Hi-C matrix based on local neighborhood pooling
of counts.
Parameters
----------
ar : array_like, shape (n, n)
A square Hi-C matrix to coarsegrain. Usually this would be a balanced
matrix.
countar : array_like, shape (n, n)
The raw count matrix for the same area. Has to be the same shape as the
Hi-C matrix.
cutoff : float, optional
A minimum number of raw counts per pixel required to stop 2x2 pooling.
Larger cutoff values would lead to a more coarse-grained, but smoother
map. 3 is a good default value for display purposes, could be lowered
to 1 or 2 to make the map less pixelated. Setting it to 1 will only
ensure there are no zeros in the map.
max_levels : int, optional
How many levels of coarsening to perform. It is safe to keep this
number large as very coarsened map will have large counts and no
substitutions would be made at coarser levels.
min_shape : int, optional
Stop coarsegraining when coarsegrained array shape is less than that.
Returns
-------
Smoothed array, shape (n, n)
Notes
-----
The algorithm works as follows:
First, it pads an array with NaNs to the nearest power of two. Second, it
coarsens the array in powers of two until the size is less than minshape.
Third, it starts with the most coarsened array, and goes one level up.
It looks at all 4 pixels that make each pixel in the second-to-last
coarsened array. If the raw counts for any valid (non-NaN) pixel are less
than ``cutoff``, it replaces the values of the valid (4 or less) pixels
with the NaN-aware average. It is then applied to the next
(less coarsened) level until it reaches the original resolution.
In the resulting matrix, there are guaranteed to be no zeros, unless very
large zero-only areas were provided such that zeros were produced
``max_levels`` times when coarsening.
Examples
--------
>>> c = cooler.Cooler("/path/to/some/cooler/at/about/2000bp/resolution")
>>> # sample region of about 6000x6000
>>> mat = c.matrix(balance=True).fetch("chr1:10000000-22000000")
>>> mat_raw = c.matrix(balance=False).fetch("chr1:10000000-22000000")
>>> mat_cg = adaptive_coarsegrain(mat, mat_raw)
>>> plt.figure(figsize=(16,7))
>>> ax = plt.subplot(121)
>>> plt.imshow(np.log(mat), vmax=-3)
>>> plt.colorbar()
>>> plt.subplot(122, sharex=ax, sharey=ax)
>>> plt.imshow(np.log(mat_cg), vmax=-3)
>>> plt.colorbar()
"""
def _coarsen(ar, operation=np.sum):
"""Coarsegrains an array by a factor of 2"""
M = ar.shape[0] // 2
newar = np.reshape(ar, (M, 2, M, 2))
cg = operation(newar, axis=1)
cg = operation(cg, axis=2)
return cg
def _expand(ar, counts=None):
"""
Performs an inverse of nancoarsen
"""
N = ar.shape[0] * 2
newar = np.zeros((N, N))
newar[::2, ::2] = ar
newar[1::2, ::2] = ar
newar[::2, 1::2] = ar
newar[1::2, 1::2] = ar
return newar
# defining arrays, making sure they are floats
ar = np.asarray(ar, float)
countar = np.asarray(countar, float)
# TODO: change this to the nearest shape correctly counting the smallest
# shape the algorithm will reach
Norig = ar.shape[0]
Nlog = np.log2(Norig)
if not np.allclose(Nlog, np.rint(Nlog)):
        newN = int(2 ** np.ceil(Nlog))  # next power-of-two sized matrix
newar = np.empty((newN, newN), dtype=float) # fitting things in there
newar[:] = np.nan
newcountar = np.zeros((newN, newN), dtype=float)
newar[:Norig, :Norig] = ar
newcountar[:Norig, :Norig] = countar
ar = newar
countar = newcountar
armask = np.isfinite(ar) # mask of "valid" elements
countar[~armask] = 0
ar[~armask] = 0
assert np.isfinite(countar).all()
assert countar.shape == ar.shape
# We will be working with three arrays.
ar_cg = [ar] # actual Hi-C data
countar_cg = [countar] # counts contributing to Hi-C data (raw Hi-C reads)
armask_cg = [armask] # mask of "valid" pixels of the heatmap
# 1. Forward pass: coarsegrain all 3 arrays
for i in range(max_levels):
if countar_cg[-1].shape[0] > min_shape:
countar_cg.append(_coarsen(countar_cg[-1]))
armask_cg.append(_coarsen(armask_cg[-1]))
ar_cg.append(_coarsen(ar_cg[-1]))
# Get the most coarsegrained array
ar_cur = ar_cg.pop()
countar_cur = countar_cg.pop()
armask_cur = armask_cg.pop()
# 2. Reverse pass: replace values starting with most coarsegrained array
# We have 4 pixels that were coarsegrained to one pixel.
# Let V be the array of values (ar), and C be the array of counts of
# valid pixels. Then the coarsegrained values and valid pixel counts
# are:
# V_{cg} = V_{0,0} + V_{0,1} + V_{1,0} + V_{1,1}
# C_{cg} = C_{0,0} + C_{0,1} + C_{1,0} + C_{1,1}
# The average value at the coarser level is V_{cg} / C_{cg}
# The average value at the finer level is V_{0,0} / C_{0,0}, etc.
#
# We would replace 4 values with the average if counts for either of the
# 4 values are less than cutoff. To this end, we perform nanmin of raw
# Hi-C counts in each 4 pixels
# Because if counts are 0 due to this pixel being invalid - it's fine.
# But if they are 0 in a valid pixel - we replace this pixel.
# If we decide to replace the current 2x2 square with coarsegrained
# values, we need to make it produce the same average value
# To this end, we would replace V_{0,0} with V_{cg} * C_{0,0} / C_{cg} and
# so on.
for i in range(len(countar_cg)):
ar_next = ar_cg.pop()
countar_next = countar_cg.pop()
armask_next = armask_cg.pop()
# obtain current "average" value by dividing sum by the # of valid pixels
val_cur = ar_cur / armask_cur
# expand it so that it is the same shape as the previous level
val_exp = _expand(val_cur)
# create array of substitutions: multiply average value by counts
addar_exp = val_exp * armask_next
# make a copy of the raw Hi-C array at current level
countar_next_mask = np.array(countar_next)
countar_next_mask[armask_next == 0] = np.nan # fill nans
        countar_exp = _expand(_coarsen(countar_next_mask, operation=np.nanmin))
curmask = countar_exp < cutoff # replacement mask
ar_next[curmask] = addar_exp[curmask] # procedure of replacement
ar_next[armask_next == 0] = 0 # now setting zeros at invalid pixels
# prepare for the next level
ar_cur = ar_next
countar_cur = countar_next
armask_cur = armask_next
ar_next[armask_next == 0] = np.nan
ar_next = ar_next[:Norig, :Norig]
return ar_next
def robust_gauss_filter(
    ar, sigma=2, function=gaussian_filter1d, kwargs=None
):
"""
Implements an edge-handling mode for gaussian filter that basically ignores
the edge, and also handles NaNs.
Parameters
----------
ar : array-like
Input array
sigma : float
sigma to be passed to the filter
function : callable
        Filter to use. Default is gaussian_filter1d
kwargs : dict
Additional args to pass to the filter. Default:None
Notes
-----
Available edge-handling modes in ndimage.filters attempt to somehow
"extrapolate" the edge value and then apply the filter (see
https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.convolve.html).
That's likely because convolve uses fast fourier transform, which requires
the kernel to be constant. Here we design a better edge-handling for the
gaussian smoothing.
In a gaussian-filtered array, a pixel away from the edge is a mean of nearby
pixels with gaussian weights. With this mode, pixels near start/end are
also a mean of nearby pixels with gaussian weights. That's it. If we
encounter NANs, we also simply ignore them, following the same definition:
    mean of nearby valid pixels. Yes, it raises the weights for the first/last
pixels, because now only a part of the whole gaussian is being used (up to
1/2 for the first/last pixel and large sigma). But it preserves the "mean of
nearby pixels" definition. It is different from padding with zeros (it
would drag the first pixel down to be more like zero). It is also different
from "nearest" - that gives too much weight to the first/last pixel.
    To achieve this smoothing, we perform regular gaussian smoothing using
    mode="constant" (pad with zeros). Then we take an array of valid pixels
and smooth it the same way. This calculates how many "average valid pixels"
contributed to each point of a smoothed array. Dividing one by the other
achieves the desired result.
"""
if kwargs is None:
kwargs = {}
ar = np.asarray(ar, dtype=float)
mask = np.isfinite(ar)
ar[~mask] = 0
    a = function(ar, sigma=sigma, mode="constant", **kwargs)
    b = function(1.0 * mask, sigma=sigma, mode="constant", **kwargs)
return a / b
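# Illustrative sketch (assumed example, not part of the original module):
# NaNs are ignored rather than propagated, and a constant vector stays
# constant even at the edges.
#   >>> robust_gauss_filter(np.array([1., 1., np.nan, 1., 1.]), sigma=1)
#   array([1., 1., 1., 1., 1.])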
def weighted_groupby_mean(df, group_by, weigh_by, mode="mean"):
"""
Weighted mean, std, and std in log space for a dataframe.groupby
Parameters
----------
df : dataframe
Dataframe to groupby
group_by : str or list
Columns to group by
    weigh_by : str
Column to use as weights
mode : "mean", "std" or "logstd"
        Do the weighted mean, the weighted standard deviation,
or the weighted std in log-space from the mean-log value
(useful for P(s) etc.)
"""
if type(group_by) == str:
group_by = [group_by]
gr = df.groupby(group_by)
if mode == "mean":
def wstd(x):
return np.average(x, weights=df.loc[x.index, weigh_by])
wm = wstd
elif mode == "std":
def wstd(x):
wm = np.average(x, weights=df.loc[x.index, weigh_by])
dev = x - wm
res = np.sqrt(np.average(dev ** 2, weights=df.loc[x.index, weigh_by]))
return res
wm = wstd
elif mode == "logstd":
def wstd(x):
x = np.log(x)
wm = np.average(x, weights=df.loc[x.index, weigh_by])
dev = x - wm
res = np.sqrt(np.average(dev ** 2, weights=df.loc[x.index, weigh_by]))
return np.exp(res)
wm = wstd
else:
raise NotImplementedError
f = {}
for i in df.columns:
if i in group_by:
continue
elif i == weigh_by:
f[i] = ["sum"]
else:
f[i] = [wm]
agg = gr.agg(f)
agg.columns = [i[0] for i in agg.columns]
return agg
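# Illustrative sketch (assumed example, not part of the original module;
# the column names "g", "x", "w" are made up for the demonstration):
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"g": ["a", "a", "b"],
#   ...                    "x": [1.0, 3.0, 5.0],
#   ...                    "w": [1.0, 3.0, 2.0]})
#   >>> weighted_groupby_mean(df, group_by="g", weigh_by="w")["x"]
#   g
#   a    2.5
#   b    5.0
#   Name: x, dtype: float64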
def persistent_log_bins(end=10, bins_per_order_magnitude=10):
"""
Creates most nicely looking log-spaced integer bins starting at 1, with the
defined number of bins per order of magnitude.
Parameters
----------
    end : number (int recommended)
        log10 of the last value. It is safe to put a large value here and
        select your range of bins later.
    bins_per_order_magnitude : int > 0
        How many bins per order of magnitude.
Notes
-----
This is not a replacement for logbins, and it has a different purpose.
Difference between this and logbins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Logbins creates bins from lo to hi, spaced logarithmically with an
    approximate ratio. Logbins makes sure that the last bin is large (i.e.
hi/ratio ... hi), and will not allow the last bin to be much less than
ratio. It would slightly adjust the ratio to achieve that. As a result, by
    construction, logbins bins are different for different lo or hi.
This function is designed to create exactly the same bins that only depend
on one parameter, bins_per_order_magnitude. The goal is to make things
calculated for different datasets/organisms/etc. comparable. For example, if
these bins are used, it would allow us to divide P(s) for two different
organisms by each other because it was calculated for the same bins.
The price you pay for such versatility is that the last bin can be much less
than others in real application. For example, if you have 10 bins per order
of magnitude (ratio of 1.25), but your data ends at 10500, then the only
points in the last bin would be 10000..10500, 1/5 of what could be. This may
make the last point noisy.
The main part is done using np.logspace and rounding to the nearest integer,
followed by unique. The gaps are then re-sorted to ensure that gaps are
strictly increasing. The re-sorting of gaps was essential, and produced
better results than manual adjustment.
Alternatives that produce irregular bins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Using np.unique(np.logspace(a,b,N,dtype=int)) can be sub-optimal. For
example, np.unique(np.logspace(0,1,11,dtype=int)) = [ 1, 2, 3, 5, 6, 7,
10] Note the gaps jump from 1 to 2 back to 1
Similarly using np.unique(np.rint(np.logspace..)) can be suboptimal
np.unique(np.array(np.rint(np.logspace(0,1,9)),dtype=int)) = [ 1, 2, 3,
4, 6, 7, 10]
    for bins_per_order_magnitude=16, 10 is not in bins. Other than that, 10,
100, 1000, etc. are always included.
"""
if end > 50:
raise ValueError("End is a log10(max_value), not the max_value itself")
bin_float = np.logspace(0, end, end * bins_per_order_magnitude + 1)
bin_int = np.array(np.rint(bin_float), dtype=int) # rounding to the nearest int
bins = np.unique(bin_int) # unique bins
bins = np.cumsum(
np.sort(np.r_[1, np.diff(bins)])
) # re-ordering gaps (important step)
return bins
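# Illustrative sketch (assumed example, not part of the original module):
# with 5 bins per order of magnitude, the bins up to 10**1 are:
#   >>> persistent_log_bins(end=1, bins_per_order_magnitude=5)
#   array([ 1,  2,  3,  4,  6, 10])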
|
open2c/cooltools
|
cooltools/lib/numutils.py
|
Python
|
mit
| 50,305
|
[
"Gaussian"
] |
0eb38312d2d3ea18b81ed52a9011a10991d681fec3bdbceb8905fdc8a78be4be
|
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program Clustal W.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
import os
from Bio.Application import _Option, _Switch, AbstractCommandline
class ClustalwCommandline(AbstractCommandline):
"""Command line wrapper for clustalw (version one or two).
http://www.clustal.org/
Example:
--------
>>> from Bio.Align.Applications import ClustalwCommandline
>>> in_file = "unaligned.fasta"
>>> clustalw_cline = ClustalwCommandline("clustalw2", infile=in_file)
>>> print(clustalw_cline)
clustalw2 -infile=unaligned.fasta
You would typically run the command line with clustalw_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Citation:
---------
Larkin MA, Blackshields G, Brown NP, Chenna R, McGettigan PA,
McWilliam H, Valentin F, Wallace IM, Wilm A, Lopez R, Thompson JD,
Gibson TJ, Higgins DG. (2007). Clustal W and Clustal X version 2.0.
Bioinformatics, 23, 2947-2948.
Last checked against versions: 1.83 and 2.1
"""
# TODO - Should we default to cmd="clustalw2" now?
def __init__(self, cmd="clustalw", **kwargs):
self.parameters = \
[
_Option(["-infile", "-INFILE", "INFILE", "infile"],
"Input sequences.",
filename=True),
_Option(["-profile1", "-PROFILE1", "PROFILE1", "profile1"],
"Profiles (old alignment).",
filename=True),
_Option(["-profile2", "-PROFILE2", "PROFILE2", "profile2"],
"Profiles (old alignment).",
filename=True),
# ################# VERBS (do things) #############################
_Switch(["-options", "-OPTIONS", "OPTIONS", "options"],
"List the command line parameters"),
_Switch(["-help", "-HELP", "HELP", "help"],
"Outline the command line params."),
_Switch(["-check", "-CHECK", "CHECK", "check"],
"Outline the command line params."),
_Switch(["-fullhelp", "-FULLHELP", "FULLHELP", "fullhelp"],
"Output full help content."),
_Switch(["-align", "-ALIGN", "ALIGN", "align"],
"Do full multiple alignment."),
_Switch(["-tree", "-TREE", "TREE", "tree"],
"Calculate NJ tree."),
_Switch(["-pim", "-PIM", "PIM", "pim"],
"Output percent identity matrix (while calculating the tree)."),
_Option(["-bootstrap", "-BOOTSTRAP", "BOOTSTRAP", "bootstrap"],
"Bootstrap a NJ tree (n= number of bootstraps; def. = 1000).",
checker_function=lambda x: isinstance(x, int)),
_Switch(["-convert", "-CONVERT", "CONVERT", "convert"],
"Output the input sequences in a different file format."),
# #################### PARAMETERS (set things) #########################
# ***General settings:****
# Makes no sense in biopython
# _Option(["-interactive", "-INTERACTIVE", "INTERACTIVE", "interactive"],
# [],
# lambda x: 0, # Does not take value
# False,
# "read command line, then enter normal interactive menus",
# False),
_Switch(["-quicktree", "-QUICKTREE", "QUICKTREE", "quicktree"],
"Use FAST algorithm for the alignment guide tree"),
_Option(["-type", "-TYPE", "TYPE", "type"],
"PROTEIN or DNA sequences",
checker_function=lambda x: x in ["PROTEIN", "DNA",
"protein", "dna"]),
_Switch(["-negative", "-NEGATIVE", "NEGATIVE", "negative"],
"Protein alignment with negative values in matrix"),
_Option(["-outfile", "-OUTFILE", "OUTFILE", "outfile"],
"Output sequence alignment file name",
filename=True),
_Option(["-output", "-OUTPUT", "OUTPUT", "output"],
"Output format: CLUSTAL(default), GCG, GDE, PHYLIP, PIR, NEXUS and FASTA",
checker_function=lambda x: x in ["CLUSTAL", "GCG", "GDE", "PHYLIP",
"PIR", "NEXUS", "FASTA",
"clustal", "gcg", "gde", "phylip",
"pir", "nexus", "fasta"]),
_Option(["-outorder", "-OUTORDER", "OUTORDER", "outorder"],
"Output taxon order: INPUT or ALIGNED",
checker_function=lambda x: x in ["INPUT", "input",
"ALIGNED", "aligned"]),
_Option(["-case", "-CASE", "CASE", "case"],
"LOWER or UPPER (for GDE output only)",
checker_function=lambda x: x in ["UPPER", "upper",
"LOWER", "lower"]),
_Option(["-seqnos", "-SEQNOS", "SEQNOS", "seqnos"],
"OFF or ON (for Clustal output only)",
checker_function=lambda x: x in ["ON", "on",
"OFF", "off"]),
_Option(["-seqno_range", "-SEQNO_RANGE", "SEQNO_RANGE", "seqno_range"],
"OFF or ON (NEW- for all output formats)",
checker_function=lambda x: x in ["ON", "on",
"OFF", "off"]),
_Option(["-range", "-RANGE", "RANGE", "range"],
"Sequence range to write starting m to m+n. "
"Input as string eg. '24,200'"),
_Option(["-maxseqlen", "-MAXSEQLEN", "MAXSEQLEN", "maxseqlen"],
"Maximum allowed input sequence length",
checker_function=lambda x: isinstance(x, int)),
_Switch(["-quiet", "-QUIET", "QUIET", "quiet"],
"Reduce console output to minimum"),
_Option(["-stats", "-STATS", "STATS", "stats"],
"Log some alignment statistics to file",
filename=True),
# ***Fast Pairwise Alignments:***
_Option(["-ktuple", "-KTUPLE", "KTUPLE", "ktuple"],
"Word size",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-topdiags", "-TOPDIAGS", "TOPDIAGS", "topdiags"],
"Number of best diags.",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-window", "-WINDOW", "WINDOW", "window"],
"Window around best diags.",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-pairgap", "-PAIRGAP", "PAIRGAP", "pairgap"],
"Gap penalty",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-score", "-SCORE", "SCORE", "score"],
"Either: PERCENT or ABSOLUTE",
checker_function=lambda x: x in ["percent", "PERCENT",
"absolute", "ABSOLUTE"]),
# ***Slow Pairwise Alignments:***
_Option(["-pwmatrix", "-PWMATRIX", "PWMATRIX", "pwmatrix"],
"Protein weight matrix=BLOSUM, PAM, GONNET, ID or filename",
checker_function=lambda x: x in ["BLOSUM", "PAM",
"GONNET", "ID",
"blosum", "pam",
"gonnet", "id"] or
os.path.exists(x),
filename=True),
_Option(["-pwdnamatrix", "-PWDNAMATRIX", "PWDNAMATRIX", "pwdnamatrix"],
"DNA weight matrix=IUB, CLUSTALW or filename",
checker_function=lambda x: x in ["IUB", "CLUSTALW",
"iub", "clustalw"] or
os.path.exists(x),
filename=True),
_Option(["-pwgapopen", "-PWGAPOPEN", "PWGAPOPEN", "pwgapopen"],
"Gap opening penalty",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-pwgapext", "-PWGAPEXT", "PWGAPEXT", "pwgapext"],
"Gap extension penalty",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
# ***Multiple Alignments:***
_Option(["-newtree", "-NEWTREE", "NEWTREE", "newtree"],
"Output file name for newly created guide tree",
filename=True),
_Option(["-usetree", "-USETREE", "USETREE", "usetree"],
"File name of guide tree",
                    checker_function=lambda x: os.path.exists(x),
filename=True),
_Option(["-matrix", "-MATRIX", "MATRIX", "matrix"],
"Protein weight matrix=BLOSUM, PAM, GONNET, ID or filename",
checker_function=lambda x: x in ["BLOSUM", "PAM",
"GONNET", "ID",
"blosum", "pam",
"gonnet", "id"] or
os.path.exists(x),
filename=True),
_Option(["-dnamatrix", "-DNAMATRIX", "DNAMATRIX", "dnamatrix"],
"DNA weight matrix=IUB, CLUSTALW or filename",
checker_function=lambda x: x in ["IUB", "CLUSTALW",
"iub", "clustalw"] or
os.path.exists(x),
filename=True),
_Option(["-gapopen", "-GAPOPEN", "GAPOPEN", "gapopen"],
"Gap opening penalty",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-gapext", "-GAPEXT", "GAPEXT", "gapext"],
"Gap extension penalty",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Switch(["-endgaps", "-ENDGAPS", "ENDGAPS", "endgaps"],
"No end gap separation pen."),
_Option(["-gapdist", "-GAPDIST", "GAPDIST", "gapdist"],
"Gap separation pen. range",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Switch(["-nopgap", "-NOPGAP", "NOPGAP", "nopgap"],
"Residue-specific gaps off"),
_Switch(["-nohgap", "-NOHGAP", "NOHGAP", "nohgap"],
"Hydrophilic gaps off"),
_Switch(["-hgapresidues", "-HGAPRESIDUES", "HGAPRESIDUES", "hgapresidues"],
"List hydrophilic res."),
_Option(["-maxdiv", "-MAXDIV", "MAXDIV", "maxdiv"],
"% ident. for delay",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
# Already handled in General Settings section, but appears a second
# time under Multiple Alignments in the help
# _Option(["-type", "-TYPE", "TYPE", "type"],
# "PROTEIN or DNA",
# checker_function=lambda x: x in ["PROTEIN", "DNA",
# "protein", "dna"]),
_Option(["-transweight", "-TRANSWEIGHT", "TRANSWEIGHT", "transweight"],
"Transitions weighting",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-iteration", "-ITERATION", "ITERATION", "iteration"],
"NONE or TREE or ALIGNMENT",
checker_function=lambda x: x in ["NONE", "TREE",
"ALIGNMENT",
"none", "tree",
"alignment"]),
_Option(["-numiter", "-NUMITER", "NUMITER", "numiter"],
"maximum number of iterations to perform",
checker_function=lambda x: isinstance(x, int)),
_Switch(["-noweights", "-NOWEIGHTS", "NOWEIGHTS", "noweights"],
"Disable sequence weighting"),
# ***Profile Alignments:***
_Switch(["-profile", "-PROFILE", "PROFILE", "profile"],
"Merge two alignments by profile alignment"),
_Option(["-newtree1", "-NEWTREE1", "NEWTREE1", "newtree1"],
"Output file name for new guide tree of profile1",
filename=True),
_Option(["-newtree2", "-NEWTREE2", "NEWTREE2", "newtree2"],
"Output file for new guide tree of profile2",
filename=True),
_Option(["-usetree1", "-USETREE1", "USETREE1", "usetree1"],
"File name of guide tree for profile1",
                    checker_function=lambda x: os.path.exists(x),
filename=True),
_Option(["-usetree2", "-USETREE2", "USETREE2", "usetree2"],
"File name of guide tree for profile2",
                    checker_function=lambda x: os.path.exists(x),
filename=True),
# ***Sequence to Profile Alignments:***
_Switch(["-sequences", "-SEQUENCES", "SEQUENCES", "sequences"],
"Sequentially add profile2 sequences to profile1 alignment"),
# These are already handled in the Multiple Alignments section,
# but appear a second time here in the help.
# _Option(["-newtree", "-NEWTREE", "NEWTREE", "newtree"],
# "File for new guide tree",
# filename=True),
# _Option(["-usetree", "-USETREE", "USETREE", "usetree"],
# "File for old guide tree",
# checker_function=lambda x: os.path.exists,
# filename=True),
# ***Structure Alignments:***
_Switch(["-nosecstr1", "-NOSECSTR1", "NOSECSTR1", "nosecstr1"],
"Do not use secondary structure-gap penalty mask for profile 1"),
_Switch(["-nosecstr2", "-NOSECSTR2", "NOSECSTR2", "nosecstr2"],
"Do not use secondary structure-gap penalty mask for profile 2"),
_Option(["-secstrout", "-SECSTROUT", "SECSTROUT", "secstrout"],
"STRUCTURE or MASK or BOTH or NONE output in alignment file",
checker_function=lambda x: x in ["STRUCTURE", "MASK",
"BOTH", "NONE",
"structure", "mask",
"both", "none"]),
_Option(["-helixgap", "-HELIXGAP", "HELIXGAP", "helixgap"],
"Gap penalty for helix core residues",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-strandgap", "-STRANDGAP", "STRANDGAP", "strandgap"],
"gap penalty for strand core residues",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-loopgap", "-LOOPGAP", "LOOPGAP", "loopgap"],
"Gap penalty for loop regions",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-terminalgap", "-TERMINALGAP", "TERMINALGAP", "terminalgap"],
"Gap penalty for structure termini",
checker_function=lambda x: isinstance(x, int) or
isinstance(x, float)),
_Option(["-helixendin", "-HELIXENDIN", "HELIXENDIN", "helixendin"],
"Number of residues inside helix to be treated as terminal",
checker_function=lambda x: isinstance(x, int)),
_Option(["-helixendout", "-HELIXENDOUT", "HELIXENDOUT", "helixendout"],
"Number of residues outside helix to be treated as terminal",
checker_function=lambda x: isinstance(x, int)),
_Option(["-strandendin", "-STRANDENDIN", "STRANDENDIN", "strandendin"],
"Number of residues inside strand to be treated as terminal",
checker_function=lambda x: isinstance(x, int)),
_Option(["-strandendout", "-STRANDENDOUT", "STRANDENDOUT", "strandendout"],
"Number of residues outside strand to be treated as terminal",
checker_function=lambda x: isinstance(x, int)),
# ***Trees:***
_Option(["-outputtree", "-OUTPUTTREE", "OUTPUTTREE", "outputtree"],
"nj OR phylip OR dist OR nexus",
checker_function=lambda x: x in ["NJ", "PHYLIP",
"DIST", "NEXUS",
"nj", "phylip",
"dist", "nexus"]),
_Option(["-seed", "-SEED", "SEED", "seed"],
"Seed number for bootstraps.",
checker_function=lambda x: isinstance(x, int)),
_Switch(["-kimura", "-KIMURA", "KIMURA", "kimura"],
"Use Kimura's correction."),
_Switch(["-tossgaps", "-TOSSGAPS", "TOSSGAPS", "tossgaps"],
"Ignore positions with gaps."),
_Option(["-bootlabels", "-BOOTLABELS", "BOOTLABELS", "bootlabels"],
"Node OR branch position of bootstrap values in tree display",
checker_function=lambda x: x in ["NODE", "BRANCH",
"node", "branch"]),
_Option(["-clustering", "-CLUSTERING", "CLUSTERING", "clustering"],
"NJ or UPGMA",
checker_function=lambda x: x in ["NJ", "UPGMA", "nj", "upgma"])
]
AbstractCommandline.__init__(self, cmd, **kwargs)
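# Illustrative usage of the wrapper defined above (file names are assumed, not
# taken from this module):
#   cline = ClustalwCommandline("clustalw2", infile="unaligned.fasta",
#                               outfile="aligned.aln")
#   stdout, stderr = cline()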
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running ClustalW doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
_test()
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Align/Applications/_Clustalw.py
|
Python
|
gpl-2.0
| 19,835
|
[
"Biopython"
] |
f3b1d28944c3fda1df252c9e6487b91dc2342d91a825365d26e92bc537896b67
|
########################################################################
# $HeadURL$
# File : CloudDirector.py
# Author : Victor Mendez
########################################################################
#DIRAC
from DIRAC import S_OK, S_ERROR, gConfig
#VMDIRAC
from VMDIRAC.Resources.Cloud.VMDirector import VMDirector
from VMDIRAC.WorkloadManagementSystem.Client.AmazonImage import AmazonImage
from VMDIRAC.WorkloadManagementSystem.Client.CloudStackImage import CloudStackImage
from VMDIRAC.WorkloadManagementSystem.Client.NovaImage import NovaImage
from VMDIRAC.WorkloadManagementSystem.Client.OcciImage import OcciImage
# automatic module configuration here (adri)
__RCSID__ = '$Id: $'
class CloudDirector( VMDirector ):
def __init__( self, submitPool ):
VMDirector.__init__( self, submitPool )
def configure( self, csSection, submitPool ):
"""
Here goes common configuration for Cloud Director
"""
VMDirector.configure( self, csSection, submitPool )
# end Flavor; this is not used:
#self.reloadConfiguration( csSection, submitPool )
def configureFromSection( self, mySection ):
"""
reload from CS
"""
VMDirector.configureFromSection( self, mySection )
def _submitInstance( self, imageName, endpoint, instanceID, runningRequirementsDict ):
"""
Real backend method to submit a new Instance of a given Image
    It contains the decision logic for submitting to one of the multiple endpoints available for a given imageName; first approach: FirstFit
    It checks whether free slots are available by requesting the status of the VMs already sent to a cloud endpoint
"""
endpointsPath = "/Resources/VirtualMachines/CloudEndpoints"
driver = gConfig.getValue( "%s/%s/%s" % ( endpointsPath, endpoint, 'cloudDriver' ), "" )
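# Illustrative CS layout assumed by the lookup above (example value only):
#   /Resources/VirtualMachines/CloudEndpoints/<endpoint>/cloudDriver = nova-1.1
# An endpoint without a cloudDriver option falls through to the final S_ERROR below.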
if driver == 'Amazon':
try:
ami = AmazonImage( endpoint )
except Exception:
return S_ERROR("Failed to connect to AWS")
result = ami.startNewInstances(imageName)
if not result[ 'OK' ]:
return result
idInstance = result['Value'][0]
return S_OK( idInstance )
if driver == 'CloudStack':
csi = CloudStackImage( imageName , endpoint )
result = csi.startNewInstance()
if not result[ 'OK' ]:
return result
idInstance = result['Value']
return S_OK( idInstance )
if ( driver == 'occi-0.9' or driver == 'occi-0.8' or driver =='rocci-1.1'):
oima = OcciImage( imageName, endpoint )
connOcci = oima.connectOcci()
if not connOcci[ 'OK' ]:
return connOcci
# CPUTime and SubmitPool are kept for backward compatibility with old occi 0.8-style endpoints, checked in the VMDirector parent class
# the DIRAC instanceID is used instead of the metadata service, which is not installed at most OpenNebula sites
CPUTime = runningRequirementsDict['CPUTime']
submitPool = runningRequirementsDict['SubmitPool']
result = oima.startNewInstance( CPUTime, submitPool, runningRequirementsDict, instanceID )
if not result[ 'OK' ]:
return result
idInstance = result['Value']
return S_OK( idInstance )
if driver == 'nova-1.1':
nima = NovaImage( imageName, endpoint )
connNova = nima.connectNova()
if not connNova[ 'OK' ]:
return connNova
result = nima.startNewInstance( instanceID, runningRequirementsDict )
if not result[ 'OK' ]:
return result
return S_OK( result['Value'] )
return S_ERROR( 'Unknown DIRAC Cloud driver %s' % driver )
|
myco/VMDIRAC
|
Resources/Cloud/CloudDirector.py
|
Python
|
gpl-3.0
| 3,532
|
[
"DIRAC"
] |
b2f6c9c578a9af8a50e10b057ab9e5ba8aa9a05c294a2b74d808bd82c837a9b9
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
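# Illustrative usage (not part of the original script): order inside each bucket
# is preserved, e.g.
#   odds, evens = partition(lambda n: n % 2 == 1, [1, 2, 3, 4])  # -> ([1, 3], [2, 4])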
class speechCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'create_custom_class': ('parent', 'custom_class_id', 'custom_class', ),
'create_phrase_set': ('parent', 'phrase_set_id', 'phrase_set', ),
'delete_custom_class': ('name', ),
'delete_phrase_set': ('name', ),
'get_custom_class': ('name', ),
'get_phrase_set': ('name', ),
'list_custom_classes': ('parent', 'page_size', 'page_token', ),
'list_phrase_set': ('parent', 'page_size', 'page_token', ),
'long_running_recognize': ('config', 'audio', 'output_config', ),
'recognize': ('config', 'audio', ),
'streaming_recognize': ('streaming_config', 'audio_content', ),
'update_custom_class': ('custom_class', 'update_mask', ),
'update_phrase_set': ('phrase_set', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
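# Illustrative transformation performed by leave_Call (example call is assumed,
# not taken from this file):
#   client.recognize(config, audio, retry=retry)
# becomes
#   client.recognize(request={'config': config, 'audio': audio}, retry=retry)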
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=speechCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the speech client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
googleapis/python-speech
|
scripts/fixup_speech_v1p1beta1_keywords.py
|
Python
|
apache-2.0
| 6,672
|
[
"VisIt"
] |
24c88cec18f460da881ef40878ba9a13cb4f9b9676fcc5b653e63616cf538c8d
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC.Core.Workflow.Parameter import *
from DIRAC.Core.Workflow.Module import *
from DIRAC.Core.Workflow.Step import *
from DIRAC.Core.Workflow.Workflow import *
from DIRAC.Core.Workflow.WorkflowReader import *
# define Module 2
module2 = ModuleDefinition('GaudiApplication') # during construction the class creates duplicate copies of the params
module2.setDescription('Gaudi Application module')
module2.setBody('from WorkflowLib.Module.GaudiApplication import GaudiApplication\n')
# we add empty parameters but linked up as default
module2.addParameter(Parameter("DataType", "", "string", "self", "DataType", True, False, "data type"))
module2.addParameter(Parameter("CONFIG_NAME", "", "string", "self", "CONFIG_NAME", True, False, "Configuration Name"))
module2.addParameter(
Parameter(
"CONFIG_VERSION",
"",
"string",
"self",
"CONFIG_VERSION",
True,
False,
"Configuration Version"))
module2.addParameter(Parameter("EVENTTYPE", "", "string", "self", "EVENTTYPE", True, False, "Event Type"))
module2.addParameter(Parameter("appName", "", "string", "self", "appName", True, False, "Application Name"))
module2.addParameter(Parameter("appVersion", "", "string", "self", "appVersion", True, False, "Application Version"))
module2.addParameter(Parameter("appType", "", "string", "self", "appType", True, False, "Application Version"))
module2.addParameter(Parameter("appLog", "", "string", "self", "appLog", True, False, "list of logfile"))
module2.addParameter(Parameter("inputData", "", "jdl", "self", "inputData", True, False, "List of InputData"))
module2.addParameter(Parameter("inputDataType", "", "string", "self", "inputDataType", True, False, "Input Data Type"))
module2.addParameter(
Parameter(
"NUMBER_OF_EVENTS",
"",
"string",
"self",
"NUMBER_OF_EVENTS",
True,
False,
"number of events requested"))
module2.addParameter(Parameter("optionsFile", "", "string", "self", "optionsFile", True, False, "Options File"))
module2.addParameter(
Parameter(
"optionsLine",
"",
"string",
"self",
"optionsLine",
True,
False,
"Number of Event",
"option"))
module2.addParameter(Parameter("systemConfig", "", "string", "self", "systemConfig", True, False, "Job Platform"))
module2.addParameter(Parameter("poolXMLCatName", "", "string", "self", "poolXMLCatName", True, False, "POOL XML slice"))
# define module 3
module3 = ModuleDefinition('LogChecker') # during construction the class creates duplicate copies of the params
module3.setDescription('Check LogFile module')
module3.setBody('from WorkflowLib.Module.LogChecker import *\n')
# we add parameters and link them at the step level
module3.addParameter(Parameter("DataType", "", "string", "self", "DataType", True, False, "data type"))
module3.addParameter(Parameter("SourceData", "", "string", "self", "SourceData", True, False, "InputData"))
module3.addParameter(Parameter("CONFIG_NAME", "", "string", "self", "CONFIG_NAME", True, False, "Configuration Name"))
module3.addParameter(
Parameter(
"CONFIG_VERSION",
"",
"string",
"self",
"CONFIG_VERSION",
True,
False,
"Configuration Version"))
module3.addParameter(
Parameter(
"NUMBER_OF_EVENTS",
"",
"string",
"self",
"NUMBER_OF_EVENTS",
True,
False,
"number of events requested"))
module3.addParameter(Parameter("NUMBER_OF_EVENTS_INPUT", "", "string", "", "", True, True, "number of events as input"))
module3.addParameter(
Parameter(
"NUMBER_OF_EVENTS_OUTPUT",
"",
"string",
"",
"",
False,
True,
"number of events as input"))
module3.addParameter(Parameter("appName", "", "string", "self", "appName", True, False, "Application Name"))
module3.addParameter(Parameter("appVersion", "", "string", "self", "appVersion", True, False, "Application Version"))
module3.addParameter(Parameter("appType", "", "string", "self", "appType", True, False, "Application Version"))
module3.addParameter(Parameter("appLog", "", "string", "self", "appLog", True, False, "list of logfile"))
module3.addParameter(Parameter("poolXMLCatName", "", "string", "self", "poolXMLCatName", True, False, "POOL XML slice"))
module3.addParameter(Parameter("inputData", "", "string", "self", "inputData", True, False, "InputData"))
module3.addParameter(
Parameter(
"OUTPUT_MAX",
"",
"string",
"self",
"OUTPUT_MAX",
True,
False,
"nb max of output to keep"))
module3.addParameter(Parameter("EMAIL", "@{EMAILNAME}", "string", "", "", True, False, "EMAIL adress"))
# define module 4
module4 = ModuleDefinition('BookkeepingReport')
module4.setDescription('Bookkeeping Report module')
module4.setBody('from WorkflowLib.Module.BookkeepingReport import * \n')
module4.addParameter(Parameter("STEP_ID", "", "string", "self", "STEP_ID", True, False, "EMAIL adress"))
module4.addParameter(
Parameter(
"NUMBER_OF_EVENTS",
"",
"string",
"self",
"NUMBER_OF_EVENTS",
True,
False,
"number of events requested"))
module4.addParameter(
Parameter(
"NUMBER_OF_EVENTS_INPUT",
"",
"string",
"",
"",
True,
False,
"number of events as input"))
module4.addParameter(
Parameter(
"NUMBER_OF_EVENTS_OUTPUT",
"",
"string",
"",
"",
True,
False,
"number of events as input"))
module4.addParameter(Parameter("DataType", "", "string", "self", "DataType", True, False, "data type"))
module4.addParameter(Parameter("CONFIG_NAME", "", "string", "self", "CONFIG_NAME", True, False, "Configuration Name"))
module4.addParameter(
Parameter(
"CONFIG_VERSION",
"",
"string",
"self",
"CONFIG_VERSION",
True,
False,
"Configuration Version"))
module4.addParameter(Parameter("appName", "", "string", "self", "appName", True, False, "Application Name"))
module4.addParameter(Parameter("appVersion", "", "string", "self", "appVersion", True, False, "Application Version"))
module4.addParameter(Parameter("inputData", "", "string", "self", "inputData", True, False, "InputData"))
module4.addParameter(Parameter("inputDataType", "", "string", "self", "inputDataType", True, False, "Input Data Type"))
module4.addParameter(Parameter("EVENTTYPE", "", "string", "self", "EVENTTYPE", True, False, "Event Type"))
module4.addParameter(Parameter("appType", "", "string", "self", "appType", True, False, "Application Version"))
module4.addParameter(Parameter("poolXMLCatName", "", "string", "self", "poolXMLCatName", True, False, "POOL XML slice"))
module4.addParameter(Parameter("SourceData", "", "string", "self", "SourceData", True, False, "InputData"))
module4.addParameter(Parameter("appLog", "", "string", "self", "appLog", True, False, "list of logfile"))
module4.addParameter(Parameter("listoutput", [], "list", "self", "listoutput", True, False, "list of output data"))
# define module 5
module5 = ModuleDefinition('StepFinalization')
module5.setDescription('Step Finalization module')
module5.setBody('from WorkflowLib.Module.StepFinalization import * \n')
module5.addParameter(Parameter("listoutput", [], "list", "self", "listoutput", True, False, "list of output data"))
module5.addParameter(Parameter("poolXMLCatName", "", "string", "self", "poolXMLCatName", True, False, "POOL XML slice"))
module5.addParameter(Parameter("inputData", "", "string", "self", "inputData", True, False, "InputData"))
module5.addParameter(Parameter("SourceData", "", "string", "self", "SourceData", True, False, "InputData"))
# define module 6
module6 = ModuleDefinition('JobFinalization')
module6.setDescription('Job Finalization module')
module6.setBody('from WorkflowLib.Module.JobFinalization import * \n')
#module6.addParameter(Parameter("listoutput",[],"list","self","listoutput",True,False,"list of output data"))
#module6.addParameter(Parameter("outputData",'',"string","self","outputData",True,False,"list of output data"))
module6.addParameter(Parameter("poolXMLCatName", "", "string", "self", "poolXMLCatName", True, False, "POOL XML slice"))
# KGG: some problem with InputData vs inputData - it is not clear to me
# module6.addParameter(Parameter("inputData","","string","self","InputData",True,False,"InputData"))
module6.addParameter(Parameter("SourceData", "", "string", "self", "SourceData", True, False, "InputData"))
#module6.addParameter(Parameter("listoutput",[],"list","self","listoutput",True,False,"list of output data"))
# I am adding these parameters with an empty list
# the ones in use are going to be replaced at the step level
module6.addParameter(Parameter("listoutput_1", [], "list", "self", "listoutput_1", True, False, "list of output data"))
module6.addParameter(Parameter("listoutput_2", [], "list", "self", "listoutput_2", True, False, "list of output data"))
module6.addParameter(Parameter("listoutput_3", [], "list", "self", "listoutput_3", True, False, "list of output data"))
module6.addParameter(Parameter("listoutput_4", [], "list", "self", "listoutput_4", True, False, "list of output data"))
module6.addParameter(Parameter("listoutput_5", [], "list", "self", "listoutput_5", True, False, "list of output data"))
module6.addParameter(Parameter("listoutput_6", [], "list", "self", "listoutput_6", True, False, "list of output data"))
# define module 7
#module7 = ModuleDefinition('Dummy')
#module7.setDescription('Dummy module')
#module7.setBody('from WorkflowLib.Module.Dummy import * \n')
############### STEPS ##################################
# step 1 we are creating step definition
step1 = StepDefinition('Gaudi_App_Step')
step1.addModule(module2) # Creating instance of the module 'Gaudi_App_Step'
moduleInstance2 = step1.createModuleInstance('GaudiApplication', 'module2')
#moduleInstance2 = step1.createModuleInstance('Dummy', 'module7')
step1.addModule(module3) # Creating instance of the module 'LogChecker'
moduleInstance3 = step1.createModuleInstance('LogChecker', 'module3')
step1.addModule(module4) # Creating instance of the module 'BookkeepingReport'
moduleInstance4 = step1.createModuleInstance('BookkeepingReport', 'module4')
moduleInstance4.setLink('NUMBER_OF_EVENTS_INPUT', moduleInstance3.getName(), 'NUMBER_OF_EVENTS_INPUT')
moduleInstance4.setLink('NUMBER_OF_EVENTS_OUTPUT', moduleInstance3.getName(), 'NUMBER_OF_EVENTS_OUTPUT')
# in principle we could link parameters of moduleInstance2 with moduleInstance1, but
# in this case we are going to link them with the step
# step1.addModule(module5)
#moduleInstance5 = step1.createModuleInstance('StepFinalization','module5')
# now we can add parameters for the STEP, but instead of typing them we can just reuse the ones from the modules
step1.addParameterLinked(module3.parameters)
step1.addParameterLinked(module2.parameters)
# and we can add an additional parameter which will be used as a global
step1.addParameter(
Parameter(
"STEP_ID",
"@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}",
"string",
"",
"",
True,
False,
"Temporary fix"))
step1.addParameter(Parameter("EVENTTYPE", "30000000", "string", "", "", True, False, "Event Type"))
step1.addParameter(Parameter("outputData", "@{STEP_ID}.root", "string", "", "", True, False, "etc name"))
step1.addParameter(Parameter("etcf", "SETC_@{STEP_ID}.root", "string", "", "", True, False, "etc name"))
step1.setValue("appLog", "@{appName}_@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.log")
step1.unlink(["appLog", "appName", "appType"])
step1.unlink(["poolXMLCatName", "SourceData", "DataType", "CONFIG_NAME", "CONFIG_VERSION"])
step1.addParameter(Parameter("listoutput", [], "list", "", "", True, False, "list of output data"))
#outputData = "@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.@{appType}"
opt_dav = "EvtTupleSvc.Output = {}"
opt_dav = opt_dav + ";ApplicationMgr.OutStream -= {'DstWriter'}"
opt_dav = opt_dav + ";ApplicationMgr.OutStream = {'Sequencer/SeqWriteTag'}"
opt_dav = opt_dav + ";ApplicationMgr.TopAlg -= { \"GaudiSequencer/SeqPreselHWZ2bbl\" }"
opt_dav = opt_dav + ";MessageSvc.Format = '%u % F%18W%S%7W%R%T %0W%M';MessageSvc.timeFormat = '%Y-%m-%d %H:%M:%S UTC'"
opt_dav = opt_dav + \
";WR.Output = \"Collection=\'EVTTAGS/TagCreator/1\' ADDRESS=\'/Event\' " +\
"DATAFILE=\'@{outputData}\' TYP=\'POOL_ROOTTREE\' OPT=\'RECREATE\'\""
opt_dav = opt_dav + ";EventPersistencySvc.CnvServices += { \"LHCb::RawDataCnvSvc\" }"
opt_dav = opt_dav + ";ApplicationMgr.TopAlg += {\"StoreExplorerAlg\"}"
opt_dav = opt_dav + ";StoreExplorerAlg.Load = 1"
opt_dav = opt_dav + ";StoreExplorerAlg.PrintFreq = 0.99"
opt_dav = opt_dav + ";StoreExplorerAlg.AccessForeign = true"
#etcf = "SETC_@{STEP_ID}.root"
opt_brunel = "#include \"$BRUNELOPTS/SuppressWarnings.opts\""
opt_brunel = opt_brunel + ";MessageSvc.Format = '%u % F%18W%S%7W%R%T %0W%M';" +\
"MessageSvc.timeFormat = '%Y-%m-%d %H:%M:%S UTC'"
opt_brunel = opt_brunel + ";EventLoopMgr.OutputLevel = 3"
opt_brunel = opt_brunel + \
";DstWriter.Output = \"DATAFILE=\'PFN:@{outputData}\' TYP=\'POOL_ROOTTREE\' OPT=\'RECREATE\'\""
opt_brunel = opt_brunel + \
";EvtTupleSvc.Output = {\"EVTTAGS2 DATAFILE=\'PFN:@{etcf}\' TYP=\'POOL_ROOTTREE\' OPT=\'RECREATE\'\"}"
#opt_brunel = opt_brunel+";EventSelector.Input = {\"COLLECTION=\'TagCreator/1\' DATAFILE=\'@{InputData}\' TYPE=\'POOL_ROOTTREE\' SEL=\'(GlobalOr>=1)\' OPT=\'READ\'\"}"
opt_brunel = opt_brunel + ";EventPersistencySvc.CnvServices += { \"LHCb::RawDataCnvSvc\" }"
opt_brunel = opt_brunel + ";ApplicationMgr.TopAlg += {\"StoreExplorerAlg\"}"
opt_brunel = opt_brunel + ";StoreExplorerAlg.Load = 1"
opt_brunel = opt_brunel + ";StoreExplorerAlg.PrintFreq = 0.99"
opt_brunel = opt_brunel + ";IODataManager.AgeLimit = 2"
step3 = StepDefinition('Job_Finalization')
step3.addModule(module6)
moduleInstance6 = step3.createModuleInstance('JobFinalization', 'module6')
step3.addParameterLinked(module6.parameters)
step3.unlink(["poolXMLCatName", "SourceData", "DataType", "CONFIG_NAME", "CONFIG_VERSION"])
############## WORKFLOW #################################
workflow1 = Workflow(name='CCRC-strip-test')
workflow1.setDescription('Workflow of GaudiApplication')
workflow1.addStep(step1)
step1_prefix = "step1_"
stepInstance1 = workflow1.createStepInstance('Gaudi_App_Step', 'Step1')
# let's link all parameters up to the workflow level
stepInstance1.linkUp(stepInstance1.parameters, step1_prefix)
stepInstance1.setLink("systemConfig", "self", "SystemConfig") # capital letter corrected
stepInstance1.unlink(["listoutput", "STEP_ID", "optionsFile", "optionsLine", "appLog",
"appName", "etcf", "appType", "outputData", "EVENTTYPE"])
stepInstance1.setValue("appName", "DaVinci")
stepInstance1.setValue("appType", "root")
stepInstance1.setValue("outputData", "@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.@{appType}")
stepInstance1.setValue("optionsFile", "DVOfficialStrippingFile.opts")
stepInstance1.setValue("optionsLine", opt_dav)
stepInstance1.linkUp("CONFIG_NAME")
stepInstance1.linkUp("CONFIG_VERSION")
stepInstance1.linkUp("DataType")
stepInstance1.linkUp("SourceData")
stepInstance1.linkUp("poolXMLCatName")
stepInstance1.setLink("inputData", "self", "InputData") # KGG linked with InputData of the Workflow
list1_out = [{"outputDataName": "@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.@{appType}",
"outputType": "FETC", "outputDataSE": "Tier1_M-DST"}]
stepInstance1.setValue("listoutput", list1_out)
step2_prefix = "step2_"
stepInstance2 = workflow1.createStepInstance('Gaudi_App_Step', 'Step2')
# let's link all parameters up to the workflow level
stepInstance2.linkUp(stepInstance2.parameters, step2_prefix)
stepInstance2.setLink("systemConfig", "self", "SystemConfig") # capital letter corrected
# except "STEP_ID", "appLog"
stepInstance2.unlink(["listoutput", "STEP_ID", "optionsFile", "outputData",
"optionsLine", "appLog", "appName", "appType", "etcf", "EVENTTYPE"])
stepInstance2.setValue("appName", "Brunel")
stepInstance2.setValue("appType", "dst")
stepInstance2.setValue("outputData", "@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.@{appType}")
stepInstance2.setValue("etcf", "SETC_@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.root")
stepInstance2.setValue("optionsFile", "RealData-ETC.opts")
stepInstance2.setValue("optionsLine", opt_brunel)
# stepInstance2.setValue("outputDataSE","Tier1_M-DST")
stepInstance2.linkUp("CONFIG_NAME")
stepInstance2.linkUp("CONFIG_VERSION")
stepInstance2.linkUp("DataType")
stepInstance2.linkUp("SourceData")
stepInstance2.linkUp("poolXMLCatName")
stepInstance2.setLink("inputData", stepInstance1.getName(), "outputData")
list2_out = [{"outputDataName": "@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.@{appType}",
"outputType": "dst",
"outputDataSE": "Tier1_M-DST"},
{"outputDataName": "SETC_@{PRODUCTION_ID}_@{JOB_ID}_@{STEP_NUMBER}.root",
"outputType": "SETC",
"outputDataSE": "Tier1_M-DST"}]
stepInstance2.setValue("listoutput", list2_out)
workflow1.addStep(step3)
step3_prefix = "step3_"
stepInstance3 = workflow1.createStepInstance('Job_Finalization', 'Step3')
#stepInstance3.linkUp(stepInstance3.parameters, step3_prefix)
stepInstance3.linkUp("CONFIG_NAME")
stepInstance3.linkUp("CONFIG_VERSION")
stepInstance3.linkUp("DataType")
stepInstance3.linkUp("SourceData")
stepInstance3.linkUp("poolXMLCatName")
# I am adding
stepInstance3.unlink(["listoutput_1", "listoutput_2", "listoutput_3", "listoutput_4", "listoutput_5", "listoutput_6"])
stepInstance3.setLink("listoutput_1", stepInstance1.getName(), "listoutput")
stepInstance3.setLink("listoutput_2", stepInstance2.getName(), "listoutput")
# **************************
workflow1.addParameterLinked(step1.parameters, step1_prefix)
workflow1.addParameterLinked(step1.parameters, step2_prefix)
# and finally we can unlink them because we inherit them linked
workflow1.unlink(workflow1.parameters)
workflow1.setValue(step1_prefix + "appVersion", "v19r11")
workflow1.setValue(step2_prefix + "appVersion", "v32r4")
workflow1.setValue(step1_prefix + "NUMBER_OF_EVENTS", "5")
workflow1.setValue(step2_prefix + "NUMBER_OF_EVENTS", "-1")
workflow1.removeParameter(step1_prefix + "inputData") # KGG wrong parameter
workflow1.setValue(step1_prefix + "inputDataType", "RDST")
workflow1.setValue(step2_prefix + "inputDataType", "ETC")
workflow1.setValue(step1_prefix + "OUTPUT_MAX", "20")
# remove unwanted
workflow1.removeParameter(step1_prefix + "outputData")
workflow1.removeParameter(step1_prefix + "systemConfig")
workflow1.removeParameter(step2_prefix + "outputData")
workflow1.removeParameter(step2_prefix + "systemConfig")
# add system config which is common for all modules
#workflow1.addParameter(Parameter("systemConfig","slc4_ia32_gcc34","string","","",True, False, "Application Name"))
workflow1.addParameter(
Parameter(
"SystemConfig",
"x86_64-slc5-gcc43-opt",
"JDLReqt",
"",
"",
True,
False,
"Application Name"))
# Now let's define parameters at the top
#indata = "LFN:/lhcb/production/DC06/phys-v2-lumi2/00001820/SIM/0000/00001820_00000001_1.sim;LFN:/lhcb/production/DC06/phys-v2-lumi2/00001820/SIM/0000/00001820_00000001_2.sim;LFN:/lhcb/production/DC06/phys-v2-lumi2/00001820/SIM/0000/00001820_00000001_3.sim"
indata = "LFN:/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00007918_1.rdst"
#indata = "LFN:/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00007918_1.rdst;/lhcb/data/CCRC08/RAW/LHCb/CCRC/420217/420217_0000116193.raw"
#etcf = "joel.root"
#indata = "LFN:/lhcb/data/CCRC08/RAW/LHCb/CCRC/402154/402154_0000047096.raw;LFN:/lhcb/data/CCRC08/RAW/LHCb/CCRC/402154/402154_0000047097.raw"
# let's specify parameters at the workflow level
workflow1.addParameter(Parameter("InputData", indata, "JDL", "", "", True, False, "Application Name"))
workflow1.addParameter(Parameter("JobType", "test", "JDL", "", "", True, False, "Job TYpe"))
workflow1.addParameter(Parameter("AncestorDepth", "2", "JDL", "", "", True, False, "Ancestor Depth"))
workflow1.addParameter(Parameter("Owner", "joel", "JDL", "", "", True, False, "user Name"))
workflow1.addParameter(Parameter("StdError", "std.err", "JDL", "", "", True, False, "user Name"))
workflow1.addParameter(Parameter("StdOutput", "std.out", "JDL", "", "", True, False, "user Name"))
workflow1.addParameter(Parameter("SoftwarePackages", "Brunel.v32r4", "JDL", "", "", True, False, "software"))
workflow1.addParameter(Parameter("CPUTime", 300000, "JDLReqt", "", "", True, False, "Application Name"))
#workflow1.addParameter(Parameter("Site","LCG.CERN.ch","JDLReqt","","",True, False, "Site"))
workflow1.addParameter(Parameter("Platform", "gLite", "JDLReqt", "", "", True, False, "platform"))
# and finally we can unlink them because we inherit them linked
workflow1.unlink(workflow1.parameters)
workflow1.addParameter(Parameter("PRODUCTION_ID", "00003033", "string", "", "", True, False, "Temporary fix"))
workflow1.addParameter(Parameter("JOB_ID", "00000011", "string", "", "", True, False, "Temporary fix"))
workflow1.addParameter(
Parameter(
"EMAILNAME",
"joel.closier@cern.ch",
"string",
"",
"",
True,
False,
"Email to send a report from the LogCheck module"))
workflow1.addParameter(Parameter("DataType", "DATA", "string", "", "", True, False, "type of Datatype"))
workflow1.addParameter(Parameter("SourceData", indata, "string", "", "", True, False, "Application Name"))
workflow1.addParameter(
Parameter(
"poolXMLCatName",
"pool_xml_catalog.xml",
"string",
"",
"",
True,
False,
"Application Name"))
workflow1.addParameter(Parameter("CONFIG_NAME", "LHCb", "string", "", "", True, False, "Configuration Name"))
workflow1.addParameter(Parameter("CONFIG_VERSION", "CCRC08", "string", "", "", True, False, "Configuration Version"))
#workflow1.addParameter(Parameter("NUMBER_OF_EVENTS","5","string","","",True, False, "number of events requested"))
workflow1.toXMLFile('wkf_CCRC_3.xml')
#w4 = fromXMLFile("/afs/cern.ch/user/g/gkuznets/test1.xml")
#print 'Creating code for the workflow'
# eval(compile(workflow1.createCode(),'<string>','exec'))
# workflow1.execute()
|
yujikato/DIRAC
|
src/DIRAC/Core/Workflow/test/step_g.py
|
Python
|
gpl-3.0
| 22,676
|
[
"DIRAC"
] |
c3cb199c38b2d5580b95ade438bdb63aefd7d22c6bca7602ee0f2f377c09dddf
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import sys
from hashlib import md5
from docutils import nodes
from docutils.parsers.rst import directives
import warnings
from matplotlib import rcParams
from matplotlib.mathtext import MathTextParser
rcParams['mathtext.fontset'] = 'cm'
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
options_spec = {'fontset': fontset_choice}
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = options_spec
def math_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
latex = ''.join(content)
node = latex_math(block_text)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node]
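# Illustrative reST input handled by the role and directive above (assumed example):
#   inline:  :math:`\frac{a}{b}`
#   block:   .. math::
#               \int_0^1 x^2\, dx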
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
orig_fontset = rcParams['mathtext.fontset']
rcParams['mathtext.fontset'] = fontset
if os.path.exists(filename):
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
        except Exception:
warnings.warn("Could not render math expression %s" % latex,
Warning)
depth = 0
rcParams['mathtext.fontset'] = orig_fontset
sys.stdout.write("#")
sys.stdout.flush()
return depth
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:]
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
if not os.path.exists(destdir):
os.makedirs(destdir)
dest = os.path.join(destdir, '%s.png' % name)
path = '/'.join((setup.app.builder.imgpath, 'mathmpl'))
depth = latex2png(latex, dest, node['fontset'])
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('math', math_role)
app.add_directive('math', math_directive,
True, (0, 0, 0), **options_spec)
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/matplotlib/sphinxext/mathmpl.py
|
Python
|
mit
| 3,754
|
[
"VisIt"
] |
e9a3176b71f00ef53524735cb0cc840a8241ab7b609e98c1d6f886b67b66b389
|
#! PsiAPI pubchem access
import psi4
hartree2ev = psi4.constants.hartree2ev
psi4.set_output_file("output.dat", False)
benz = psi4.geometry("""
pubchem:benzene
""")
psi4.set_options({"REFERENCE" : "RHF",
"MAX_ENERGY_G_CONVERGENCE" : 8,
"BASIS" : "STO-3G",
"DF_BASIS_SCF" : "CC-PVDZ-RI"})
psi4.optimize('scf')
psi4.set_options({"REFERENCE" : "RHF",
"BASIS" : "CC-PVDZ",
"DF_BASIS_SCF" : "CC-PVDZ-JKFIT"})
e_sing_rhf = psi4.energy('scf')
benz.set_multiplicity(3)
psi4.set_options({"REFERENCE" : "ROHF"})
e_trip_rohf = psi4.energy('scf')
psi4.set_options({"REFERENCE" : "UHF"})
e_trip_uhf = psi4.energy('scf')
vertical_uhf = hartree2ev * (e_trip_uhf - e_sing_rhf)
vertical_rohf = hartree2ev * (e_trip_rohf - e_sing_rhf)
psi4.core.print_out("\nSinglet-Triplet gap (vertical, UHF) = %8.2f eV\n" % vertical_uhf)
psi4.core.print_out("\nSinglet-Triplet gap (vertical, ROHF) = %8.2f eV\n" % vertical_rohf)
enuc = 204.531600152395043 #TEST
erhf = -230.72190557842444 #TEST
psi4.compare_values(enuc, benz.nuclear_repulsion_energy(), 3, "Nuclear repulsion energy") #TEST
psi4.compare_values(erhf, e_sing_rhf, 6, "Singlet benzene RHF energy") #TEST
|
CDSherrill/psi4
|
tests/python/pubchem/input.py
|
Python
|
lgpl-3.0
| 1,449
|
[
"Psi4"
] |
e1eb74b558253d08816cea1fe486048f2a5ce7232e77417df6081da8b622e577
|
tests = [
("python", "UnitTestDraw.py", {}),
("python", "UnitTestSimilarityMaps.py", {}),
("python", "UnitTestIPython.py", {}),
]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
ptosco/rdkit
|
rdkit/Chem/Draw/test_list.py
|
Python
|
bsd-3-clause
| 309
|
[
"RDKit"
] |
3856e423a89f8f11fe37f099c6e0ce1269b641048f85a26b01f466a278b24481
|
from datetime import datetime, timedelta
from flask import jsonify, Blueprint, request, json
from werkzeug.datastructures import MultiDict
from projectp import db, socketio
from projectp.auth import requires_auth
from projectp.locations.models import Location
from projectp.visits.forms import VisitForm, DateForm
from .models import Visit
visits = Blueprint('visits', __name__)
@visits.route('/')
def all():
"""Get all visits"""
visits = Visit.query.all()
return jsonify(Visit.serialize_list(visits))
@visits.route('/', methods=['POST'])
@requires_auth
def insert():
"""Insert a visit"""
print(request.form)
if request.method == 'POST':
# Get location based on token
hash = request.headers.get('authorization')
location = Location.query \
.join(Location.token) \
.filter_by(hash=hash.replace('Bearer ', '')) \
.first()
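    # Assumed header format, based on the prefix stripped above:
    #   Authorization: Bearer <token-hash>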
visit = Visit(request.form.get('start_time'), request.form.get('end_time'), location.id)
db.session.add(visit)
i = visit.serialize()
socketio.emit('visit', json.dumps(i), broadcast=True)
# Recalculate average
location.calculate_average()
# Save in DB
db.session.commit()
# Return JSON 201 successfully created response
return jsonify(
message='Visit {} has been uploaded successfully.'.format(visit.id),
visit=visit.serialize(),
code=201
), 201
# return jsonify(message='Form errors!', error=form.errors, code=400), 400
# return render_template('visit.html', form=form)
# return jsonify(), 201, {'Location': '/visits/' + str(visit.id)}
@visits.route('/recent')
def recent():
"""Get all visits from the past day"""
interval = datetime.now() - timedelta(days=1)
visits = Visit.query \
.filter(Visit.start_time >= interval) \
.order_by(Visit.start_time) \
.all()
# visits = [visit.serialize() for visit in visits]
return jsonify(Visit.serialize_list(visits))
@visits.route('/<start>/<end>')
def visits_range(start, end):
"""Get all visits by a certain period"""
form = DateForm(MultiDict(request.view_args))
if not form.validate():
return jsonify(message='Form errors!', error=form.errors, code=400), 400
visits = Visit.query \
.filter(Visit.start_time.between(start, end)) \
.order_by(Visit.start_time) \
.all()
return jsonify(Visit.serialize_list(visits))
@visits.route('/<int:id>')
def visit(id):
"""Get a visit by id"""
visit = Visit.query.get(id)
if visit:
return jsonify(visit.serialize())
return jsonify(message='Visit {} not found.'.format(id), code=404), 404
|
Proj-P/project-p-api
|
projectp/visits/views.py
|
Python
|
mit
| 2,768
|
[
"VisIt"
] |
7a4c171c0f8d4dd3e2cbb6c5172dcea73486db8caa51b5a0fdb8f68c7fbf7eec
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from __future__ import division
from pandas import *
import os, os.path
import sys
import numpy as np
sys.path.append('/home/will/HIVReportGen/AnalysisCode/')
sys.path.append('/home/will/PySeqUtils/')
# <codecell>
store = HDFStore('/home/will/HIVReportGen/Data/SplitRedcap/2013-01-16/EntireCohort.hdf')
# <codecell>
redcap_data = store['redcap']
seq_data = store['seq_data']
t = redcap_data['Event Name'].dropna().apply(lambda x: x.split(' - ')[0])
t.unique()
redcap_data['Patient visit number'] = redcap_data['Patient visit number'].combine_first(t)
# <codecell>
wanted_cols = ['Patient ID', 'Patient visit number', 'Date of visit', 'Latest CD4 count (cells/uL)', 'Latest viral load', 'Current ART status']
wanted_redcap = redcap_data[wanted_cols]
data = merge(wanted_redcap, seq_data[['LTR-bin-align']],
left_on = ['Patient ID', 'Patient visit number'],
right_index = True, how = 'inner').dropna()
data = data.rename(columns= {
'Patient visit number':'VisitNum',
'Date of visit':'Date',
'Latest CD4 count (cells/uL)':'CD4',
'Latest viral load':'VL',
'Current ART status':'ART'
})
print data
# <codecell>
data['ART'].unique()
# <codecell>
def QuantitateChange(seq_list):
delta = 0
for a, b in zip(seq_list, seq_list[1:]):
        delta += (np.array(a[200:-100]) == np.array(b[200:-100])).sum()
return delta/len(seq_list)
# <codecell>
def ProcessPatient(df):
if len(df) < 2:
return Series({
'NiaveVisits':np.nan,
'ARTVisits':np.nan,
'NumWellControlled':np.nan,
'LongestWellControlled':np.nan,
'NumUnControlled':np.nan,
'ControlledVariation':np.nan,
'UnControlledVariation':np.nan})
num_niave = (df['ART'] == 'naive').sum()
num_on = (df['ART'] == 'on').sum()
well_controlled = (df['VL'] <= 100) & (df['CD4'] >= 250)
num_well = well_controlled.sum()
num_uncontrolled = (~well_controlled).sum()
run = 0
longest_run = 0
for well in well_controlled.values:
if well:
run += 1
else:
longest_run = max(longest_run, run)
run = 0
longest_well_controlled = longest_run
well_controlled_variation = 0
if num_well > 1:
well_controlled_variation = QuantitateChange(df['LTR-bin-align'][well_controlled])
uncontrolled_variation = 0
if num_uncontrolled > 1:
uncontrolled_variation = QuantitateChange(df['LTR-bin-align'][~well_controlled])
return Series({
'NiaveVisits':num_niave,
'ARTVisits':num_on,
'NumWellControlled':num_well,
'LongestWellControlled':longest_well_controlled,
'NumUnControlled':num_uncontrolled,
'ControlledVariation':well_controlled_variation,
'UnControlledVariation':uncontrolled_variation})
ndata = data.groupby('Patient ID').apply(ProcessPatient)
print ndata
# <codecell>
niave_to_art = (ndata['NiaveVisits']>0) & (ndata['ARTVisits']>1)
print ndata.ix[niave_to_art].to_string()
uncontrolled_to_well_controlled = (ndata['LongestWellControlled'] >= 2) & (ndata['NumUnControlled'] >= 1)
print ndata.ix[uncontrolled_to_well_controlled].to_string()
# <codecell>
max_day = ndata['DateFromImpaired'].max()
min_day = ndata['DateFromImpaired'].min()
time_axis = np.arange(min_day, max_day)
good_max = ndata['DateFromImpaired'][ndata['DateFromImpaired']>0].median()
good_min = ndata['DateFromImpaired'][ndata['DateFromImpaired']<0].median()
print good_max, good_min
# <codecell>
npats = len(ndata['Patient ID'].unique())
ltr_len = len(data['LTR-bin-align'].values[0])
img_mat = np.ones((len(time_axis), ltr_len, npats))*np.nan
for depth, (pat, df) in enumerate(ndata.groupby('Patient ID')):
ndf = df.copy()
ndf.sort('DateFromImpaired')
#ltr_mat = np.vstack(ndf['LTR-bin-align'].values)
seqs = list(ndf['LTR-bin-align'].values)
nseqs = [seqs[0]] + seqs + [seqs[-1]]
times = list(ndf['DateFromImpaired'].values)
ntimes = [min_day] + times + [max_day]
for start_time, stop_time, ltr in zip(ntimes, ntimes[1:], nseqs):
start_ind = int(start_time - min_day)
stop_ind = int(stop_time - min_day)
#print ntimes, start_time, start_ind, stop_time, stop_ind, ltr
#raise KeyError
img_mat[start_ind:stop_ind,:,depth] = ltr
#print ltr_mat
#raise KeyError
# <codecell>
from scipy.stats import ttest_ind
t_img = nanmean(img_mat, axis = 2)
pre_mask = time_axis>0
pvals = []
for n in range(ltr_len):
pre_data = t_img[pre_mask, n]
post_data = t_img[~pre_mask, n]
if len(post_data)>5 and len(pre_data)>5:
_, pval = ttest_ind(pre_data[pre_data == pre_data],
post_data[post_data == post_data])
else:
pval = np.nan
pvals.append(pval)
pvals = np.array(pvals)
# <codecell>
from pylab import get_cmap
plt.figure(figsize = (10,10))
good_min = -24
good_max = 24
plot_mask = (time_axis > good_min) & (time_axis < good_max)
tmp = pvals < 1e-30
n_img = t_img.copy()
n_img = n_img[plot_mask, :]
n_img = n_img[:, tmp]
ticks = np.where(tmp)
plt.imshow(np.flipud(n_img),
interpolation = 'nearest',
cmap = get_cmap('gray'),
aspect = 'auto', vmin = 0, vmax = 1.0,
extent = (0, len(ticks[0]), good_max, good_min))
plt.xticks(np.arange(0, len(ticks[0]), 20), ticks[0][::20], rotation=90)
plt.colorbar()
plt.ylabel('Months from Last Impaired visit');
plt.xlabel('LTR pos');
plt.title('Significant LTR')
# <codecell>
tmp = np.abs(np.diff(t_img, axis = 0)).sum(axis = 0)
#print tmp
plt.hist(tmp, bins = 50)
# <codecell>
t_img.shape
# <codecell>
|
JudoWill/ResearchNotebooks
|
TimeCourseSeqs.py
|
Python
|
mit
| 6,164
|
[
"VisIt"
] |
78a646f58fc93bff8ea9a1b21c97b1ffa7b775e02bd8ab194d793f43179b24ef
|
# 10.07.2007, c
# last revision: 25.03.2008
filename_meshes = ['database/kostka_medium_tetra.mesh',
'database/kostka_medium_tetra.mesh',
'database/kostka_medium.mesh']
all_your_bases = [{'Omega' : '3_4_P1'},
{'Omega' : '3_4_P2'},
{'Omega' : '3_8_Q1'}]
filename_mesh = None
field_1 = {
'name' : '3_displacement',
'dim' : (3,1),
'flags' : (),
'domain' : 'Omega',
'bases' : None
}
def get_pars( dim, full = False ):
import numpy as nm
sym = (dim + 1) * dim / 2
lam = 1e1
mu = 1e0
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
if full:
return lam * oot + mu * nm.diag( o + 1.0 )
else:
return {'lambda' : lam, 'mu' : mu}
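# Illustrative values from the function above: with lam = 1e1 and mu = 1e0,
#   get_pars(3)        -> {'lambda' : 10.0, 'mu' : 1.0}
#   get_pars(3, True)  -> a 6x6 matrix, lam * outer(o, o) + mu * diag(o + 1)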
material_1 = {
'name' : 'solid',
'mode' : 'here',
'region' : 'Omega',
'lame' : get_pars( 3 ),
'Dijkl' : get_pars( 3, True ),
}
material_2 = {
'name' : 'spring',
'mode' : 'here',
'region' : 'Omega',
'pars' : {'stiffness' : 1e0, 'projection' : None},
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Bottom',
'select' : 'nodes in (z < -0.499)',
}
region_2 = {
'name' : 'Top',
'select' : 'nodes in (z > 0.499)',
}
ebc_1 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.2' : 0.1},
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d3',
}
equations_iso = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i1.Omega( solid.lame, v, u )
= dw_point_lspring.i1.Bottom( spring.pars, v, u )""",
}
equations_general = {
'balance_of_forces' :
"""dw_lin_elastic.i1.Omega( solid.Dijkl, v, u )
= dw_point_lspring.i1.Bottom( spring.pars, v, u )""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.umfpack',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'lin_solver' : 'umfpack',
'matrix' : 'internal', # 'external' or 'internal'
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
from sfepy.base.testing import TestCommon
##
# 10.07.2007, c
class Test( TestCommon ):
tests = ['test_get_solution', 'test_linear_terms']
##
# 10.07.2007, c
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 25.03.2008, r: 25.03.2008
def test_linear_terms( self ):
ok = True
for sols in self.solutions:
ok = ok and self.compare_vectors( sols[0], sols[1],
label1 = 'isotropic',
label2 = 'general' )
return ok
##
# c: 10.07.2007, r: 25.03.2008
def test_get_solution( self ):
from sfepy.solvers.generic import solve_stationary
from sfepy.base.base import IndexedStruct
import os.path as op
ok = True
self.solutions = []
for ii, bases in enumerate( all_your_bases ):
fname = filename_meshes[ii]
self.conf.filename_mesh = fname
self.conf.fields['field_1'].bases = bases
self.report( 'mesh: %s, base: %s' % (fname, bases) )
status = IndexedStruct()
self.report( 'isotropic' )
self.conf.equations = self.conf.equations_iso
problem, vec1, data = solve_stationary( self.conf,
nls_status = status )
converged = status.condition == 0
ok = ok and converged
self.report( 'converged: %s' % converged )
self.report( 'general' )
self.conf.equations = self.conf.equations_general
problem, vec2, data = solve_stationary( self.conf,
nls_status = status )
converged = status.condition == 0
ok = ok and converged
self.report( 'converged: %s' % converged )
self.solutions.append( (vec1, vec2) )
name = op.join( self.options.out_dir,
'_'.join( ('test_elasticity_small_strain',
op.splitext( op.basename( fname ) )[0],
bases.values()[0] ))
+ '.vtk' )
problem.save_state( name, vec1 )
## trunk = op.join( self.options.out_dir,
## op.splitext( op.basename( fname ) )[0] )
## problem.save_field_meshes( trunk )
## problem.save_regions( trunk )
return ok
|
certik/sfepy
|
tests/test_elasticity_small_strain.py
|
Python
|
bsd-3-clause
| 5,322
|
[
"VTK"
] |
2875cb9693afc7ae0ff05ec5b1657512709a84d86cd26b89da82e8a8b96f10f3
|
#!/usr/bin/env python
# Author: Benjamin Menkuec
# Copyright 2015 Benjamin Menkuec
# License: LGPL
import sys
import os.path
import subprocess
import argparse
import platform
import multiprocessing
# this function calls a subroutine that will call samtools to
# sort the bam file and then build an index of it
def indexBamFile(filename, script_path):
args = ("python", script_path + "/bam_indexer.py", filename)
print args
print "Creating indexed bam file..."
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
if results.verbose == True:
print output
if popen.returncode != 0:
print "error"
sys.exit()
return;
script_path = os.path.dirname(os.path.realpath(__file__))
dataDir = os.getcwd() + "/"
parser = argparse.ArgumentParser(description="Preprocess fastq files and do mapping")
parser.add_argument('--adapters', type=str, default = "/../data/adapters.fa")
parser.add_argument('--barcodes', type=str, default = "/../data/barcodes.fa")
parser.add_argument('--flexcat_er', type=str, default = "0.2")
parser.add_argument('--flexcat_ol', type=str, default = "4")
parser.add_argument('--flexcat_fm', type=str, default = "19")
parser.add_argument('--flexcat_ml', type=str, default = "19")
parser.add_argument('--flexcat_oh', type=str, default = "0")
parser.add_argument('--flexcat_times', type=str, default = "1")
parser.add_argument('--exo', action='store_true')
parser.add_argument('--clean', action='store_true')
parser.add_argument('--overwrite', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--bowtie_location', nargs='?', default = "")
parser.add_argument('--num_threads', nargs='?', default =
str(multiprocessing.cpu_count()))
parser.add_argument('input_file')
parser.add_argument('--output_dir', type=str)
parser.add_argument('--filter_chromosomes', type=str, default="(.*)[H|U|M|_]+(.*)")
parser.add_argument('--random_split', action='store_true')
parser.add_argument('genome', type=str)
results, leftovers = parser.parse_known_args()
flexcatAdapterFilename = (os.path.dirname(os.path.realpath(__file__)) +
results.adapters)
flexcatBarcodeFilename = (os.path.dirname(os.path.realpath(__file__)) +
results.barcodes)
print "Reads: " + results.input_file
print "Genome: " + results.genome
genomeFilename = results.genome
bowtieLocation = results.bowtie_location
if len(bowtieLocation) > 0:
bowtieLocation = bowtieLocation + "/"
if(platform.system() == "Windows" and results.bowtie_location == ""):
print "Bowtie location is required under windows"
sys.exit()
inputFile = os.path.abspath(results.input_file)
temp, inFileExtension = inputFile.split(os.extsep, 1)
# The output file of flexcat can not be a zipped fastq,
# because bowtie can not handle it.
# Therefore, remove .gz ending if its a fastq.gz file
inFileExtension, temp = os.path.splitext(inFileExtension)
inFileExtension = "." + inFileExtension
inFilenamePrefixWithoutPath = os.path.basename(inputFile)
inFilenamePrefixWithoutPath, temp = inFilenamePrefixWithoutPath.split(os.extsep, 1)
# set the output filename of flexcat
if results.output_dir is not None:
outputDir = os.path.abspath(results.output_dir)
head, tail = os.path.split(results.output_dir)
if len(tail) > 0:
inFilenamePrefixWithoutPath = tail;
outputDir = outputDir + "/"
else:
outputDir = inFilenamePrefixWithoutPath
flexcatOutputFilename = (outputDir + "/" + inFilenamePrefixWithoutPath +
inFileExtension)
# if the exo option is set, there won't be any fixed barcode matching.
# therefore the output file of flexcat does not have a postfix
if results.exo:
bowtieInputFilename = (outputDir + "/" + inFilenamePrefixWithoutPath +
inFileExtension)
else:
bowtieInputFilename = (outputDir + "/" + inFilenamePrefixWithoutPath +
"_matched_barcode" + inFileExtension)
bowtieOutputFilename = outputDir + "/" + inFilenamePrefixWithoutPath + ".sam"
# platform independent way of calling flexcat
flexcat_path = script_path+"/../bin/flexcat"
if(platform.system() == "Windows"):
flexcat_path += ".exe"
if results.exo:
args = (flexcat_path, results.input_file, "-tt", "-t", "-ss", "-st", "-app",
"-tnum", results.num_threads, "-times", results.flexcat_times, "-er",
results.flexcat_er, "-ol", results.flexcat_ol, "-oh", results.flexcat_oh,
"-fm", results.flexcat_fm, "-ml", results.flexcat_ml, "-a",
flexcatAdapterFilename,"-o", flexcatOutputFilename)
else:
args = (flexcat_path, results.input_file, "-tl", "5", "-tt", "-t", "-ss", "-st",
"-app", "-tnum", results.num_threads, "-times", results.flexcat_times, "-er",
results.flexcat_er, "-ol", results.flexcat_ol, "-oh", results.flexcat_oh,
"-fm", results.flexcat_fm, "-ml", results.flexcat_ml, "-b",
flexcatBarcodeFilename, "-a", flexcatAdapterFilename,"-o", flexcatOutputFilename)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
if (os.path.isfile(bowtieInputFilename) == False or results.overwrite == True):
print "Filtering pre-mapping barcodes and trimming adapters..."
if results.verbose == True:
popen = subprocess.Popen(args + tuple(leftovers))
else:
popen = subprocess.Popen(args + tuple(leftovers), stdout=subprocess.PIPE)
popen.wait()
if popen.returncode != 0:
print "error"
sys.exit()
if results.verbose == True:
print flexcatOutputFilename + " created"
head, tail = os.path.split(genomeFilename)
genomeIndex, file_extension = os.path.splitext(tail)
# check if bowtie index already exists
# if yes, skip building the index
genomeIndexFile = os.path.dirname(genomeFilename) + "/" + genomeIndex;
if (os.path.isfile(genomeIndexFile + ".1.ebwt") == False):
if(platform.system() == "Linux" or platform.system() == "Linux2"):
args = (bowtieLocation + "bowtie-build", "-o", "1", genomeFilename,
genomeIndexFile)
else:
args = ("python", bowtieLocation + "bowtie-build", "-o", "1",
genomeFilename, genomeIndexFile)
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
if results.verbose == True:
print output
if popen.returncode != 0:
print "error"
sys.exit()
# call bowtie if mapped file does not exist or overwrite is true
if (os.path.isfile(bowtieOutputFilename) == False or results.overwrite == True):
args = (bowtieLocation + "bowtie", "-S", "-p", results.num_threads,
"--chunkmbs", "512", "-k", "1", "-m", "1", "-v", "2", "--strata", "--best",
genomeIndexFile, bowtieInputFilename, bowtieOutputFilename)
if(platform.system() != "Linux" and platform.system() != "Linux2"):
args = ("python",) + args
print "Mapping reads..."
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
if results.verbose == True:
print output
if popen.returncode != 0:
print "error"
sys.exit()
# nexus-pre
nexusOutputFilename = (outputDir + "/" + inFilenamePrefixWithoutPath +
"_filtered.bam")
if results.random_split == True:
nexusOutputFilenameSplit1 = (outputDir + "/" + inFilenamePrefixWithoutPath +
"_filtered_split1.bam")
nexusOutputFilenameSplit2 = (outputDir + "/" + inFilenamePrefixWithoutPath +
"_filtered_split2.bam")
# platform independent way of calling nexus-pre
nexcat_path = script_path+"/../bin/nexcat"
if(platform.system() == "Windows"):
nexcat_path += ".exe"
# call nexcat if overwrite is true or output files don't exist
if (os.path.isfile(nexusOutputFilename) == False or
(results.random_split == True and
(os.path.isfile(nexusOutputFilenameSplit1) == False or
os.path.isfile(nexusOutputFilenameSplit2) == False)) or
results.overwrite == True):
args = (nexcat_path, bowtieOutputFilename, "-fc", results.filter_chromosomes)
if results.random_split == True:
args += ("-rs",)
print "Filtering post-mapping barcodes..."
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
if results.verbose == True:
print output
if popen.returncode != 0:
print "error"
sys.exit()
# special option for binding characteristic analysis
indexBamFile(nexusOutputFilename, script_path)
if results.random_split == True:
indexBamFile(nexusOutputFilenameSplit2, script_path)
indexBamFile(nexusOutputFilenameSplit1, script_path)
# cleanup
if results.clean:
print "deleting intermediate files..."
os.remove(bowtieOutputFilename)
os.remove(nexusOutputFilename)
if results.exo:
os.remove(flexcatOutputFilename)
else:
os.remove(outputDir + "/" + inFilenamePrefixWithoutPath +
"_matched_barcode" + inFileExtension)
os.remove(outputDir + "/" + inFilenamePrefixWithoutPath +
"_unidentified" + inFileExtension)
|
charite/Q
|
scripts/preprocess.py
|
Python
|
bsd-2-clause
| 8,778
|
[
"Bowtie"
] |
958918259050555ae7d7c078eebdc67d77e80b4a89953b3aff1de9c3cd20c5c9
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
from calibre import as_unicode
from calibre.ebooks.metadata.sources.base import Source, Option
class GoogleImages(Source):
name = 'Google Images'
description = _('Downloads covers from a Google Image search. Useful to find larger/alternate covers.')
capabilities = frozenset(['cover'])
config_help_message = _('Configure the Google Image Search plugin')
can_get_multiple_covers = True
options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),
_('The maximum number of covers to process from the google search result')),
Option('size', 'choices', 'svga', _('Cover size'),
_('Search for covers larger than the specified size'),
choices=OrderedDict((
('any', _('Any size'),),
('l', _('Large'),),
('qsvga', _('Larger than %s')%'400x300',),
('vga', _('Larger than %s')%'640x480',),
('svga', _('Larger than %s')%'600x800',),
('xga', _('Larger than %s')%'1024x768',),
('2mp', _('Larger than %s')%'2 MP',),
('4mp', _('Larger than %s')%'4 MP',),
))),
)
def download_cover(self, log, result_queue, abort,
title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
if not title:
return
timeout = max(60, timeout) # Needs at least a minute
title = ' '.join(self.get_title_tokens(title))
author = ' '.join(self.get_author_tokens(authors))
urls = self.get_image_urls(title, author, log, abort, timeout)
self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)
def get_image_urls(self, title, author, log, abort, timeout):
from calibre.utils.ipc.simple_worker import fork_job, WorkerError
try:
return fork_job('calibre.ebooks.metadata.sources.google_images',
'search', args=(title, author, self.prefs['size'], timeout), no_output=True, abort=abort, timeout=timeout)['result']
except WorkerError as e:
if e.orig_tb:
log.error(e.orig_tb)
log.exception('Searching google failed:' + as_unicode(e))
except Exception as e:
log.exception('Searching google failed:' + as_unicode(e))
return []
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0'
def find_image_urls(br, ans):
import urlparse
for w in br.page.mainFrame().documentElement().findAll('div#ires a.rg_l[href]'):
try:
imgurl = urlparse.parse_qs(urlparse.urlparse(unicode(w.attribute('href'))).query)['imgurl'][0]
except:
# import traceback
# traceback.print_exc()
continue
if imgurl not in ans:
ans.append(imgurl)
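# For reference: result links on the old advanced image search page were of the
# form /imgres?imgurl=http%3A%2F%2Fexample.com%2Fcover.jpg&imgrefurl=... (the
# URL here is illustrative), so parse_qs() over the href's query string yields
# the original image URL under the 'imgurl' key, which is what gets collected.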
def search(title, author, size, timeout, debug=False):
import time
from calibre.web.jsbrowser.browser import Browser, LoadWatcher, Timeout
ans = []
start_time = time.time()
br = Browser(user_agent=USER_AGENT, enable_developer_tools=debug)
br.visit('https://www.google.com/advanced_image_search')
f = br.select_form('form[action="/search"]')
f['as_q'] = '%s %s'%(title, author)
if size != 'any':
f['imgsz'] = size
f['imgar'] = 't|xt'
f['as_filetype'] = 'jpg'
br.submit(wait_for_load=False)
# Loop until the page finishes loading or at least five image urls are
# found
lw = LoadWatcher(br.page, br)
while lw.is_loading and len(ans) < 5:
br.run_for_a_time(0.2)
find_image_urls(br, ans)
if time.time() - start_time > timeout:
raise Timeout('Timed out trying to load google image search page')
find_image_urls(br, ans)
if debug:
br.show_browser()
br.close()
del br # Needed to prevent PyQt from segfaulting
return ans
def test_google():
import pprint
pprint.pprint(search('heroes', 'abercrombie', 'svga', 60, debug=True))
def test():
from Queue import Queue
from threading import Event
from calibre.utils.logging import default_log
p = GoogleImages(None)
rq = Queue()
p.download_cover(default_log, rq, Event(), title='The Heroes',
authors=('Joe Abercrombie',))
print ('Downloaded', rq.qsize(), 'covers')
if __name__ == '__main__':
test()
|
sharad/calibre
|
src/calibre/ebooks/metadata/sources/google_images.py
|
Python
|
gpl-3.0
| 4,869
|
[
"VisIt"
] |
3f9fb34752988d3b8fc8abc056f7cb49fe605169269fdbbe81acf28248dd9080
|
import galaxy.datatypes.data as data
import galaxy.datatypes.binary
from galaxy.datatypes.binary import Binary
class Matlab(Binary):
file_ext = "mat"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def sniff( self, filename ):
# The header of any psysound matlab file starts with "MATLAB 5.0 MAT-file", and the file is binary.
# (see: https://maxwell.ict.griffith.edu.au/spl/matlab-page/matfile_format.pdf)
try:
header = open( filename ).read()
if "MATLAB" in header and "5.0" in header:
return True
else:
return False
except:
return False
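    # Sketch of a narrower check (illustrative, not part of Galaxy's API): read
    # only the fixed-size textual header instead of the whole file, e.g.
    #   with open(filename, 'rb') as handle:
    #       return handle.read(19) == 'MATLAB 5.0 MAT-file'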
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Matlab Binary file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Matlab Binary file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd):
if preview:
return ("MATLAB data files cannot be previewed.")
else:
return super(Matlab, self).display_data( trans, dataset, preview, filename, to_ext, size, offset, **kwd)
Binary.register_sniffable_binary_format("mat", "mat", Matlab)
class Wav(Binary):
file_ext = "wav"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def sniff( self, filename ):
try:
header = open( filename ).read()
            if header.startswith("RIFF"):
return True
else:
return False
except:
return False
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Audio (WAV) file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Audio (WAV) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd):
if preview:
return ("WAV audio files cannot be previewed.")
else:
return super(Wav, self).display_data( trans, dataset, preview, filename, to_ext, size, offset, **kwd)
Binary.register_unsniffable_binary_ext("wav")
class Mp3(Binary):
file_ext = "mp3"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Audio (MP3) file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Audio (MP3) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd):
if preview:
return ("MP3 audio files cannot be previewed.")
else:
return super(Mp3, self).display_data( trans, dataset, preview, filename, to_ext, size, offset, **kwd)
Binary.register_unsniffable_binary_ext("mp3")
class Avi(Binary):
file_ext = "avi"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Video (AVI) file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Video (AVI) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd):
if preview:
return ("AVI video files cannot be previewed.")
else:
return super(Avi, self).display_data( trans, dataset, preview, filename, to_ext, size, offset, **kwd)
Binary.register_unsniffable_binary_ext("avi")
class Ogg(Binary):
file_ext = "ogg"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Media (OGG) file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Media (OGG) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd):
if preview:
return ("OGG media files cannot be previewed.")
else:
return super(Ogg, self).display_data( trans, dataset, preview, filename, to_ext, size, offset, **kwd)
Binary.register_unsniffable_binary_ext("ogg")
|
IntersectAustralia/hcsvlab-galaxy
|
tools/psysound/psysound.py
|
Python
|
gpl-3.0
| 5,884
|
[
"Galaxy"
] |
eb7598ded294c75d59bac43f056e862b5b7cbf0a5313224a3cfc8c6ee0163ab1
|
r"""OS routines for Mac, DOS, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, mac, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, ntpath, or macpath
- os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'mac' in _names:
name = 'mac'
linesep = '\r'
from mac import *
try:
from mac import _exit
except ImportError:
pass
import macpath as path
import mac
__all__.extend(_get_exports_list(mac))
del mac
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
makedirs(head, mode)
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and empty all intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
from os.path import join, getsize
for root, dirs, files in walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if not islink(path):
for x in walk(path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
from errno import ENOENT, ENOTDIR
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != ENOENT and e.errno != ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import popen2
stdout, stdin = popen2.popen2(cmd, bufsize)
return stdin, stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import popen2
stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
return stdin, stdout, stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import popen2
stdout, stdin = popen2.popen4(cmd, bufsize)
return stdin, stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
_urandomfd = None
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
global _urandomfd
if _urandomfd is None:
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except:
_urandomfd = NotImplementedError
if _urandomfd is NotImplementedError:
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
return bytes
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.4/Lib/os.py
|
Python
|
mit
| 23,147
|
[
"VisIt"
] |
e57dbe4d9fb2c030e2d7ec9d4d1a9f348d55a40419fcd186e25974333dd841f4
|
"""
test views
"""
import datetime
import json
import re
import pytz
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.field_overrides import OverrideFieldData # pylint: disable=import-error
from courseware.tests.factories import StudentModuleFactory # pylint: disable=import-error
from courseware.tests.helpers import LoginEnrollmentTestCase # pylint: disable=import-error
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from edxmako.shortcuts import render_to_response # pylint: disable=import-error
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.tests.factories import ( # pylint: disable=import-error
AdminFactory,
CourseEnrollmentFactory,
UserFactory,
)
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
from ..models import (
CustomCourseForEdX,
CcxMembership,
CcxFutureMembership,
)
from ..overrides import get_override_for_ccx, override_field_for_ccx
from .. import ACTIVE_CCX_KEY
from .factories import (
CcxFactory,
CcxMembershipFactory,
CcxFutureMembershipFactory,
)
def intercept_renderer(path, context):
"""
Intercept calls to `render_to_response` and attach the context dict to the
response for examination in unit tests.
"""
# I think Django already does this for you in their TestClient, except
# we're bypassing that by using edxmako. Probably edxmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
@attr('shard_1')
class TestCoachDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
def setUp(self):
"""
Set up tests
"""
super(TestCoachDashboard, self).setUp()
self.course = course = CourseFactory.create()
# Create instructor account
self.coach = coach = AdminFactory.create()
self.client.login(username=coach.username, password="test")
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
self.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
chapters = [ItemFactory.create(start=start, parent=course)
for _ in xrange(2)]
sequentials = flatten([
[
ItemFactory.create(parent=chapter) for _ in xrange(2)
] for chapter in chapters
])
verticals = flatten([
[
ItemFactory.create(
due=due, parent=sequential, graded=True, format='Homework'
) for _ in xrange(2)
] for sequential in sequentials
])
blocks = flatten([ # pylint: disable=unused-variable
[
ItemFactory.create(parent=vertical) for _ in xrange(2)
] for vertical in verticals
])
def make_coach(self):
"""
create coach user
"""
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
def make_ccx(self):
"""
create ccx
"""
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
return ccx
def get_outbox(self):
"""
get fake outbox
"""
from django.core import mail
return mail.outbox
def test_not_a_coach(self):
"""
User is not a coach, should get Forbidden response.
"""
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_no_ccx_created(self):
"""
No CCX is created, coach should see form to add a CCX.
"""
self.make_coach()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(re.search(
'<form action=".+create_ccx"',
response.content))
def test_create_ccx(self):
"""
Create CCX. Follow redirect to coach dashboard, confirm we see
the coach dashboard for the new CCX.
"""
self.make_coach()
url = reverse(
'create_ccx',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'name': 'New CCX'})
self.assertEqual(response.status_code, 302)
url = response.get('location') # pylint: disable=no-member
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(re.search('id="ccx-schedule"', response.content))
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_edit_schedule(self, today):
"""
Get CCX schedule, modify it, save it.
"""
today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
self.test_create_ccx()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
schedule = json.loads(response.mako_context['schedule']) # pylint: disable=no-member
self.assertEqual(len(schedule), 2)
self.assertEqual(schedule[0]['hidden'], True)
self.assertEqual(schedule[0]['start'], None)
self.assertEqual(schedule[0]['children'][0]['start'], None)
self.assertEqual(schedule[0]['due'], None)
self.assertEqual(schedule[0]['children'][0]['due'], None)
self.assertEqual(
schedule[0]['children'][0]['children'][0]['due'], None
)
url = reverse(
'save_ccx',
kwargs={'course_id': self.course.id.to_deprecated_string()})
def unhide(unit):
"""
Recursively unhide a unit and all of its children in the CCX
schedule.
"""
unit['hidden'] = False
for child in unit.get('children', ()):
unhide(child)
unhide(schedule[0])
schedule[0]['start'] = u'2014-11-20 00:00'
schedule[0]['children'][0]['due'] = u'2014-12-25 00:00' # what a jerk!
response = self.client.post(
url, json.dumps(schedule), content_type='application/json'
)
schedule = json.loads(response.content)['schedule']
self.assertEqual(schedule[0]['hidden'], False)
self.assertEqual(schedule[0]['start'], u'2014-11-20 00:00')
self.assertEqual(
schedule[0]['children'][0]['due'], u'2014-12-25 00:00'
)
# Make sure start date set on course, follows start date of earliest
# scheduled chapter
ccx = CustomCourseForEdX.objects.get()
course_start = get_override_for_ccx(ccx, self.course, 'start')
self.assertEqual(str(course_start)[:-9], u'2014-11-20 00:00')
# Make sure grading policy adjusted
policy = get_override_for_ccx(ccx, self.course, 'grading_policy',
self.course.grading_policy)
self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
self.assertEqual(policy['GRADER'][0]['min_count'], 4)
self.assertEqual(policy['GRADER'][1]['type'], 'Lab')
self.assertEqual(policy['GRADER'][1]['min_count'], 0)
self.assertEqual(policy['GRADER'][2]['type'], 'Midterm Exam')
self.assertEqual(policy['GRADER'][2]['min_count'], 0)
self.assertEqual(policy['GRADER'][3]['type'], 'Final Exam')
self.assertEqual(policy['GRADER'][3]['min_count'], 0)
def test_enroll_member_student(self):
"""enroll a list of students who are members of the class
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Enroll',
'student-ids': u','.join([student.email, ]), # pylint: disable=no-member
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(student.email in outbox[0].recipients()) # pylint: disable=no-member
# a CcxMembership exists for this student
self.assertTrue(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
def test_unenroll_member_student(self):
"""unenroll a list of students who are members of the class
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
outbox = self.get_outbox()
self.assertEqual(outbox, [])
# student is member of CCX:
CcxMembershipFactory(ccx=ccx, student=student)
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Unenroll',
'student-ids': u','.join([student.email, ]), # pylint: disable=no-member
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(student.email in outbox[0].recipients()) # pylint: disable=no-member
# the membership for this student is gone
self.assertFalse(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
def test_enroll_non_user_student(self):
"""enroll a list of students who are not users yet
"""
test_email = "nobody@nowhere.com"
self.make_coach()
ccx = self.make_ccx()
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Enroll',
'student-ids': u','.join([test_email, ]),
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(test_email in outbox[0].recipients())
self.assertTrue(
CcxFutureMembership.objects.filter(
ccx=ccx, email=test_email
).exists()
)
def test_unenroll_non_user_student(self):
"""unenroll a list of students who are not users yet
"""
test_email = "nobody@nowhere.com"
self.make_coach()
ccx = self.make_ccx()
outbox = self.get_outbox()
CcxFutureMembershipFactory(ccx=ccx, email=test_email)
self.assertEqual(outbox, [])
url = reverse(
'ccx_invite',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'enrollment-button': 'Unenroll',
'student-ids': u','.join([test_email, ]),
'email-students': 'Notify-students-by-email',
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(len(outbox), 1)
self.assertTrue(test_email in outbox[0].recipients())
self.assertFalse(
CcxFutureMembership.objects.filter(
ccx=ccx, email=test_email
).exists()
)
def test_manage_add_single_student(self):
"""enroll a single student who is a member of the class already
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
# no emails have been sent so far
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_manage_student',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'student-action': 'add',
'student-id': u','.join([student.email, ]), # pylint: disable=no-member
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(outbox, [])
# a CcxMembership exists for this student
self.assertTrue(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
def test_manage_remove_single_student(self):
"""unenroll a single student who is a member of the class already
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
CcxMembershipFactory(ccx=ccx, student=student)
# no emails have been sent so far
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
'ccx_manage_student',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'student-action': 'revoke',
'student-id': u','.join([student.email, ]), # pylint: disable=no-member
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(302 in response.redirect_chain[0])
self.assertEqual(outbox, [])
# a CcxMembership exists for this student
self.assertFalse(
CcxMembership.objects.filter(ccx=ccx, student=student).exists()
)
GET_CHILDREN = XModuleMixin.get_children
def patched_get_children(self, usage_key_filter=None): # pylint: disable=missing-docstring
def iter_children(): # pylint: disable=missing-docstring
print self.__dict__
for child in GET_CHILDREN(self, usage_key_filter=usage_key_filter):
child._field_data_cache = {} # pylint: disable=protected-access
if not child.visible_to_staff_only:
yield child
return list(iter_children())
@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
@patch('xmodule.x_module.XModuleMixin.get_children', patched_get_children, spec=True)
class TestCCXGrades(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
def setUp(self):
"""
Set up tests
"""
super(TestCCXGrades, self).setUp()
self.course = course = CourseFactory.create()
# Create instructor account
self.coach = coach = AdminFactory.create()
self.client.login(username=coach.username, password="test")
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
chapter = ItemFactory.create(
start=start, parent=course, category='sequential')
sections = [
ItemFactory.create(
parent=chapter,
category="sequential",
metadata={'graded': True, 'format': 'Homework'})
for _ in xrange(4)]
role = CourseCcxCoachRole(self.course.id)
role.add_users(coach)
self.ccx = ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
self.student = student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
CcxMembershipFactory(ccx=ccx, student=student, active=True)
for i, section in enumerate(sections):
for j in xrange(4):
item = ItemFactory.create(
parent=section,
category="problem",
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'}
)
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1,
student=student,
course_id=self.course.id,
module_state_key=item.location
)
# Apparently the test harness doesn't use LmsFieldStorage, and I'm not
# sure if there's a way to poke the test harness to do so. So, we'll
# just inject the override field storage in this brute force manner.
OverrideFieldData.provider_classes = None
# pylint: disable=protected-access
for block in iter_blocks(course):
block._field_data = OverrideFieldData.wrap(coach, block._field_data)
new_cache = {'tabs': [], 'discussion_topics': []}
if 'grading_policy' in block._field_data_cache:
new_cache['grading_policy'] = block._field_data_cache['grading_policy']
block._field_data_cache = new_cache
def cleanup_provider_classes():
"""
After everything is done, clean up by un-doing the change to the
OverrideFieldData object that is done during the wrap method.
"""
OverrideFieldData.provider_classes = None
self.addCleanup(cleanup_provider_classes)
patch_context = patch('ccx.views.get_course_by_id')
get_course = patch_context.start()
get_course.return_value = course
self.addCleanup(patch_context.stop)
override_field_for_ccx(ccx, course, 'grading_policy', {
'GRADER': [
{'drop_count': 0,
'min_count': 2,
'short_label': 'HW',
'type': 'Homework',
'weight': 1}
],
'GRADE_CUTOFFS': {'Pass': 0.75},
})
override_field_for_ccx(
ccx, sections[-1], 'visible_to_staff_only', True)
@patch('ccx.views.render_to_response', intercept_renderer)
def test_gradebook(self):
url = reverse(
'ccx_gradebook',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
student_info = response.mako_context['students'][0] # pylint: disable=no-member
self.assertEqual(student_info['grade_summary']['percent'], 0.5)
self.assertEqual(
student_info['grade_summary']['grade_breakdown'][0]['percent'],
0.5)
self.assertEqual(
len(student_info['grade_summary']['section_breakdown']), 4)
def test_grades_csv(self):
url = reverse(
'ccx_grades_csv',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
headers, row = (
row.strip().split(',') for row in
response.content.strip().split('\n')
)
data = dict(zip(headers, row))
self.assertEqual(data['HW 01'], '0.75')
self.assertEqual(data['HW 02'], '0.5')
self.assertEqual(data['HW 03'], '0.25')
self.assertEqual(data['HW Avg'], '0.5')
self.assertTrue('HW 04' not in data)
@patch('courseware.views.render_to_response', intercept_renderer)
def test_student_progress(self):
patch_context = patch('courseware.views.get_course_with_access')
get_course = patch_context.start()
get_course.return_value = self.course
self.addCleanup(patch_context.stop)
self.client.login(username=self.student.username, password="test")
session = self.client.session
session[ACTIVE_CCX_KEY] = self.ccx.id # pylint: disable=no-member
session.save()
self.client.session.get(ACTIVE_CCX_KEY)
url = reverse(
'progress',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
grades = response.mako_context['grade_summary'] # pylint: disable=no-member
self.assertEqual(grades['percent'], 0.5)
self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
self.assertEqual(len(grades['section_breakdown']), 4)
@attr('shard_1')
class TestSwitchActiveCCX(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""Verify the view for switching which CCX is active, if any
"""
def setUp(self):
super(TestSwitchActiveCCX, self).setUp()
self.course = course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
enrollment = CourseEnrollmentFactory.create(course_id=course.id)
self.user = enrollment.user
self.target_url = reverse(
'course_root', args=[course.id.to_deprecated_string()]
)
def register_user_in_ccx(self, active=False):
"""create registration of self.user in self.ccx
registration will be inactive unless active=True
"""
CcxMembershipFactory(ccx=self.ccx, student=self.user, active=active)
def revoke_ccx_registration(self):
"""
delete membership
"""
membership = CcxMembership.objects.filter(
ccx=self.ccx, student=self.user
)
membership.delete()
def verify_active_ccx(self, request, id=None): # pylint: disable=redefined-builtin, invalid-name
"""verify that we have the correct active ccx"""
if id:
id = str(id)
self.assertEqual(id, request.session.get(ACTIVE_CCX_KEY, None))
def test_unauthorized_cannot_switch_to_ccx(self):
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
def test_unauthorized_cannot_switch_to_mooc(self):
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string()]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
def test_enrolled_inactive_user_cannot_select_ccx(self):
self.register_user_in_ccx(active=False)
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# if the ccx were active, we'd need to pass the ID of the ccx here.
self.verify_active_ccx(self.client)
def test_enrolled_user_can_select_ccx(self):
self.register_user_in_ccx(active=True)
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
self.verify_active_ccx(self.client, self.ccx.id)
def test_enrolled_user_can_select_mooc(self):
self.register_user_in_ccx(active=True)
self.client.login(username=self.user.username, password="test")
# pre-seed the session with the ccx id
session = self.client.session
session[ACTIVE_CCX_KEY] = str(self.ccx.id)
session.save()
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string()]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
self.verify_active_ccx(self.client)
def test_unenrolled_user_cannot_select_ccx(self):
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# if the ccx were active, we'd need to pass the ID of the ccx here.
self.verify_active_ccx(self.client)
def test_unenrolled_user_switched_to_mooc(self):
self.client.login(username=self.user.username, password="test")
# pre-seed the session with the ccx id
session = self.client.session
session[ACTIVE_CCX_KEY] = str(self.ccx.id)
session.save()
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# we tried to select the ccx but are not registered, so we are switched
# back to the mooc view
self.verify_active_ccx(self.client)
def test_unassociated_course_and_ccx_not_selected(self):
new_course = CourseFactory.create()
self.client.login(username=self.user.username, password="test")
expected_url = reverse(
'course_root', args=[new_course.id.to_deprecated_string()]
)
# the ccx and the course are not related.
switch_url = reverse(
'switch_active_ccx',
args=[new_course.id.to_deprecated_string(), self.ccx.id]
)
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(expected_url)) # pylint: disable=no-member
# the mooc should be active
self.verify_active_ccx(self.client)
def test_missing_ccx_cannot_be_selected(self):
self.register_user_in_ccx()
self.client.login(username=self.user.username, password="test")
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
# delete the ccx
self.ccx.delete() # pylint: disable=no-member
response = self.client.get(switch_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.get('Location', '').endswith(self.target_url)) # pylint: disable=no-member
# we tried to select the ccx but it doesn't exist anymore, so we are
# switched back to the mooc view
self.verify_active_ccx(self.client)
def test_revoking_ccx_membership_revokes_active_ccx(self):
self.register_user_in_ccx(active=True)
self.client.login(username=self.user.username, password="test")
# ensure ccx is active in the request session
switch_url = reverse(
'switch_active_ccx',
args=[self.course.id.to_deprecated_string(), self.ccx.id]
)
self.client.get(switch_url)
self.verify_active_ccx(self.client, self.ccx.id)
# unenroll the user from the ccx
self.revoke_ccx_registration()
# request the course root and verify that the ccx is not active
self.client.get(self.target_url)
self.verify_active_ccx(self.client)
def flatten(seq):
"""
For [[1, 2], [3, 4]] returns [1, 2, 3, 4]. Does not recurse.
"""
return [x for sub in seq for x in sub]
def iter_blocks(course):
"""
Returns an iterator over all of the blocks in a course.
"""
def visit(block):
""" get child blocks """
yield block
for child in block.get_children():
for descendant in visit(child): # wish they'd backport yield from
yield descendant
return visit(course)
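# A minimal sketch of flatten()'s single-level behaviour (illustrative only, not
# part of the original test module): deeper nesting is left untouched.
def _flatten_example():
    assert flatten([[1, 2], [3, 4]]) == [1, 2, 3, 4]
    assert flatten([[[1], 2], [3]]) == [[1], 2, 3]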
|
antonve/s4-project-mooc
|
lms/djangoapps/ccx/tests/test_views.py
|
Python
|
agpl-3.0
| 30,393
|
[
"VisIt"
] |
aa178dde99606ec5205d7ba70a2215b524a030383a8f790a3d15368286c80023
|
#!/usr/bin/env python
'''
Master loader for CANON October (Fall) Campaign 2020
'''
import os
import sys
from datetime import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)
from CANON import CANONLoader
import timing
cl = CANONLoader('stoqs_canon_october2020', 'CANON - October 2020',
description='October 2020 shipless campaign in Monterey Bay (CN20F)',
x3dTerrains={
'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'name': 'Monterey25_10x',
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
},
},
grdTerrain=os.path.join(parentDir, 'Monterey25.grd')
)
startdate = datetime(2020, 10, 4)
enddate = datetime(2020, 10, 24)
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
######################################################################
# GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662a updated parameter names in netCDF file
cl.l_662a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line67/'
cl.l_662a_files = [ 'OS_Glider_L_662_20200615_TS.nc', ]
cl.l_662a_parms = ['temperature', 'salinity', 'fluorescence','oxygen']
cl.l_662a_startDatetime = startdate
cl.l_662a_endDatetime = enddate
# NPS_34 ##
cl.nps34_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps34_files = [ 'OS_Glider_NPS_G34_20201006_TS.nc' ]
cl.nps34_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps34_startDatetime = startdate
cl.nps34_endDatetime = enddate
# NPS_29 ##
cl.nps29_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps29_files = [ 'OS_Glider_NPS_G29_20201006_TS.nc' ]
cl.nps29_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps29_startDatetime = startdate
cl.nps29_endDatetime = enddate
######################################################################
# Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
##cl.wg_tex_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/final/'
##cl.wg_tex_files = [ 'WG_Tex_all_final.nc' ]
##cl.wg_tex_parms = [ 'wind_dir', 'wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'density', 'bb_470', 'bb_650', 'chl' ]
##cl.wg_tex_startDatetime = startdate
##cl.wg_tex_endDatetime = enddate
# WG Hansen - All instruments combined into one file - one time coordinate
cl.wg_Hansen_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Hansen_files = [
'wgHansen/20201005/realTime/20201005.nc'
]
cl.wg_Hansen_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp_float', 'sal_float', 'water_temp_sub',
'sal_sub', 'bb_470', 'bb_650', 'chl', 'beta_470', 'beta_650', 'pH', 'O2_conc_float','O2_conc_sub' ] # two ctds (_float, _sub), no CO2
cl.wg_Hansen_depths = [ 0 ]
cl.wg_Hansen_startDatetime = startdate
cl.wg_Hansen_endDatetime = enddate
# WG Tiny - All instruments combined into one file - one time coordinate
cl.wg_Tiny_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Tiny_files = [
'wgTiny/20201006/realTime/20201005.nc'
]
cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'bb_470', 'bb_650', 'chl',
'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_depths = [ 0 ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate
######################################################################
# MOORINGS
######################################################################
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
'202008/OS_M1_20200825hourly_CMSTV.nc', ]
cl.m1_parms = [
'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.stride = 10
elif cl.args.stride:
cl.stride = cl.args.stride
cl.loadM1()
##cl.loadL_662a()
cl.load_NPS29()
cl.load_NPS34()
cl.load_wg_Tiny()
cl.load_wg_Hansen()
# Problem with loading both temperature & salinity - for now load just one of them
#_cl.makai_base = 'http://dods.mbari.org/opendap/data/lrauv/makai/realtime/sbdlogs/2020/202010/'
#_cl.makai_files = ['20201008T014813/shore_i.nc']
#_cl.makai_parms = ['chlorophyll', 'temperature', 'salinity']
#_cl.loadLRAUV('makai', critSimpleDepthTime=0.1, build_attrs=False)
# Previously "fixed" load
#_cl.whoidhs_base = 'http://dods.mbari.org/opendap/data/lrauv/whoidhs/realtime/sbdlogs/2019/201906/'
#_cl.whoidhs_files = ['20190612T024430/shore_i.nc']
#_cl.whoidhs_parms = ['concentration_of_chromophoric_dissolved_organic_matter_in_sea_water', 'mass_concentration_of_chlorophyll_in_sea_water', ]
##cl.whoidhs_parms = ['concentration_of_chromophoric_dissolved_organic_matter_in_sea_water']
##cl.whoidhs_parms = ['mass_concentration_of_chlorophyll_in_sea_water', ]
#_cl.loadLRAUV('whoidhs', critSimpleDepthTime=0.1, build_attrs=False)
##cl.loadLRAUV('makai', startdate, enddate, critSimpleDepthTime=0.1, sbd_logs=True,
## parameters=['chlorophyll', 'temperature'])
##cl.loadLRAUV('pontus', startdate, enddate, critSimpleDepthTime=0.1, sbd_logs=True,
## parameters=['chlorophyll', 'temperature'])
cl.loadLRAUV('makai', startdate, enddate)
cl.loadLRAUV('pontus', startdate, enddate)
cl.loadDorado(startdate, enddate, build_attrs=True)
##cl.loadSubSamples()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print("All Done.")
|
duane-edgington/stoqs
|
stoqs/loaders/CANON/loadCANON_october2020.py
|
Python
|
gpl-3.0
| 6,445
|
[
"NetCDF"
] |
21c7fc23be905e1485b79b5ae6a6de5281814b9a387605f4ff7018337343f08a
|
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to preprocessing inputs."""
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
def flip_dim(tensor_list, prob=0.5, dim=1):
"""Randomly flips a dimension of the given tensor.
The decision to randomly flip the `Tensors` is made together. In other words,
all or none of the images passed in are flipped.
Note that tf.random_flip_left_right and tf.random_flip_up_down aren't used so
that we can control for the probability as well as ensure the same decision
is applied across the images.
Args:
tensor_list: A list of `Tensors` with the same number of dimensions.
prob: The probability of a left-right flip.
dim: The dimension to flip, 0, 1, ..
Returns:
outputs: A list of the possibly flipped `Tensors` as well as an indicator
`Tensor` at the end whose value is `True` if the inputs were flipped and
`False` otherwise.
Raises:
ValueError: If dim is negative or greater than the dimension of a `Tensor`.
"""
random_value = tf.random.uniform([])
def flip():
flipped = []
for tensor in tensor_list:
if dim < 0 or dim >= len(tensor.get_shape().as_list()):
raise ValueError('dim must represent a valid dimension.')
flipped.append(tf.compat.v1.reverse_v2(tensor, [dim]))
return flipped
is_flipped = tf.less_equal(random_value, prob)
outputs = tf.cond(is_flipped, flip, lambda: tensor_list)
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs.append(is_flipped)
return outputs
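# Illustrative sketch: an image and its label are flipped together along the
# width dimension, and the trailing element reports whether the flip happened.
def _flip_dim_example():
    image = tf.zeros([4, 6, 3], dtype=tf.float32)
    label = tf.zeros([4, 6, 1], dtype=tf.int32)
    flipped_image, flipped_label, is_flipped = flip_dim([image, label], prob=0.5, dim=1)
    return flipped_image, flipped_label, is_flipped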
def _image_dimensions(image, rank):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list corresponding to the dimensions of the input image. Dimensions
that are statically known are python integers, otherwise they are integer
scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = tf.unstack(tf.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
def get_label_resize_method(label):
"""Returns the resize method of labels depending on label dtype.
Args:
label: Groundtruth label tensor.
Returns:
tf.image.ResizeMethod.BILINEAR, if label dtype is floating.
tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer.
Raises:
ValueError: If label is neither floating nor integer.
"""
if label.dtype.is_floating:
return tf.image.ResizeMethod.BILINEAR
elif label.dtype.is_integer:
return tf.image.ResizeMethod.NEAREST_NEIGHBOR
else:
raise ValueError('Label type must be either floating or integer.')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
target_width, pad_value):
"""Pads the given image with the given pad_value.
Works like tf.image.pad_to_bounding_box, except it can pad the image
with any given arbitrary pad value and also handle images whose sizes are not
known during graph construction.
Args:
image: 3-D tensor with shape [height, width, channels]
offset_height: Number of rows of zeros to add on top.
offset_width: Number of columns of zeros to add on the left.
target_height: Height of output image.
target_width: Width of output image.
pad_value: Value to pad the image tensor with.
Returns:
3-D tensor of shape [target_height, target_width, channels].
Raises:
ValueError: If the shape of image is incompatible with the offset_* or
target_* arguments.
"""
with tf.compat.v1.name_scope(None, 'pad_to_bounding_box', [image]):
image = tf.convert_to_tensor(image, name='image')
original_dtype = image.dtype
if original_dtype != tf.float32 and original_dtype != tf.float64:
# If image dtype is not float, we convert it to int32 to avoid overflow.
image = tf.cast(image, tf.int32)
image_rank_assert = tf.Assert(
tf.logical_or(
tf.equal(tf.rank(image), 3),
tf.equal(tf.rank(image), 4)),
['Wrong image tensor rank.'])
with tf.control_dependencies([image_rank_assert]):
image -= pad_value
image_shape = image.get_shape()
is_batch = True
if image_shape.ndims == 3:
is_batch = False
image = tf.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = tf.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image.get_shape().ndims != 4:
raise ValueError('Input image must have either 3 or 4 dimensions.')
_, height, width, _ = _image_dimensions(image, rank=4)
target_width_assert = tf.Assert(
tf.greater_equal(
target_width, width),
['target_width must be >= width'])
target_height_assert = tf.Assert(
tf.greater_equal(target_height, height),
['target_height must be >= height'])
with tf.control_dependencies([target_width_assert]):
after_padding_width = target_width - offset_width - width
with tf.control_dependencies([target_height_assert]):
after_padding_height = target_height - offset_height - height
offset_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(after_padding_width, 0),
tf.greater_equal(after_padding_height, 0)),
['target size not possible with the given target offsets'])
batch_params = tf.stack([0, 0])
height_params = tf.stack([offset_height, after_padding_height])
width_params = tf.stack([offset_width, after_padding_width])
channel_params = tf.stack([0, 0])
with tf.control_dependencies([offset_assert]):
paddings = tf.stack([batch_params, height_params, width_params,
channel_params])
padded = tf.pad(image, paddings)
if not is_batch:
padded = tf.squeeze(padded, axis=[0])
outputs = padded + pad_value
if outputs.dtype != original_dtype:
outputs = tf.cast(outputs, original_dtype)
return outputs
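# Illustrative sketch: pad a 2x2 image to 4x4, filling the border with an
# arbitrary pad value instead of zeros.
def _pad_to_bounding_box_example():
    image = tf.ones([2, 2, 3], dtype=tf.float32)
    padded = pad_to_bounding_box(image, offset_height=1, offset_width=1,
                                 target_height=4, target_width=4, pad_value=127.5)
    return padded  # shape [4, 4, 3]; border pixels equal 127.5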
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
The cropped (and resized) image.
Raises:
ValueError: if `image` doesn't have rank of 3.
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
if len(image.get_shape().as_list()) != 3:
raise ValueError('input must have rank of 3')
original_channels = image.get_shape().as_list()[2]
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
image = tf.reshape(image, cropped_shape)
image.set_shape([crop_height, crop_width, original_channels])
return image
def random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3), [
'Wrong rank for tensor %d in image_list [expected] [actual]', i, 3,
image_rank
])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height), [
'Wrong height for tensor %d in image_list [expected][actual]', i,
height, image_height
])
width_assert = tf.Assert(
tf.equal(width, image_width), [
'Wrong width for tensor %d in image_list [expected][actual]', i,
width, image_width
])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random.uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random.uniform([],
maxval=max_offset_height,
dtype=tf.int32)
offset_width = tf.random.uniform([], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
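# Illustrative sketch: the same random 321x321 window is cut out of both the
# image and its label so the two stay aligned.
def _random_crop_example():
    image = tf.zeros([400, 500, 3], dtype=tf.float32)
    label = tf.zeros([400, 500, 1], dtype=tf.int32)
    cropped_image, cropped_label = random_crop([image, label], 321, 321)
    return cropped_image, cropped_label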
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
"""Gets a random scale value.
Args:
min_scale_factor: Minimum scale value.
max_scale_factor: Maximum scale value.
step_size: The step size from minimum to maximum value.
Returns:
A tensor with random scale value selected between minimum and maximum value.
If `min_scale_factor` and `max_scale_factor` are the same, a number is
returned instead.
Raises:
ValueError: min_scale_factor has unexpected value.
"""
if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
raise ValueError('Unexpected value of min_scale_factor.')
if min_scale_factor == max_scale_factor:
return np.float32(min_scale_factor)
# When step_size = 0, we sample the value uniformly from [min, max).
if step_size == 0:
return tf.random.uniform([1],
minval=min_scale_factor,
maxval=max_scale_factor)
# When step_size != 0, we randomly select one discrete value from [min, max].
num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
scale_factors = tf.lin_space(min_scale_factor, max_scale_factor,
num_steps)
shuffled_scale_factors = tf.compat.v1.random_shuffle(scale_factors)
return shuffled_scale_factors[0]
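# Illustrative sketch: step_size == 0 samples a scale uniformly from [0.5, 2.0),
# while step_size == 0.25 picks one of the discrete values 0.5, 0.75, ..., 2.0.
def _get_random_scale_example():
    continuous_scale = get_random_scale(0.5, 2.0, step_size=0)
    discrete_scale = get_random_scale(0.5, 2.0, step_size=0.25)
    return continuous_scale, discrete_scale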
def randomly_scale_image_and_label(image, label=None, scale=1.0):
"""Randomly scales image and label.
Args:
image: Image with shape [height, width, 3].
label: Label with shape [height, width, 1].
scale: The value to scale image and label.
Returns:
Scaled image and label.
"""
# No random scaling if scale == 1.
if scale == 1.0:
return image, label
image_shape = tf.shape(image)
new_dim = tf.cast(
tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale,
tf.int32)
# Need squeeze and expand_dims because image interpolation takes
# 4D tensors as input.
## tf1 op without anti-aliasing
# image = tf.squeeze(
# tf.compat.v1.image.resize_bilinear(
# tf.expand_dims(image, 0), new_dim, align_corners=True), [0])
## tf2 op with anti-aliasing
image = tf.compat.v2.image.resize(
image, new_dim, method='bilinear', antialias=True)
if label is not None:
label = tf.compat.v1.image.resize(
label,
new_dim,
method=get_label_resize_method(label),
align_corners=True)
return image, label
def resolve_shape(tensor, rank=None, scope=None):
"""Fully resolves the shape of a Tensor.
Use as much as possible the shape components already known during graph
creation and resolve the remaining ones during runtime.
Args:
tensor: Input tensor whose shape we query.
rank: The rank of the tensor, provided that we know it.
scope: Optional name scope.
Returns:
shape: The full shape of the tensor.
"""
with tf.compat.v1.name_scope(scope, 'resolve_shape', [tensor]):
if rank is not None:
shape = tensor.get_shape().with_rank(rank).as_list()
else:
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape
def resize_to_range_helper(input_shape, min_size, max_size=None, factor=None,
keep_aspect_ratio=True):
"""Determines output size in specified range.
Adapted from //image/understanding/object_detection/core/preprocessor.py
The output size can be described by two cases:
1. If the image can be rescaled so its minimum size is equal to min_size
without the other side exceeding max_size, then do so.
2. Otherwise, resize so the largest side is equal to max_size.
An integer in `range(factor)` is added to the computed sides so that the
final dimensions are multiples of `factor` plus one.
Args:
input_shape: A 2-element list with the [height, width] of the input image.
min_size: (scalar) desired size of the smaller image side.
max_size: (optional) (scalar) maximum allowed size of the larger image
side.
factor: Make output size multiple of factor plus one.
keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
will be resized while keeping the original aspect ratio. If False, the
input will be resized to [max_resize_value, max_resize_value] without
keeping the original aspect ratio.
Returns:
A 1-D tensor containing the [new_height, new_width].
"""
input_height, input_width = input_shape
input_height = tf.cast(input_height, tf.float32)
input_width = tf.cast(input_width, tf.float32)
input_min_size = tf.minimum(input_height, input_width)
# Calculate the larger of the possible sizes
min_size = tf.cast(min_size, tf.float32)
large_scale_factor = min_size / input_min_size
large_height = tf.cast(tf.floor(input_height * large_scale_factor), tf.int32)
large_width = tf.cast(tf.floor(input_width * large_scale_factor), tf.int32)
large_size = tf.stack([large_height, large_width])
if max_size is not None:
# Calculate the smaller of the possible sizes, use that if the larger
# is too big.
input_max_size = tf.maximum(input_height, input_width)
max_size = tf.cast(max_size, tf.float32)
small_scale_factor = max_size / input_max_size
small_height = tf.cast(
tf.floor(input_height * small_scale_factor), tf.int32)
small_width = tf.cast(tf.floor(input_width * small_scale_factor), tf.int32)
small_size = tf.stack([small_height, small_width])
output_shape = tf.cond(
tf.cast(tf.reduce_max(large_size), tf.float32) > max_size,
lambda: small_size,
lambda: large_size)
else:
output_shape = large_size
# Ensure that both output sides are multiples of factor plus one.
if factor is not None:
output_shape += (factor - (output_shape - 1) % factor) % factor
if not keep_aspect_ratio:
# If not keeping the aspect ratio, we resize everything to max_size, allowing
# us to do pre-processing without extra padding.
output_shape = [tf.reduce_max(output_shape), tf.reduce_max(output_shape)]
return output_shape
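# Illustrative sketch: a 600x800 input with min_size=513 and max_size=1025 is
# scaled so its smaller side becomes 513, yielding roughly [513, 684]; with
# max_size=600 the larger side would be capped at 600 instead.
def _resize_to_range_helper_example():
    return resize_to_range_helper([600, 800], min_size=513, max_size=1025)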
def resize_to_range(image,
label=None,
min_size=None,
max_size=None,
factor=None,
keep_aspect_ratio=True,
align_corners=True,
label_layout_is_chw=False,
scope=None,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image or label so their sides are within the provided range.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum size is equal to min_size
without the other side exceeding max_size, then do so.
2. Otherwise, resize so the largest side is equal to max_size.
An integer in `range(factor)` is added to the computed sides so that the
final dimensions are multiples of `factor` plus one.
Args:
image: A 3D tensor of shape [height, width, channels].
label: (optional) A 3D tensor of shape [height, width, channels] (default)
or [channels, height, width] when label_layout_is_chw = True.
min_size: (scalar) desired size of the smaller image side.
max_size: (scalar) maximum allowed size of the larger image side. Note
that the output dimension is no larger than max_size and may be slightly
smaller than max_size when factor is not None.
factor: Make output size multiple of factor plus one.
keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
will be resized while keeping the original aspect ratio. If False, the
input will be resized to [max_resize_value, max_resize_value] without
keeping the original aspect ratio.
align_corners: If True, exactly align all 4 corners of input and output.
label_layout_is_chw: If true, the label has shape [channel, height, width].
We support this case because for some instance segmentation datasets, the
instance segmentation is saved as [num_instances, height, width].
scope: Optional name scope.
method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.
Returns:
A 3-D tensor of shape [new_height, new_width, channels], where the image
has been resized (with the specified method) so that
min(new_height, new_width) == ceil(min_size) or
max(new_height, new_width) == ceil(max_size).
Raises:
ValueError: If the image is not a 3D tensor.
"""
with tf.compat.v1.name_scope(scope, 'resize_to_range', [image]):
new_tensor_list = []
min_size = tf.cast(min_size, tf.float32)
if max_size is not None:
max_size = tf.cast(max_size, tf.float32)
# Modify the max_size to be a multiple of factor plus 1 and make sure the
# max dimension after resizing is no larger than max_size.
if factor is not None:
max_size = (max_size - (max_size - 1) % factor)
[orig_height, orig_width, _] = resolve_shape(image, rank=3)
new_size = resize_to_range_helper(input_shape=[orig_height, orig_width],
min_size=min_size,
max_size=max_size,
factor=factor,
keep_aspect_ratio=keep_aspect_ratio)
new_tensor_list.append(tf.image.resize(
image, new_size, method=method, align_corners=align_corners))
if label is not None:
if label_layout_is_chw:
# Input label has shape [channel, height, width].
resized_label = tf.expand_dims(label, 3)
resized_label = tf.image.resize(
resized_label,
new_size,
method=get_label_resize_method(label),
align_corners=align_corners)
resized_label = tf.squeeze(resized_label, 3)
else:
# Input label has shape [height, width, channel].
resized_label = tf.image.resize(
label,
new_size,
method=get_label_resize_method(label),
align_corners=align_corners)
new_tensor_list.append(resized_label)
else:
new_tensor_list.append(None)
return new_tensor_list
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the image with separable convolution.
Args:
image: Tensor of shape [height, width, channels], dtype float
kernel_size: kernel size of the filter
sigma: Sigma value for the Gaussian (std)
padding: Padding mode for the convolution. 'SAME' or 'VALID'
Returns:
blurred: The blurred image `Tensor` with the same shape as the input `image`.
"""
radius = tf.to_int32(kernel_size / 2)
kernel_size = radius * 2 + 1
x = tf.to_float(tf.range(-radius, radius + 1))
blur_filter = tf.exp(
-tf.pow(x, 2.0) / (2.0 * tf.pow(tf.to_float(sigma), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
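# Illustrative sketch: blur an RGB image with a 5-tap separable Gaussian kernel.
def _gaussian_blur_example():
    image = tf.random.uniform([32, 32, 3], dtype=tf.float32)
    return gaussian_blur(image, kernel_size=5, sigma=1.0)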
def random_gaussian_blur(image, prob=0.5):
"""Randomly blur an image.
Args:
image: Tensor
prob: probability to apply Gaussian blur
Returns:
output: blurred image
"""
random_value = tf.random.uniform([])
is_blurred = tf.less_equal(random_value, prob)
## EfficientSeg style
sigma = tf.random.uniform([]) * 1.15 + 0.15
radius = tf.cast(sigma * 4.0 + 0.5, tf.int32)
kernel_size = radius * 2 + 1
blurred = gaussian_blur(image, kernel_size, sigma)
output = tf.cond(is_blurred, lambda: blurred, lambda: image)
return output
def color_jitter(image, brightness=0, contrast=0, saturation=0, hue=0):
"""Distorts the color of the image (jittering order is random).
Args:
image: The input image tensor. Must be in [0, 1]!
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_foo():
if brightness == 0:
return x
else:
return tf.image.random_brightness(x, max_delta=brightness)
def contrast_foo():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
def saturation_foo():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
def hue_foo():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
x = tf.cond(tf.less(i, 2),
lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
return x
perm = tf.random_shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def to_grayscale(image, keep_channels=True):
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def random_color_jitter(image, prob=1.0):
"""Randomly do color jittering on the given image.
Args:
image: Tensor
prob: probability to apply color jittering
Returns:
output: color-jittered image
"""
brightness = 0.5
contrast = 0.5
saturation = 0.5
hue = 0.25
random_value = tf.random.uniform([])
is_jittered = tf.less_equal(random_value, prob)
jittered = color_jitter(image, brightness, contrast, saturation, hue)
output = tf.cond(is_jittered, lambda: jittered, lambda: image)
return output
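# Illustrative sketch: apply the jitter with probability 0.5; the input must
# already be a float image scaled to [0, 1], as color_jitter expects.
def _random_color_jitter_example():
    image = tf.random.uniform([32, 32, 3], minval=0., maxval=1.)
    return random_color_jitter(image, prob=0.5)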
def cutout_with_mask(image,
label,
pad_size,
mean_pixel,
ignore_label=255,
valid=None):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `image`. The pixel values filled in will be set to
`mean_pixel`. The location where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type float32.
label: An image Tensor of type int32.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
mean_pixel: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
ignore_label: What value to fill in the label in the area that has the
cutout mask applied to it.
valid: (optional) A validity mask Tensor; pixels under the cutout mask are
set to zero in it as well.
Returns:
The cutout image and label Tensors and, when `valid` is provided, the
updated validity mask.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
label = tf.where(
tf.equal(mask, 0),
tf.ones_like(label, dtype=label.dtype) * ignore_label,
label)
im_mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(im_mask, 0),
tf.ones_like(image, dtype=image.dtype) * mean_pixel,
image)
if valid is not None:
valid = tf.where(
tf.equal(mask, 0),
tf.zeros_like(valid, dtype=valid.dtype),
valid)
return image, label, valid
return image, label
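# Illustrative sketch: cut a random 32x32 (2*pad_size) square out of an image,
# fill it with a stand-in mean pixel value (0.5 here), and mark the same square
# as ignore_label in the groundtruth.
def _cutout_with_mask_example():
    image = tf.random.uniform([128, 128, 3], dtype=tf.float32)
    label = tf.zeros([128, 128, 1], dtype=tf.int32)
    return cutout_with_mask(image, label, pad_size=16, mean_pixel=0.5)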
|
googleinterns/wss
|
core/preprocess_utils.py
|
Python
|
apache-2.0
| 28,986
|
[
"Gaussian"
] |
962dd59d2cd442fe2118ff186f20c4781f64c2470107d30e00327c302d89dcde
|
"""
==================
Test the SeqSearch
==================
"""
# Modules #
import os, inspect
from seqenv.fasta import FASTA
from seqenv.seqsearch.parallel import ParallelSeqSearch
# Constants #
current_script = inspect.getframeinfo(inspect.currentframe()).filename
current_dir = os.path.dirname(os.path.abspath(current_script)) + '/'
fasta = FASTA(current_dir + "../examples/samples/community.fasta")
################################################################################
search = ParallelSeqSearch(fasta, 'nucl', 'nt', algorithm='blast', num_threads=3)
search.run()
|
xapple/seqenv
|
tests/seqsearch.py
|
Python
|
mit
| 582
|
[
"BLAST"
] |
746d5499d8b2d82da2860c5e9c1f748e8b2be39342a0abcc979bdb98ce609e32
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
from regexer import Regexer
from helpers import taghelperbase
#from logger import Logger
class HtmlHelper(taghelperbase.TagHelperBase):
"""Class that could help with parsing of simple HTML"""
def GetTagContent(self, tag, *args, **kwargs):
"""Gets the content of an HTML <tag>
Arguments:
tag : string - name of tag to search for.
*args : dictionary - each positional argument is interpreted as an html
attribute. 'cls' is translated to class
attribute. The attribute with value None
is retrieved.
Keyword Arguments:
firstOnly : [opt] boolean - only return the first result. Default: True
Returns:
The content of the found tag. If no match is found an empty string is
returned.
Example: ('div', {'cls':'test'}, {'id':'divTest'}) will match
<div class="test" id="divTest">...content...</div>
"""
firstOnly = True
if "firstOnly" in kwargs:
firstOnly = kwargs["firstOnly"]
#Logger.Trace("Setting 'firstOnly' to '%s'", firstOnly)
htmlRegex = "<%s" % (tag,)
#Logger.Debug(args)
for arg in args:
name = arg.keys()[0]
value = arg[arg.keys()[0]]
#Logger.Debug("Name: %s, Value: %s", name, value)
# to keep working with older versions where class could not be passed
if name == "cls":
name = "class"
htmlRegex = htmlRegex + '[^>]*%s\W*=\W*["\']%s["\']' % (name, value)
htmlRegex = htmlRegex + "[^>]*>([^<]+)</"
#Logger.Debug("HtmlRegex = %s", htmlRegex)
result = Regexer.DoRegex(htmlRegex, self.data)
if len(result) > 0:
if firstOnly:
return result[0].strip()
else:
return result
else:
return ""
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/helpers/htmlhelper.py
|
Python
|
gpl-2.0
| 2,786
|
[
"VisIt"
] |
a1e911a29f10085c3080527a0516e7346fbea3ec56a768ec15df936a07168f51
|
../../../../../../../share/pyshared/orca/scripts/apps/gdmlogin/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/gdmlogin/script.py
|
Python
|
gpl-3.0
| 72
|
[
"ORCA"
] |
8643df406ba4a4cdd6013098a8a49f7f0f452e7c2cb8a6666a6b666e4a31a504
|
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.algorithms.misc as misc
from ...misc.utils import get_affine, get_data_dims, get_vox_dims
def create_camino_dti_pipeline(name="dtiproc"):
"""Creates a pipeline that does the same diffusion processing as in the
:ref:`dmri_camino_dti` example script. Given a diffusion-weighted image,
b-values, and b-vectors, the workflow will return the tractography
computed from diffusion tensors and from PICo probabilistic tractography.
Example
-------
>>> import os
>>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti")
>>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii')
>>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs')
>>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals')
>>> nipype_camino_dti.run() # doctest: +SKIP
Inputs::
inputnode.dwi
inputnode.bvecs
inputnode.bvals
Outputs::
outputnode.fa
outputnode.trace
outputnode.tracts_pico
outputnode.tracts_dt
outputnode.tensors
"""
inputnode1 = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode1")
"""
Setup for Diffusion Tensor Computation
--------------------------------------
In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True
"""
Second, diffusion tensors are fit to the voxel-order data.
"""
dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit')
"""
Next, a lookup table is generated from the schemefile and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
"""
In this tutorial we implement probabilistic tractography using the PICo algorithm.
PICo tractography requires an estimate of the fibre direction and a model of its
uncertainty in each voxel; this is produced using the following node.
"""
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'
"""
An FSL BET node generates a brain mask from the diffusion image for seeding the PICo tractography.
"""
bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True
"""
Finally, tractography is performed.
First DT streamline tractography.
"""
trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")
"""
Now camino's Probabilistic Index of Connectivity (PICo) algorithm.
In this tutorial, we will use only 1 iteration for time-saving purposes.
"""
trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1
"""
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
"""
cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'
cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'
"""
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes.
"""
#vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
#procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
#procstreamlines.inputs.outputtracts = 'oogl'
"""
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers.
"""
fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa')
#md = pe.Node(interface=camino.MD(),name='md')
trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
analyzeheader_fa = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
#analyzeheader_md = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_md")
#analyzeheader_md.inputs.datatype = "double"
#analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace')
fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
Since we have now created all our nodes, we can now define our workflow and start making connections.
"""
tractography = pe.Workflow(name='tractography')
tractography.connect([(inputnode1, bet,[("dwi","in_file")])])
"""
File format conversion
"""
tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]),
(inputnode1, fsl2scheme, [("bvecs", "bvec_file"),
("bvals", "bval_file")])
])
"""
Tensor fitting
"""
tractography.connect([(image2voxel, dtifit,[['voxel_order','in_file']]),
(fsl2scheme, dtifit,[['scheme','scheme_file']])
])
"""
Workflow for applying DT streamline tractography
"""
tractography.connect([(bet, trackdt,[("mask_file","seed_file")])])
tractography.connect([(dtifit, trackdt,[("tensor_fitted","in_file")])])
"""
Workflow for applying PICo
"""
tractography.connect([(bet, trackpico,[("mask_file","seed_file")])])
tractography.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])])
tractography.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])])
tractography.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])])
tractography.connect([(picopdfs, trackpico,[("pdfs","in_file")])])
# Mean diffusivity still appears broken
#tractography.connect([(dtifit, md,[("tensor_fitted","in_file")])])
#tractography.connect([(md, analyzeheader_md,[("md","in_file")])])
#tractography.connect([(inputnode, analyzeheader_md,[(('dwi', get_vox_dims), 'voxel_dims'),
#(('dwi', get_data_dims), 'data_dims')])])
#This line is commented out because the ProcStreamlines node keeps throwing memory errors
#tractography.connect([(track, procstreamlines,[("tracked","in_file")])])
"""
Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
tensor fitting.
This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable.
"""
tractography.connect([(dtifit, fa,[("tensor_fitted","in_file")])])
tractography.connect([(fa, analyzeheader_fa,[("fa","in_file")])])
tractography.connect([(inputnode1, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii,[('fa','data_file')])])
tractography.connect([(inputnode1, fa2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, trace,[("tensor_fitted","in_file")])])
tractography.connect([(trace, analyzeheader_trace,[("trace","in_file")])])
tractography.connect([(inputnode1, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii,[('trace','data_file')])])
tractography.connect([(inputnode1, trace2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, dteig,[("tensor_fitted","in_file")])])
tractography.connect([(trackpico, cam2trk_pico, [('tracked','in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked','in_file')])])
tractography.connect([(inputnode1, cam2trk_pico,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(inputnode1, cam2trk_dt,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
inputnode= pe.Node(interface = util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode")
outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa",
"trace",
"tracts_pico",
"tracts_dt",
"tensors"]),
name="outputnode")
workflow = pe.Workflow(name=name)
workflow.base_output_dir=name
workflow.connect([(inputnode, tractography, [("dwi", "inputnode1.dwi"),
("bvals", "inputnode1.bvals"),
("bvecs", "inputnode1.bvecs")])])
workflow.connect([(tractography, outputnode, [("cam2trk_dt.trackvis", "tracts_dt"),
("cam2trk_pico.trackvis", "tracts_pico"),
("fa2nii.nifti_file", "fa"),
("trace2nii.nifti_file", "trace"),
("dtifit.tensor_fitted", "tensors")])
])
return workflow
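# Illustrative sketch: build the pipeline and render its graph to disk (assumes
# graphviz is available); see the docstring above for wiring real dwi/bvec/bval
# inputs and running it.
if __name__ == '__main__':
    dtiproc = create_camino_dti_pipeline(name="dtiproc")
    dtiproc.write_graph(graph2use='flat')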
|
christianbrodbeck/nipype
|
nipype/workflows/dmri/camino/diffusion.py
|
Python
|
bsd-3-clause
| 10,454
|
[
"ParaView",
"VTK"
] |
cb4d3b5fb054f93d8d73d490b1d3fcda5da8dd511bbae259efa653a5d379dcf0
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
import warnings
import numpy as np
from scipy import linalg
from scipy.special import expit
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model._base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .utils.extmath import softmax
from .preprocessing import StandardScaler
from .utils.validation import _deprecate_positional_args
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or float type')
return s
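# Illustrative sketch: the same data with no shrinkage, Ledoit-Wolf shrinkage and
# a fixed shrinkage constant (module-internal helper, shown here for clarity).
def _cov_example():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    return _cov(X), _cov(X, shrinkage='auto'), _cov(X, shrinkage=0.1)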
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like of shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
def _class_cov(X, y, priors, shrinkage=None):
"""Compute weighted within-class covariance matrix.
The per-class covariance are weighted by the class priors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like of shape (n_classes,)
Class priors.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like of shape (n_features, n_features)
Weighted within-class covariance matrix
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage))
return cov
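# Illustrative sketch: per-class means and the prior-weighted within-class
# covariance for a toy two-class problem.
def _class_stats_example():
    X = np.array([[0., 0.], [1., 1.], [9., 9.], [10., 10.]])
    y = np.array([0, 0, 1, 1])
    priors = np.array([0.5, 0.5])
    return _class_means(X, y), _class_cov(X, y, priors)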
class LinearDiscriminantAnalysis(LinearClassifierMixin,
TransformerMixin,
BaseEstimator):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions, using the
`transform` method.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : {'svd', 'lsqr', 'eigen'}, default='svd'
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array-like of shape (n_classes,), default=None
The class prior probabilities. By default, the class proportions are
inferred from the training data.
n_components : int, default=None
Number of components (<= min(n_classes - 1, n_features)) for
dimensionality reduction. If None, will be set to
min(n_classes - 1, n_features). This parameter only affects the
`transform` method.
store_covariance : bool, default=False
If True, explicitly compute the weighted within-class covariance
matrix when solver is 'svd'. The matrix is always computed
and stored for the other solvers.
.. versionadded:: 0.17
tol : float, default=1.0e-4
Absolute threshold for a singular value of X to be considered
significant, used to estimate the rank of X. Dimensions whose
singular values are non-significant are discarded. Only used if
solver is 'svd'.
.. versionadded:: 0.17
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : ndarray of shape (n_classes,)
Intercept term.
covariance_ : array-like of shape (n_features, n_features)
Weighted within-class covariance matrix. It corresponds to
`sum_k prior_k * C_k` where `C_k` is the covariance matrix of the
samples in class `k`. The `C_k` are estimated using the (potentially
shrunk) biased estimator of covariance. If solver is 'svd', only
exists when `store_covariance` is True.
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like of shape (n_classes, n_features)
Class-wise means.
priors_ : array-like of shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like of shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
Only available for 'svd' and 'eigen' solvers.
xbar_ : array-like of shape (n_features,)
Overall mean. Only present if solver is 'svd'.
classes_ : array-like of shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
@_deprecate_positional_args
def __init__(self, *, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, Vt = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
# Scaling of within covariance is: V' 1/S
scalings = (Vt[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, Vt = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, Vt.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
"""
X, y = self._validate_data(X, y, ensure_min_samples=2, estimator=self,
dtype=[np.float64, np.float32])
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueError("The number of samples must be more "
"than the number of classes.")
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if not np.isclose(self.priors_.sum(), 1.0):
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(len(self.classes_) - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, "
"n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2,
dtype=X.dtype)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1, dtype=X.dtype)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
decision = self.decision_function(X)
if self.classes_.size == 2:
proba = expit(decision)
return np.vstack([1-proba, proba]).T
else:
return softmax(decision)
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
prediction = self.predict_proba(X)
prediction[prediction == 0.0] += np.finfo(prediction.dtype).tiny
return np.log(prediction)
def decision_function(self, X):
"""Apply decision function to an array of samples.
The decision function is equal (up to a constant factor) to the
log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
classification setting this instead corresponds to the difference
`log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is (n_samples,), giving the
log likelihood ratio of the positive class.
"""
# Only override for the doc
return super().decision_function(X)
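# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): how the solvers
# implemented above are exercised through the public estimator API. The toy
# data and the helper name _lda_usage_sketch are assumptions made purely for
# demonstration.
def _lda_usage_sketch():
    import numpy as np
    X = np.array([[-1., -1.], [-2., -1.], [-3., -2.], [1., 1.], [2., 1.], [3., 2.]])
    y = np.array([1, 1, 1, 2, 2, 2])
    # 'svd' (the default) supports transform() but rejects shrinkage (see fit()).
    lda_svd = LinearDiscriminantAnalysis(solver='svd', n_components=1).fit(X, y)
    X_proj = lda_svd.transform(X)  # projected onto the single discriminant axis
    # 'lsqr' and 'eigen' accept shrinkage; 'lsqr' cannot transform().
    lda_lsqr = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
    return lda_svd.predict([[-0.8, -1.]]), X_proj, lda_lsqr.coef_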
class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : ndarray of shape (n_classes,), default=None
Class priors. By default, the class proportions are inferred from the
training data.
reg_param : float, default=0.0
Regularizes the per-class covariance estimates by transforming S2 as
``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,
where S2 corresponds to the `scaling_` attribute of a given class.
store_covariance : bool, default=False
If True, the class covariance matrices are explicitly computed and
stored in the `self.covariance_` attribute.
.. versionadded:: 0.17
tol : float, default=1.0e-4
Absolute threshold for a singular value to be considered significant,
used to estimate the rank of `Xk` where `Xk` is the centered matrix
of samples in class k. This parameter does not affect the
predictions. It only controls a warning that is raised when features
are considered to be collinear.
.. versionadded:: 0.17
Attributes
----------
covariance_ : list of len n_classes of ndarray \
of shape (n_features, n_features)
For each class, gives the covariance matrix estimated using the
samples of that class. The estimations are unbiased. Only present if
`store_covariance` is True.
means_ : array-like of shape (n_classes, n_features)
Class-wise means.
priors_ : array-like of shape (n_classes,)
Class priors (sum to 1).
rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)
For each class k an array of shape (n_features, n_k), where
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis. It corresponds to `V`, the matrix of eigenvectors
coming from the SVD of `Xk = U S Vt` where `Xk` is the centered
matrix of samples from class k.
scalings_ : list of len n_classes of ndarray of shape (n_k,)
For each class, contains the scaling of
the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system. It corresponds to `S^2 /
(n_samples - 1)`, where `S` is the diagonal matrix of singular values
from the SVD of `Xk`, where `Xk` is the centered matrix of samples
from class k.
classes_ : ndarray of shape (n_classes,)
Unique class labels.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
QuadraticDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
@_deprecate_positional_args
def __init__(self, *, priors=None, reg_param=0., store_covariance=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariance = store_covariance
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values (integers)
"""
X, y = self._validate_data(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
store_covariance = self.store_covariance
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
_, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariance or store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariance or store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
# return log posterior, see eq (4.12) p. 110 of the ESL.
check_is_fitted(self)
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
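# Note added for clarity (not in the original source): with
# Sigma_k = rotations_[k] * diag(scalings_[k]) * rotations_[k]^T, norm2 is the
# squared Mahalanobis distance (x - mu_k)^T Sigma_k^{-1} (x - mu_k) and
# u = log|Sigma_k|, so the value returned above is the quadratic discriminant
#     -0.5 * (Mahalanobis_k + log|Sigma_k|) + log(pi_k).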
def decision_function(self, X):
"""Apply decision function to an array of samples.
The decision function is equal (up to a constant factor) to the
log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
classification setting this instead corresponds to the difference
`log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is (n_samples,), giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return log of posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
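# ---------------------------------------------------------------------------
# Minimal sketch (an assumption, not part of the original file): the effect of
# reg_param on a toy per-class variance spectrum, mirroring the line
# S2 = ((1 - self.reg_param) * S2) + self.reg_param inside fit(). Near-zero
# variances are pulled up towards reg_param, which keeps the log-determinant
# and the Mahalanobis term in _decision_function well defined.
def _qda_reg_param_sketch(reg_param=0.1):
    import numpy as np
    S2 = np.array([2.5, 0.5, 1e-12])  # toy squared singular values / (n - 1)
    return (1.0 - reg_param) * S2 + reg_param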
|
bnaul/scikit-learn
|
sklearn/discriminant_analysis.py
|
Python
|
bsd-3-clause
| 30,056
|
[
"Gaussian"
] |
6af8faddf2957c23c2e44bbc9008dd528b3601c3806d1509834c5f7224482ffb
|
from brian2 import *
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=2.0)
# Neuron Parameters
g_l = 0.05 * msiemens / cm2 # Leak conductance (per membrane area)
cm = 1.0 * ufarad / cm2 # Specific membrane capacitance
E_l = -60 * mV # Resting potential
V_t = -50 * mV # Threshold
tau_w = 600 * ms # Adaptation time constant of w
Delta = 2.5 * mV # Slope factor: steepness of the exponential term near threshold
S = 20000 * um2 # Membrane area
a = 0.001 * usiemens # subthreshold adaptation conductance (dynamics of w)
b = 0.04 * namp # w increases by this value at each spike
# a = 0.03 * usiemens # dynamics of adaptation
f_m = g_l / cm
# tau_m = 1 / f_m
aux = S * cm
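# Note added for clarity (not in the original script): the curves plotted below
# are the voltage nullcline of the adaptive exponential integrate-and-fire model,
#     dv/dt = -(g_l/cm)*(v - E_l) + (g_l/cm)*Delta*exp((v - V_t)/Delta)
#             - (w_sta + b)/(S*cm) + I/(S*cm),
# evaluated term by term as y1 + y2 + y3 + I/aux in the loops further down.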
# Voltage and w stable values
v_aux = -55 * mV
v = np.linspace(-80, -40, 100) * mV
w_sta = (v_aux - E_l) * a
# b jumps
b = 0.04 * namp # w increases by this value at each spike
b_set = np.linspace(0, 5, 6) * b
# Current
I_o = 0.25
I = I_o * namp * np.ones(v.size)
fig = plt.figure(figsize=(16, 12))
data = 'Stability Analysis (a = ' + str(a) + ', b = ' + str(b) + ' )'
# fig.suptitle(data)
# First plot
ax1 = fig.add_subplot(121)
for index, b in enumerate(b_set):
y1 = -f_m * (v - E_l)
y2 = f_m * Delta * np.exp((v - V_t) / Delta)
y3 = - ((w_sta + b) / aux)
f = y1 + y2 + y3 + (I / aux)
ax1.plot(v / mV, f, label='Spikes= ' + str(index))
ax1.legend()
ax1.set_ylim([-1, 10])
ax1.axhline(0, color='black')
ax1.set_xlabel('Voltage')
ax1.set_ylabel('f(v) = dv /dt')
ax1.set_title(data)
ax1.annotate('$V_{re}$', xy=(E_l / mV, 0), xytext=(E_l / mV, -0.5),
arrowprops=dict(facecolor='red', shrink=0.05))
ax1.annotate('$V_{th}$', xy=(V_t / mV, 0), xytext=(V_t / mV, -0.5),
arrowprops=dict(facecolor='red', shrink=0.05))
# Second plot
# b jumps
b = 0.005 * namp # w increases by this value at each spike
b_set = np.linspace(0, 5, 6) * b
data = 'Stability Analysis (a = ' + str(a) + ', b = ' + str(b) + ' )'
ax2 = fig.add_subplot(122, sharey=ax1)
for index, b in enumerate(b_set):
y1 = -f_m * (v - E_l)
y2 = f_m * Delta * np.exp((v - V_t) / Delta)
y3 = - ((w_sta + b) / aux)
f = y1 + y2 + y3 + (I / aux)
ax2.plot(v / mV, f, label='Spikes= ' + str(index))
ax2.legend()
ax2.set_ylim([-1, 10])
ax2.axhline(0, color='black')
ax2.set_xlabel('Voltage')
# ax2.set_ylabel('f(v) = dv /dt')
ax2.set_title(data)
ax2.annotate('$V_{re}$', xy=(E_l / mV, 0), xytext=(E_l / mV, -0.5),
arrowprops=dict(facecolor='red', shrink=0.05))
ax2.annotate('$V_{th}$', xy=(V_t / mV, 0), xytext=(V_t / mV, -0.5),
arrowprops=dict(facecolor='red', shrink=0.05))
# Remove the axis
plt.setp(ax2.get_yticklabels(), visible=False)
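# Rough numerical check (an assumption, not in the original script): locate the
# fixed points of dv/dt for the last (b, I) combination computed above by
# looking for sign changes of f(v) on the sampled voltage grid.
f_vals = np.asarray(f)  # strip brian2 units for the sign test
crossings = np.where(np.diff(np.sign(f_vals)) != 0)[0]
for idx in crossings:
    print('approximate fixed point near v = %.1f mV' % float(v[idx] / mV))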
plt.show()
|
h-mayorquin/camp_india_2016
|
project3_sustained_activity/dynamical_study_jump.py
|
Python
|
mit
| 2,647
|
[
"NEURON"
] |
c0e78e6fc5a5e9fa92e1bfeb456d048789df06715676f7d9c7271f01eeafd72b
|
from Screens.Screen import Screen
from Components.Label import Label, MultiColorLabel
from Components.Pixmap import Pixmap
from Components.ServiceEventTracker import ServiceEventTracker
from Components.config import config
from enigma import eTimer, eEnv, iServiceInformation, iPlayableService, eDVBFrontendParametersSatellite, eDVBFrontendParametersCable, eDVBFrontendParametersTerrestrial
from nemesisTool import nemesisTool, parse_ecm
from Tools.Directories import fileExists, SCOPE_SKIN_IMAGE, SCOPE_CURRENT_SKIN, resolveFilename
import os
import re
t = nemesisTool()
class nemesisEI(Screen):
__module__ = __name__
def readEcmFile(self):
emuActive = t.readEmuActive()
return t.readEcmInfoFile(emuActive)
def readEmuName(self):
emuActive = t.readEmuActive()
return t.readEmuName(emuActive)
def readCsName(self):
csActive = t.readSrvActive()
return t.readSrvName(csActive)
def showEmuName(self):
self['emuname'].setText(self.readEmuName())
csName = self.readCsName()
if csName != 'None':
self['emuname'].setText(self['emuname'].getText() + ' / ' + csName)
def __init__(self, session):
Screen.__init__(self, session)
self.systemCod = [
'beta',
'bis',
'bul',
'dream',
'dre',
'conax',
'cw',
'irdeto',
'nagra',
'nds',
'seca',
'via']
self.systemState = [
'b_fta',
'b_card',
'b_emu',
'b_spider']
self.systemCaids = {
'06': 'irdeto',
'01': 'seca',
'18': 'nagra',
'05': 'via',
'0B': 'conax',
'17': 'beta',
'0D': 'cw',
'4A': 'dream',
'09': 'nds',
'4AE0': 'dre',
'4AE1': 'dre',
'55': 'bul',
'26': 'bis' }
for x in self.systemCod:
self[x] = MultiColorLabel()
for x in self.systemState:
self[x] = Label()
self['ecm_pict'] = Pixmap()
self['TunerInfo'] = Label()
self['SatInfo'] = Label()
self['EmuInfo'] = Label()
self['ecmtime'] = Label()
self['netcard'] = Label()
self['emuname'] = Label()
self['hops'] = Label()
self['reader'] = Label()
self.path = 'piconSys'
self.ecm_timer = eTimer()
self.ecm_timer.timeout.get().append(self._nemesisEI__updateEmuInfo)
self.ecmTimeStep = 0
self.emm_timer = eTimer()
self.emm_timer.timeout.get().append(self._nemesisEI__updateEMMInfo)
self._nemesisEI__evStart()
self._nemesisEI__event_tracker = ServiceEventTracker(screen = self, eventmap = {
iPlayableService.evStart: self._nemesisEI__evStart,
iPlayableService.evUpdatedInfo: self._nemesisEI__evUpdatedInfo,
iPlayableService.evTunedIn: self._nemesisEI__evTunedIn })
self.onHide.append(self._nemesisEI__onHide)
self.onShow.append(self._nemesisEI__onShow)
def _nemesisEI__onHide(self):
if self.emm_timer.isActive():
self.emm_timer.stop()
if self.ecm_timer.isActive():
self.ecm_timer.stop()
def _nemesisEI__onShow(self):
service = self.session.nav.getCurrentService()
info = service.info() if service else None
if info is not None:
if not self.ecm_timer.isActive():
self.ecmTimeStep = 0
self.ecm_timer.start(config.nemesis.ecminfodelay.value)
def _nemesisEI__evStart(self):
if self.emm_timer.isActive():
self.emm_timer.stop()
if self.ecm_timer.isActive():
self.ecm_timer.stop()
self.displayClean()
def _nemesisEI__evTunedIn(self):
service = self.session.nav.getCurrentService()
info = service.info() if service else None
if info is not None:
if not self.emm_timer.isActive():
self.ecmTimeStep = 0
self.emm_timer.start(config.nemesis.emminfodelay.value)
if not self.ecm_timer.isActive():
self.ecm_timer.start(config.nemesis.ecminfodelay.value)
def _nemesisEI__updateEMMInfo(self):
self.emm_timer.stop()
service = self.session.nav.getCurrentService()
info = service.info() if service else None
if info is not None:
self.showEMM(info.getInfoObject(iServiceInformation.sCAIDs))
def _nemesisEI__updateEmuInfo(self):
service = self.session.nav.getCurrentService()
info = service.info() if service else None
self.cleandecInfo()
if info is not None:
if info.getInfo(iServiceInformation.sIsCrypted):
self.showEmuName()
print '[self.ecmTimeStep]', self.ecmTimeStep
if self.ecmTimeStep >= 5:
self.ecm_timer.changeInterval(11000)
else:
self.ecmTimeStep += 1
info = parse_ecm(self.readEcmFile())
if info != 0:
caid = info[0]
pid = info[1]
provid = info[2]
ecmtime = info[3]
source = info[4]
addr = info[5]
port = info[6]
hops = info[7]
reader = info[8]
protocol = info[9]
if ecmtime > 0:
self['ecmtime'].setText('ECM Time: %s msec' % str(ecmtime))
if provid != '':
self['EmuInfo'].setText('ProvID: %s ' % provid)
else:
self['EmuInfo'].setText('')
if pid != '':
self['EmuInfo'].setText('%sPID: %s ' % (self['EmuInfo'].getText(), pid))
if caid != '':
self['EmuInfo'].setText('%sCaID: %s' % (self['EmuInfo'].getText(), caid))
self.showECM(caid)
if source == 0:
self['EmuInfo'].setText('Decode: Unsupported!')
self['ecm_pict'].show()
self['ecm_pict'].instance.setPixmapFromFile(self.findPicon('unknown'))
elif source == 1:
self['b_emu'].show()
self['netcard'].setText('Decode: Internal')
elif source == 2:
if addr != '':
if addr.find('127.0.0.1') != -1 or addr.find('localhost') != -1:
self['netcard'].setText('Decode: Internal')
self['b_card'].show()
else:
self['b_spider'].show()
if config.nemesis.shownetdet.value:
if port != '':
self['netcard'].setText('Source: %s:%s' % (addr, port))
else:
self['netcard'].setText('Source: %s' % addr)
if hops > 0:
self['hops'].setText('Hops: %s' % str(hops))
if protocol != '':
self['reader'].setText('Protocol: %s ' % str(protocol))
if reader != '' and protocol != '':
self['reader'].setText('%sReader: %s' % (self['reader'].getText(), str(reader)))
elif reader != '':
self['reader'].setText('Reader: %s' % str(reader))
else:
self['netcard'].setText('Decode: Network')
else:
self['b_spider'].show()
self['netcard'].setText('Decode: Network')
elif source == 3:
self['b_card'].show()
self['netcard'].setText('Decode: %s' % str(reader))
elif source == 4:
self['b_card'].show()
self['netcard'].setText('Decode: slot-1')
elif source == 5:
self['b_card'].show()
self['netcard'].setText('Decode: slot-2')
else:
self['EmuInfo'].setText('Decode: Unsupported!')
self['ecm_pict'].show()
self['ecm_pict'].instance.setPixmapFromFile(self.findPicon('unknown'))
else:
self['hops'].setText('')
self['emuname'].setText('')
self['EmuInfo'].setText('')
self['ecmtime'].setText('')
self['netcard'].setText('')
self['reader'].setText('')
self['b_fta'].show()
self['ecm_pict'].show()
self['ecm_pict'].instance.setPixmapFromFile(self.findPicon('fta'))
def int2hex(self, value):
return '%x' % value
def showECM(self, caid):
caid = caid.lower()
if 'x' in caid:
idx = caid.index('x')
caid = caid[idx + 1:]
if len(caid) == 3:
caid = '0%s' % caid
caid = caid[:2]
caid = caid.upper()
if self.systemCaids.has_key(caid):
system = self.systemCaids.get(caid)
self[system].setForegroundColorNum(2)
pngname = self.findPicon(system)
if pngname == '':
pngname = self.findPicon('unknown')
self['ecm_pict'].show()
self['ecm_pict'].instance.setPixmapFromFile(pngname)
def showEMM(self, caids):
if caids:
if len(caids) > 0:
for caid in caids:
caid = self.int2hex(caid)
if len(caid) == 3:
caid = '0%s' % caid
caid = caid[:2]
caid = caid.upper()
if self.systemCaids.has_key(caid):
self[self.systemCaids.get(caid)].setForegroundColorNum(1)
continue
def findPicon(self, codName):
if config.nemesis.usepiconinhdd.value:
searchPaths = ('/media/hdd/%s/', eEnv.resolve('${datadir}/%s/'), '/media/usb/%s/', '/media/cf/%s/')
else:
searchPaths = (eEnv.resolve('${datadir}/%s/'), '/media/usb/%s/', '/media/cf/%s/')
for path in searchPaths:
pngname = path % self.path + codName + '.png'
if fileExists(pngname):
return pngname
return ''
def cleandecInfo(self):
self['b_fta'].hide()
self['b_card'].hide()
self['b_emu'].hide()
self['b_spider'].hide()
def displayClean(self):
self['ecm_pict'].hide()
self['hops'].setText('')
self['reader'].setText('')
self['emuname'].setText('')
self['EmuInfo'].setText('')
self['ecmtime'].setText('')
self['netcard'].setText('')
for x in self.systemState:
self[x].hide()
for x in self.systemCod:
self[x].setForegroundColorNum(0)
def _nemesisEI__evUpdatedInfo(self):
self['TunerInfo'].setText('')
self['SatInfo'].setText('')
service = self.session.nav.getCurrentService()
frontendInfo = service.frontendInfo() if service else None
frontendData = frontendInfo.getAll(True) if frontendInfo else None
if frontendData is not None:
tuner_type = frontendData.get('tuner_type', 'None')
sr = str(int(frontendData.get('symbol_rate', 0) / 1000))
freq = str(int(frontendData.get('frequency', 0) / 1000))
if tuner_type == 'DVB-S' or tuner_type == 'DVB-S2':
try:
orb = {
3590: 'Thor/Intelsat (1.0\xc2\xb0W)',
3560: 'Amos 2/3 (4.0\xc2\xb0W)',
3550: 'Atlantic Bird 3 (5.0\xc2\xb0W)',
3530: 'Nilesat/Atlantic Bird (7.0\xc2\xb0W)',
3520: 'Atlantic Bird 2 (8.0\xc2\xb0W)',
3475: 'Atlantic Bird 1 (12.5\xc2\xb0W)',
3460: 'Express A4 (14.0\xc2\xb0W)',
3450: 'Telstar 12 (15.0\xc2\xb0W)',
3420: 'Intelsat 901 (18.0\xc2\xb0W)',
3380: 'Nss 7 (22.0\xc2\xb0W)',
3355: 'Intelsat 905 (24.5\xc2\xb0W)',
3325: 'Intelsat 907 (27.5\xc2\xb0W)',
3300: 'Hispasat 1C/1D (30.0\xc2\xb0W)',
3170: 'Intelsat 11 (43.0\xc2\xb0W)',
3150: 'Intelsat 14 (IS-14) (45.0\xc2\xb0W)',
3070: 'Intelsat 707 (53.0\xc2\xb0W)',
3045: 'Intelsat 805 (55.5\xc2\xb0W)',
3020: 'Intelsat 9 (58.0\xc2\xb0W)',
2990: 'Amazonas (61.0\xc2\xb0W)',
2900: 'Star One C2 (70.0\xc2\xb0W)',
2875: 'Nimiq 5 (72.7\xc2\xb0W)',
2780: 'NIMIQ 4 (82.0\xc2\xb0W)',
2690: 'NIMIQ 1 (91.0\xc2\xb0W)',
3592: 'Thor/Intelsat (0.8\xc2\xb0W)',
2985: 'EchoStar 12/15 (61.5\xc2\xb0W)',
2830: 'Echostar 4/8 (77.0\xc2\xb0W)',
2500: 'Echostar 10/11 (110.0\xc2\xb0W)',
2502: 'Echostar 10/11 (110.0\xc2\xb0W)',
2410: 'Echostar/Anik F3 (119.0\xc2\xb0W)',
2391: 'Galaxy 23 (121.0\xc2\xb0W)',
2390: 'Echostar 9 (121.0\xc2\xb0W)',
2412: 'DirectTV 7S (119.0\xc2\xb0W)',
2310: 'Galaxy 27 (129.0\xc2\xb0W)',
2311: 'Ciel 2 (129.0\xc2\xb0W)',
2120: 'Echostar 2 (148.0\xc2\xb0W)',
1100: 'BSAT 2A (110.0\xc2\xb0E)',
1101: 'N-Sat 110 (110.0\xc2\xb0E)',
1131: 'KoreaSat 5 (113.0\xc2\xb0E)',
1440: 'SuperBird C2 (144.0\xc2\xb0E)',
1006: 'AsiaSat 5 (100.5\xc2\xb0E)',
1056: 'Asiasat 3S (105.5\xc2\xb0E)',
1082: 'NSS 11 (108.2\xc2\xb0E)',
881: 'ST1 (88.0\xc2\xb0E)',
917: 'Measat 3 (91.5\xc2\xb0E)',
950: 'Insat 4B (95.0\xc2\xb0E)',
951: 'NSS 6 (95.0\xc2\xb0E)',
765: 'Apstar 2R (76.5\xc2\xb0E)',
785: 'ThaiCom 5 (78.5\xc2\xb0E)',
800: 'Express (80.0\xc2\xb0E)',
830: 'Insat 4A (83.0\xc2\xb0E)',
850: 'Intelsat 15 (85.2\xc2\xb0E)',
750: 'Abs 1 (75.0\xc2\xb0E)',
720: 'Intelsat (72.0\xc2\xb0E)',
705: 'Eutelsat W5 (70.5\xc2\xb0E)',
685: 'Intelsat 7/10 (68.5\xc2\xb0E)',
620: 'Intelsat 902 (62.0\xc2\xb0E)',
600: 'Intelsat 904 (60.0\xc2\xb0E)',
570: 'Nss 12 (57.0\xc2\xb0E)',
530: 'Express AM22 (53.0\xc2\xb0E)',
482: 'Eutelsat W48 (48.2\xc2\xb0E)',
450: 'Intelsat 12 (45.0\xc2\xb0E)',
420: 'Turksat 2A/3A (42.0\xc2\xb0E)',
400: 'Express AM1 (40.0\xc2\xb0E)',
390: 'Hellas Sat 2 (39.0\xc2\xb0E)',
380: 'Paksat 1 (38.0\xc2\xb0E)',
360: 'Eutelsat W4/W7 (36.0\xc2\xb0E)',
335: 'Astra 1M (33.5\xc2\xb0E)',
330: 'Eurobird 3 (33.0\xc2\xb0E)',
328: 'Galaxy 11 (32.8\xc2\xb0E)',
315: 'Astra 1G (31.5\xc2\xb0E)',
310: 'Turksat (31.0\xc2\xb0E)',
305: 'Arabsat 5A (30.5\xc2\xb0E)',
285: 'Eurobird/Astra (28.5\xc2\xb0E)',
284: 'Eurobird/Astra (28.2\xc2\xb0E)',
282: 'Eurobird/Astra (28.2\xc2\xb0E)',
1220: 'AsiaSat 4 (122.0\xc2\xb0E)',
1380: 'Telstar 18 (138.0\xc2\xb0E)',
260: 'Badr 6 (26.0\xc2\xb0E)',
255: 'Eurobird 2 (25.5\xc2\xb0E)',
232: 'Astra 3A/3B (23.5\xc2\xb0E)',
235: 'Astra 3A/3B (23.5\xc2\xb0E)',
215: 'Eutelsat W6 (21.5\xc2\xb0E)',
216: 'Eutelsat W6 (21.6\xc2\xb0E)',
210: 'AfriStar 1 (21.0\xc2\xb0E)',
192: 'Astra 1M/1H/1L/1KR (19.2\xc2\xb0E)',
160: 'Eutelsat W2M/16A/Sesat (16.0\xc2\xb0E)',
130: 'Hot Bird 6/8/9 (13.0\xc2\xb0E)',
100: 'Eutelsat W2A (10.0\xc2\xb0E)',
90: 'Eurobird 9A (9.0\xc2\xb0E)',
70: 'Eutelsat W3A (7.0\xc2\xb0E)',
50: 'Sirius 4 (5.0\xc2\xb0E)',
48: 'Astra 4A (4.8\xc2\xb0E)',
40: 'Eurobird 4A (4.0\xc2\xb0E)',
30: 'Telecom 2 (3.0\xc2\xb0E)',
852: 'Intelsat 15 (85.2\xc2\xb0E)',
660: 'Intelsat 17 (66.0\xc2\xb0E)',
3490: 'Express AM44 (11.0\xc2\xb0W)' }[frontendData.get('orbital_position', 'None')]
except:
orb = 'Unsupported SAT: %s' % str([
frontendData.get('orbital_position', 'None')])
pol = {
eDVBFrontendParametersSatellite.Polarisation_Horizontal: 'H',
eDVBFrontendParametersSatellite.Polarisation_Vertical: 'V',
eDVBFrontendParametersSatellite.Polarisation_CircularLeft: 'CL',
eDVBFrontendParametersSatellite.Polarisation_CircularRight: 'CR' }[frontendData.get('polarization', eDVBFrontendParametersSatellite.Polarisation_Horizontal)]
fec = {
eDVBFrontendParametersSatellite.FEC_None: 'None',
eDVBFrontendParametersSatellite.FEC_Auto: 'Auto',
eDVBFrontendParametersSatellite.FEC_1_2: '1/2',
eDVBFrontendParametersSatellite.FEC_2_3: '2/3',
eDVBFrontendParametersSatellite.FEC_3_4: '3/4',
eDVBFrontendParametersSatellite.FEC_5_6: '5/6',
eDVBFrontendParametersSatellite.FEC_7_8: '7/8',
eDVBFrontendParametersSatellite.FEC_3_5: '3/5',
eDVBFrontendParametersSatellite.FEC_4_5: '4/5',
eDVBFrontendParametersSatellite.FEC_8_9: '8/9',
eDVBFrontendParametersSatellite.FEC_9_10: '9/10' }[frontendData.get('fec_inner', eDVBFrontendParametersSatellite.FEC_Auto)]
self['TunerInfo'].setText('Freq: ' + freq + ' MHz, Pol: ' + pol + ', Fec: ' + fec + ', SR: ' + sr)
self['SatInfo'].setText('Satellite: ' + orb)
elif tuner_type == 'DVB-C':
fec = {
eDVBFrontendParametersCable.FEC_None: 'None',
eDVBFrontendParametersCable.FEC_Auto: 'Auto',
eDVBFrontendParametersCable.FEC_1_2: '1/2',
eDVBFrontendParametersCable.FEC_2_3: '2/3',
eDVBFrontendParametersCable.FEC_3_4: '3/4',
eDVBFrontendParametersCable.FEC_5_6: '5/6',
eDVBFrontendParametersCable.FEC_7_8: '7/8',
eDVBFrontendParametersCable.FEC_8_9: '8/9' }[frontendData.get('fec_inner', eDVBFrontendParametersCable.FEC_Auto)]
self['TunerInfo'].setText('Freq: ' + freq + ' MHz, Fec: ' + fec + ', SR: ' + sr)
elif tuner_type == 'DVB-T':
pol = {
eDVBFrontendParametersTerrestrial.Modulation_Auto: 'Auto',
eDVBFrontendParametersTerrestrial.Modulation_QPSK: 'QPSK',
eDVBFrontendParametersTerrestrial.Modulation_QAM16: 'QAM16',
eDVBFrontendParametersTerrestrial.Modulation_QAM64: 'QAM64' }[frontendData.get('constellation', eDVBFrontendParametersTerrestrial.Modulation_Auto)]
fec = {
eDVBFrontendParametersTerrestrial.FEC_Auto: 'Auto',
eDVBFrontendParametersTerrestrial.FEC_1_2: '1/2',
eDVBFrontendParametersTerrestrial.FEC_2_3: '2/3',
eDVBFrontendParametersTerrestrial.FEC_3_4: '3/4',
eDVBFrontendParametersTerrestrial.FEC_5_6: '5/6',
eDVBFrontendParametersTerrestrial.FEC_7_8: '7/8' }[frontendData.get('code_rate_lp', eDVBFrontendParametersTerrestrial.FEC_Auto)]
self['TunerInfo'].setText('Freq: ' + freq + ' MHz, Fec: ' + fec + ', SR: ' + sr)
self['SatInfo'].setText('Constellation: ' + pol)
else:
self['TunerInfo'].setText('Freq: ' + freq + ' MHz, SR: ' + sr)
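# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the original plugin): how a
# raw CAID string is normalised to the two-character prefix used as the key of
# systemCaids in showECM()/showEMM(). The sample CAID strings are made up.
def _caid_prefix_demo():
    for raw in ('0x0D95', '500', '1802'):
        caid = raw.lower()
        if 'x' in caid:  # strip a leading '0x'
            caid = caid[caid.index('x') + 1:]
        if len(caid) == 3:  # left-pad three-digit CAIDs
            caid = '0' + caid
        print('%s -> prefix %s' % (raw, caid[:2].upper()))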
|
kingvuplus/nn-gui
|
lib/python/Nemesis/nemesisExtraInfoBar.py
|
Python
|
gpl-2.0
| 22,381
|
[
"Galaxy"
] |
3ac480fc7633dcefe109c757123dc55700571bc156a9723c01bd13fb4977d3a3
|
# -*- coding: utf-8 -*-
"""Tests for PyBEL induction functions."""
import string
import unittest
from pybel import BELGraph
from pybel.constants import (
CITATION_AUTHORS,
CITATION_TYPE_PUBMED,
IDENTIFIER,
NAMESPACE,
)
from pybel.dsl import BaseEntity, gene, protein, rna
from pybel.struct.mutation.expansion import expand_upstream_causal
from pybel.struct.mutation.induction import get_subgraph_by_annotation_value
from pybel.struct.mutation.induction.citation import (
get_subgraph_by_authors,
get_subgraph_by_pubmed,
)
from pybel.struct.mutation.induction.paths import (
get_nodes_in_all_shortest_paths,
get_subgraph_by_all_shortest_paths,
)
from pybel.struct.mutation.induction.upstream import get_upstream_causal_subgraph
from pybel.struct.mutation.induction.utils import get_subgraph_by_induction
from pybel.testing.utils import n
trem2_gene = gene(namespace="HGNC", name="TREM2")
trem2_rna = rna(namespace="HGNC", name="TREM2")
trem2_protein = protein(namespace="HGNC", name="TREM2")
class TestGraphMixin(unittest.TestCase):
"""A mixin to enable testing nodes and edge membership in the graph."""
def assert_in_edge(self, source, target, graph):
"""Assert the edge is in the graph.
:param source:
:param target:
:type graph: pybel.BELGraph
:rtype: bool
"""
self.assertIn(target, graph[source])
def assert_all_nodes_are_base_entities(self, graph):
"""Assert that all nodes are base entities."""
for node in graph:
self.assertIsInstance(node, BaseEntity)
class TestInduction(TestGraphMixin):
"""Test induction functions."""
def test_get_subgraph_by_induction(self):
"""Test get_subgraph_by_induction."""
graph = BELGraph()
keyword, url = n(), n()
graph.namespace_url[keyword] = url
a, b, c, d = [protein(namespace="test", name=str(i)) for i in range(4)]
graph.add_directly_increases(a, b, citation=n(), evidence=n())
graph.add_directly_increases(b, c, citation=n(), evidence=n())
graph.add_directly_increases(c, d, citation=n(), evidence=n())
graph.add_increases(a, d, citation=n(), evidence=n())
nodes = [b, c]
subgraph = get_subgraph_by_induction(graph, nodes)
self.assertIsInstance(subgraph, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph)
self.assertNotEqual(
0,
len(subgraph.namespace_url),
msg="improperly found metadata: {}".format(subgraph.graph),
)
self.assertIn(keyword, subgraph.namespace_url)
self.assertEqual(url, subgraph.namespace_url[keyword])
self.assertNotIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertIn(c, subgraph)
self.assertNotIn(d, subgraph)
def test_get_subgraph_by_all_shortest_paths(self):
"""Test get_subgraph_by_all_shortest_paths."""
graph = BELGraph()
keyword, url = n(), n()
graph.namespace_url[keyword] = url
a, b, c, d, e, f = [protein(namespace="test", name=n()) for _ in range(6)]
graph.add_increases(a, b, citation=n(), evidence=n())
graph.add_increases(a, c, citation=n(), evidence=n())
graph.add_increases(b, d, citation=n(), evidence=n())
graph.add_increases(c, d, citation=n(), evidence=n())
graph.add_increases(a, e, citation=n(), evidence=n())
graph.add_increases(e, f, citation=n(), evidence=n())
graph.add_increases(f, d, citation=n(), evidence=n())
query_nodes = [a, d]
shortest_paths_nodes = get_nodes_in_all_shortest_paths(graph, query_nodes)
self.assertIn(a, shortest_paths_nodes)
self.assertIn(b, shortest_paths_nodes)
self.assertIn(c, shortest_paths_nodes)
self.assertIn(d, shortest_paths_nodes)
subgraph = get_subgraph_by_all_shortest_paths(graph, query_nodes)
self.assert_all_nodes_are_base_entities(subgraph)
self.assertIsInstance(subgraph, BELGraph)
self.assertIn(keyword, subgraph.namespace_url)
self.assertEqual(url, subgraph.namespace_url[keyword])
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertIn(c, subgraph)
self.assertIn(d, subgraph)
self.assertNotIn(e, subgraph)
self.assertNotIn(f, subgraph)
def test_get_upstream_causal_subgraph(self):
"""Test get_upstream_causal_subgraph."""
a, b, c, d, e, f = [protein(namespace="test", name=n()) for _ in range(6)]
universe = BELGraph()
universe.namespace_pattern["test"] = "test-url"
universe.add_increases(a, b, citation=n(), evidence=n())
universe.add_increases(b, c, citation=n(), evidence=n())
universe.add_association(d, a, citation=n(), evidence=n())
universe.add_increases(e, a, citation=n(), evidence=n())
universe.add_decreases(f, b, citation=n(), evidence=n())
subgraph = get_upstream_causal_subgraph(universe, [a, b])
self.assertIsInstance(subgraph, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph)
self.assertIn("test", subgraph.namespace_pattern)
self.assertEqual("test-url", subgraph.namespace_pattern["test"])
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertNotIn(c, subgraph)
self.assertNotIn(d, subgraph)
self.assertIn(e, subgraph)
self.assertIn(f, subgraph)
self.assertEqual(4, subgraph.number_of_nodes())
self.assert_in_edge(e, a, subgraph)
self.assert_in_edge(a, b, subgraph)
self.assert_in_edge(f, b, subgraph)
self.assertEqual(3, subgraph.number_of_edges())
def test_expand_upstream_causal_subgraph(self):
"""Test expanding on the upstream causal subgraph."""
a, b, c, d, e, f = [protein(namespace="test", name=i) for i in string.ascii_lowercase[:6]]
universe = BELGraph()
universe.add_increases(a, b, citation=n(), evidence=n())
universe.add_increases(b, c, citation=n(), evidence=n())
universe.add_association(d, a, citation=n(), evidence=n())
universe.add_increases(e, a, citation=n(), evidence=n())
universe.add_decreases(f, b, citation=n(), evidence=n())
subgraph = BELGraph()
subgraph.add_increases(a, b, citation=n(), evidence=n())
expand_upstream_causal(universe, subgraph)
self.assertIsInstance(subgraph, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph)
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertNotIn(c, subgraph)
self.assertNotIn(d, subgraph)
self.assertIn(e, subgraph)
self.assertIn(f, subgraph)
self.assertEqual(4, subgraph.number_of_nodes())
self.assert_in_edge(e, a, subgraph)
self.assert_in_edge(a, b, subgraph)
self.assert_in_edge(f, b, subgraph)
self.assertEqual(2, len(subgraph[a][b]))
self.assertEqual(4, subgraph.number_of_edges(), msg="\n".join(map(str, subgraph.edges())))
class TestEdgePredicateBuilders(TestGraphMixin):
"""Tests for edge predicate builders."""
def test_build_pmid_inclusion_filter(self):
"""Test getting a sub-graph by a single PubMed identifier."""
a, b, c, d = [protein(namespace="test", name=n()) for _ in range(4)]
p1, p2, p3, p4 = n(), n(), n(), n()
graph = BELGraph()
keyword, url = n(), n()
graph.namespace_url[keyword] = url
graph.add_increases(a, b, evidence=n(), citation=p1)
graph.add_increases(a, b, evidence=n(), citation=p2)
graph.add_increases(b, c, evidence=n(), citation=p1)
graph.add_increases(b, c, evidence=n(), citation=p3)
graph.add_increases(c, d, evidence=n(), citation=p3)
subgraph = get_subgraph_by_pubmed(graph, p1)
self.assertIsInstance(subgraph, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph)
self.assertIn(keyword, subgraph.namespace_url)
self.assertEqual(url, subgraph.namespace_url[keyword])
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertIn(c, subgraph)
self.assertNotIn(d, subgraph)
empty_subgraph = get_subgraph_by_pubmed(graph, p4)
self.assertIn(keyword, subgraph.namespace_url)
self.assertEqual(url, subgraph.namespace_url[keyword])
self.assertEqual(0, empty_subgraph.number_of_nodes())
def test_build_pmid_set_inclusion_filter(self):
"""Test getting a sub-graph by a set of PubMed identifiers."""
a, b, c, d, e, f = [protein(namespace="test", name=n()) for _ in range(6)]
p1, p2, p3, p4, p5, p6 = n(), n(), n(), n(), n(), n()
graph = BELGraph()
keyword, url = n(), n()
graph.namespace_url[keyword] = url
graph.add_increases(a, b, evidence=n(), citation=p1)
graph.add_increases(a, b, evidence=n(), citation=p2)
graph.add_increases(b, c, evidence=n(), citation=p1)
graph.add_increases(b, c, evidence=n(), citation=p3)
graph.add_increases(c, d, evidence=n(), citation=p3)
graph.add_increases(e, f, evidence=n(), citation=p4)
subgraph = get_subgraph_by_pubmed(graph, [p1, p4])
self.assertIsInstance(subgraph, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph)
self.assertIn(keyword, subgraph.namespace_url)
self.assertEqual(url, subgraph.namespace_url[keyword])
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertIn(c, subgraph)
self.assertNotIn(d, subgraph)
self.assertIn(e, subgraph)
self.assertIn(f, subgraph)
empty_subgraph = get_subgraph_by_pubmed(graph, [p5, p6])
self.assertIn(keyword, subgraph.namespace_url)
self.assertEqual(url, subgraph.namespace_url[keyword])
self.assertEqual(0, empty_subgraph.number_of_nodes())
def test_build_author_inclusion_filter(self):
"""Test getting a sub-graph by a single author."""
a, b, c, d = [protein(namespace="test", name=n()) for _ in range(4)]
a1, a2, a3, a4, a5 = n(), n(), n(), n(), n()
c1 = {
NAMESPACE: CITATION_TYPE_PUBMED,
IDENTIFIER: n(),
CITATION_AUTHORS: [a1, a2, a3],
}
c2 = {
NAMESPACE: CITATION_TYPE_PUBMED,
IDENTIFIER: n(),
CITATION_AUTHORS: [a1, a4],
}
graph = BELGraph()
keyword, url = n(), n()
graph.namespace_url[keyword] = url
graph.add_increases(a, b, evidence=n(), citation=c1)
graph.add_increases(a, b, evidence=n(), citation=c2)
graph.add_increases(b, c, evidence=n(), citation=c1)
graph.add_increases(c, d, evidence=n(), citation=c2)
subgraph1 = get_subgraph_by_authors(graph, a1)
self.assertIsInstance(subgraph1, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph1)
self.assertIn(keyword, subgraph1.namespace_url)
self.assertEqual(url, subgraph1.namespace_url[keyword])
self.assertIn(a, subgraph1)
self.assertIn(b, subgraph1)
self.assertIn(c, subgraph1)
self.assertIn(d, subgraph1)
subgraph2 = get_subgraph_by_authors(graph, a2)
self.assertIsInstance(subgraph2, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph2)
self.assertIn(keyword, subgraph2.namespace_url)
self.assertEqual(url, subgraph2.namespace_url[keyword])
self.assertIn(a, subgraph2)
self.assertIn(b, subgraph2)
self.assertIn(c, subgraph2)
self.assertNotIn(d, subgraph2)
subgraph3 = get_subgraph_by_authors(graph, a5)
self.assertIsInstance(subgraph3, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph3)
self.assertIn(keyword, subgraph3.namespace_url)
self.assertEqual(url, subgraph3.namespace_url[keyword])
self.assertEqual(0, subgraph3.number_of_nodes())
def test_build_author_set_inclusion_filter(self):
"""Test getting a sub-graph by a set of authors."""
a, b, c, d = [protein(namespace="test", name=n()) for _ in range(4)]
a1, a2, a3, a4 = n(), n(), n(), n()
c1 = {
NAMESPACE: CITATION_TYPE_PUBMED,
IDENTIFIER: n(),
CITATION_AUTHORS: [a1, a2, a3],
}
c2 = {
NAMESPACE: CITATION_TYPE_PUBMED,
IDENTIFIER: n(),
CITATION_AUTHORS: [a1, a4],
}
graph = BELGraph()
keyword, url = n(), n()
graph.namespace_url[keyword] = url
graph.add_increases(a, b, evidence=n(), citation=c1)
graph.add_increases(a, b, evidence=n(), citation=c2)
graph.add_increases(b, c, evidence=n(), citation=c1)
graph.add_increases(c, d, evidence=n(), citation=c2)
subgraph1 = get_subgraph_by_authors(graph, [a1, a2])
self.assertIsInstance(subgraph1, BELGraph)
self.assert_all_nodes_are_base_entities(subgraph1)
self.assertIn(keyword, subgraph1.namespace_url)
self.assertEqual(url, subgraph1.namespace_url[keyword])
self.assertIn(a, subgraph1)
self.assertIn(b, subgraph1)
self.assertIn(c, subgraph1)
self.assertIn(d, subgraph1)
class TestEdgeInduction(unittest.TestCase):
"""Test induction over edges."""
def test_get_subgraph_by_annotation_value(self):
"""Test getting a subgraph by a single annotation value."""
graph = BELGraph()
graph.annotation_url["Subgraph"] = n()
a, b, c, d = [protein(namespace="test", name=n()) for _ in range(4)]
k1 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"A"}})
k2 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"B"}})
k3 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"A", "C", "D"}})
subgraph = get_subgraph_by_annotation_value(graph, "Subgraph", "A")
self.assertIsInstance(subgraph, BELGraph)
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertIn(b, subgraph[a])
self.assertIn(k1, subgraph[a][b])
self.assertNotIn(k2, subgraph[a][b])
self.assertIn(k3, subgraph[a][b])
def test_get_subgraph_by_annotation_values(self):
"""Test getting a subgraph by multiple annotation value."""
graph = BELGraph()
graph.annotation_list["Subgraph"] = set("ABCDE")
a, b, c, d = [protein(namespace="test", name=n()) for _ in range(4)]
k1 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"A"}})
k2 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"B"}})
k3 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"A", "C", "D"}})
k4 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={"Subgraph": {"C", "D"}})
subgraph = get_subgraph_by_annotation_value(graph, "Subgraph", {"A", "C"})
self.assertIsInstance(subgraph, BELGraph)
self.assertIn(a, subgraph)
self.assertIn(b, subgraph)
self.assertIn(b, subgraph[a])
self.assertIn(k1, subgraph[a][b])
self.assertNotIn(k2, subgraph[a][b])
self.assertIn(k3, subgraph[a][b])
self.assertIn(k4, subgraph[a][b])
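# Convenience entry point (an assumption, not part of the original test module):
# lets the file be run directly with the standard library runner in addition to
# pytest.
if __name__ == '__main__':
    unittest.main()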
|
pybel/pybel
|
tests/test_struct/test_transformations/test_induction.py
|
Python
|
mit
| 15,582
|
[
"Pybel"
] |
6add5bb6d45862c8f81ead281d0ec1dcb8426fb930d054db5cdaea3ce41ab091
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import pytest
from django.core.management import call_command
from django.urls import reverse
from tldap.exceptions import ObjectDoesNotExist
from karaage.machines.models import Account
from karaage.people.models import Group, Person
from karaage.projects.models import Project
from karaage.tests.integration import IntegrationTestCase
@pytest.mark.django_db
class ProjectTestCase(IntegrationTestCase):
def setUp(self):
super(ProjectTestCase, self).setUp()
call_command('loaddata', 'test_karaage', **{'verbosity': 0})
def test_admin_add_project(self):
Project.objects.count()
self.client.login(username='kgsuper', password='aq12ws')
response = self.client.get(reverse('kg_project_add'))
self.assertEqual(response.status_code, 200)
form_data = {
'name': 'Test Project 4',
'description': 'Test',
'institute': 1,
'leaders': "|2|3|",
'start_date': datetime.date.today(),
}
response = self.client.post(reverse('kg_project_add'), form_data)
self.assertEqual(response.status_code, 302)
project = Project.objects.get(name='Test Project 4')
self.assertEqual(project.is_active, True)
self.assertEqual(project.date_approved, datetime.date.today())
self.assertEqual(
project.approved_by,
Person.objects.get(username='kgsuper'))
self.assertEqual(project.pid, 'pExam0001')
self.assertTrue(Person.objects.get(pk=2) in project.leaders.all())
self.assertTrue(Person.objects.get(pk=3) in project.leaders.all())
lgroup = self._ldap_datastore._get_group(cn=project.pid)
self.assertEqual(lgroup['cn'], [project.pid])
def test_admin_add_project_pid(self):
Project.objects.count()
self.client.login(username='kgsuper', password='aq12ws')
response = self.client.get(reverse('kg_project_add'))
self.assertEqual(response.status_code, 200)
form_data = {
'pid': "Enrico",
'name': 'Test Project 4',
'description': 'Test',
'institute': 1,
'leaders': "|2|3|",
'start_date': datetime.date.today(),
}
response = self.client.post(reverse('kg_project_add'), form_data)
self.assertEqual(response.status_code, 302)
project = Project.objects.get(pid="Enrico")
self.assertEqual(project.is_active, True)
self.assertEqual(project.date_approved, datetime.date.today())
self.assertEqual(
project.approved_by,
Person.objects.get(username='kgsuper'))
self.assertEqual(project.pid, 'Enrico')
self.assertTrue(Person.objects.get(pk=2) in project.leaders.all())
self.assertTrue(Person.objects.get(pk=3) in project.leaders.all())
lgroup = self._ldap_datastore._get_group(cn=project.pid)
self.assertEqual(lgroup['cn'], [project.pid])
def test_add_remove_user_to_project(self):
self.assertRaises(
ObjectDoesNotExist,
self._ldap_datastore._get_account, uid='kgtestuser2')
# login
self.client.login(username='kgsuper', password='aq12ws')
# get project details
project = Project.objects.get(pid='TestProject1')
self.assertEqual(project.group.members.count(), 1)
response = self.client.get(
reverse('kg_project_detail', args=[project.id]))
self.assertEqual(response.status_code, 200)
self.assertRaises(
ObjectDoesNotExist,
self._ldap_datastore._get_account, uid='kgtestuser2')
# add kgtestuser2 to project
new_user = Person.objects.get(username='kgtestuser2')
response = self.client.post(
reverse('kg_project_detail', args=[project.id]),
{'person': new_user.id})
self.assertEqual(response.status_code, 302)
project = Project.objects.get(pid='TestProject1')
self.assertEqual(project.group.members.count(), 2)
account = self._ldap_datastore._get_account(uid='kgtestuser2')
groups = account['groups'].load(self._ldap_datastore._database)
assert len(groups) == 1
assert groups[0]['cn'] == ['TestProject1']
# remove user
for account in new_user.account_set.all():
account.default_project = None
account.save()
response = self.client.post(
reverse('kg_remove_project_member',
args=[project.id, new_user.username]))
self.assertEqual(response.status_code, 302)
project = Project.objects.get(pid='TestProject1')
self.assertEqual(project.group.members.count(), 1)
lgroup = self._ldap_datastore._get_group(cn=project.pid)
assert 'kgtestuser2' not in lgroup['memberUid']
def test_delete_project(self):
self.client.login(username='kgsuper', password='aq12ws')
project = Project.objects.get(pid='TestProject1')
for account in Account.objects.filter(default_project=project):
account.default_project = None
account.save()
self.assertEqual(project.is_active, True)
response = self.client.post(
reverse('kg_project_delete', args=[project.id]))
self.assertEqual(response.status_code, 302)
project = Project.objects.get(pid='TestProject1')
self.assertEqual(project.is_active, False)
self.assertEqual(project.group.members.count(), 0)
self.assertEqual(project.date_deleted, datetime.date.today())
self.assertEqual(
project.deleted_by,
Person.objects.get(username='kgsuper'))
def test_change_default(self):
pass
def test_admin_edit_project(self):
project = Project.objects.get(pid='TestProject1')
self.assertEqual(project.is_active, True)
self.assertEqual(project.name, 'Test Project 1')
self.assertTrue(Person.objects.get(pk=1) in project.leaders.all())
self.assertFalse(Person.objects.get(pk=2) in project.leaders.all())
self.assertFalse(Person.objects.get(pk=3) in project.leaders.all())
self.assertTrue(
Person.objects.get(pk=3) in project.group.members.all())
self.client.login(username='kgsuper', password='aq12ws')
response = self.client.get(
reverse('kg_project_edit', args=[project.id]))
self.assertEqual(response.status_code, 200)
form_data = {
'name': 'Test Project 1 was 4',
'description': 'Test',
'institute': 1,
'leaders': "|2|3|",
'start_date': project.start_date,
}
response = self.client.post(
reverse('kg_project_edit', args=[project.id]),
form_data)
self.assertEqual(response.status_code, 302)
project = Project.objects.get(pid='TestProject1')
self.assertEqual(project.is_active, True)
# this is not changed because we don't support editing leaders with
# this interface any more
self.assertTrue(Person.objects.get(pk=1) in project.leaders.all())
self.assertFalse(Person.objects.get(pk=2) in project.leaders.all())
self.assertFalse(Person.objects.get(pk=3) in project.leaders.all())
lgroup = self._ldap_datastore._get_group(cn=project.pid)
self.assertEqual(lgroup['cn'], [project.pid])
def test_user_add_project(self):
pass
def test_user_edit_project(self):
pass
def test_change_group(self):
group, _ = Group.objects.get_or_create(name='example')
project = Project.objects.get(pid='TestProject1')
project.group = group
project.save()
|
brianmay/karaage
|
karaage/tests/projects/test_projects.py
|
Python
|
gpl-3.0
| 8,530
|
[
"Brian"
] |
e275d7a498203d73b53f58b1a47c68ea0484d852678e4f0385c785e4a108ce29
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
#!/usr/bin/env python
import os, re, sys
name = sys.argv[1]
os.system("sed -i 's/NAME/%s/g' %sfunctional.cc" %(name,name))
os.system("sed -i 's/NAME/%s/g' %sfunctional.h" %(name,name))
fh = open('preamble', 'r')
preamble = fh.readlines()
fh.close()
fh = open('parameters', 'r')
parameters = fh.readlines()
fh.close()
fh = open('functional', 'r')
functional = fh.readlines()
fh.close()
fh = open('functional_rho_a0', 'r')
functional_rho_a0 = fh.readlines()
fh.close()
fh = open('functional_rho_b0', 'r')
functional_rho_b0 = fh.readlines()
fh.close()
fh = open('%sfunctional.cc' % (name), 'r')
template = fh.readlines()
fh.close()
fh = open('%sfunctional.cc' % (name), 'w')
pre_re = re.compile(r'^(\s+)PREAMBLE$')
par_re = re.compile(r'^(\s+)PARAMETERS$')
fun_re = re.compile(r'^(\s+)FUNCTIONAL$')
fun_a0_re = re.compile(r'^(\s+)FUNCTIONAL_RHO_A0$')
fun_b0_re = re.compile(r'^(\s+)FUNCTIONAL_RHO_B0$')
for line in template:
mobj = re.match(pre_re, line)
if mobj:
spaces = mobj.group(1)
for l in preamble:
fh.write(spaces + l)
continue
mobj = re.match(par_re, line)
if mobj:
spaces = mobj.group(1)
for l in parameters:
fh.write(spaces + l)
continue
mobj = re.match(fun_re, line)
if mobj:
spaces = mobj.group(1)
for l in functional:
fh.write(spaces + l)
continue
mobj = re.match(fun_a0_re, line)
if mobj:
spaces = mobj.group(1)
for l in functional_rho_a0:
fh.write(spaces + l)
continue
mobj = re.match(fun_b0_re, line)
if mobj:
spaces = mobj.group(1)
for l in functional_rho_b0:
fh.write(spaces + l)
continue
fh.write(line)
os.system('rm preamble')
os.system('rm parameters')
os.system('rm functional')
os.system('rm functional_rho_a0')
os.system('rm functional_rho_b0')
fh.close()
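# Usage sketch (an assumption, inferred from how sys.argv[1] is consumed above;
# the functional name B3LYP is made up for the example):
#     python assemble.py B3LYP
# replaces the NAME placeholders inside B3LYPfunctional.cc / B3LYPfunctional.h,
# splices the preamble/parameters/functional* fragment files into the .cc
# template, and then deletes those fragment files.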
|
kannon92/psi4
|
psi4/src/psi4/libfunctional/matlab/mfiles/assemble.py
|
Python
|
gpl-2.0
| 2,877
|
[
"Psi4"
] |
3bcd9fd867e30d24f90d307507834f52d09a7dc1a4e26f05b78dc8b17d7a3c3d
|
# Main script of the game
# Reference http://renpyhandbook.tumblr.com/code-tutorials as needed
#########################################
# Defines backgrounds we'll use
image bg outside = "assets/backgrounds/outside_bg.png"
image bg outside_rain = im.MatrixColor("assets/backgrounds/outside_bg.png", im.matrix.tint(0.7, 0.7, 0.7))
image bg shower = "assets/backgrounds/bathroom_bg.png"
image bg apartment = Placeholder("bg")
image black = Solid((0, 0, 0, 255))
#########################################
# Defines sprites we'll use
#The c_ prefix denotes Fia's Hotpants Outfit
#The s_ prefix denotes Fia's Sweater Outfit
#The n_ prefix denotes Fia in the nude
#A 'w' in the prefix denotes a wet version
#A 's' in the prefix denotes soap (Nude only, implies 'w')
#The _u suffix denotes the fact Fia's tail is raised (Default on Fire/Nude)
#All the clothed sprites
image fia c_fire = "assets/sprites/fia_clothed_grin.png"
image fia c_neutral = "assets/sprites/fia_clothed_neutral.png"
image fia c_smile = "assets/sprites/fia_clothed_smile.png"
image fia c_pout = "assets/sprites/fia_clothed_pout.png"
image fia c_puppy = "assets/sprites/fia_clothed_sad.png"
image fia cw_pout = "assets/sprites/fia_clothedwet_pout.png"
image fia cw_neutral = Placeholder("girl")
image fia cw_smile = Placeholder("girl")
image fia cw_fire = Placeholder("girl")
image fia cw_fire_smile = Placeholder("girl")
#All the nude sprites
image fia n_smile = "assets/sprites/fia_nude_smile.png"
image fia n_fire = Placeholder("girl")
image fia n_neutral = Placeholder("girl")
image fia nw_smile = "assets/sprites/fia_nudewet_smile.png"
image fia nw_fire = Placeholder("girl")
image fia nw_bite = Placeholder("girl")
image fia nw_neutral = Placeholder("girl")
image fia nws_smile = Placeholder("girl")
#All the sweater sprites
image fia s_bite = "assets/sprites/fia_sweater_lipbite.png"
#########################################
# Define the characters we'll use
#Unknown
define un = Character('???', what_prefix='"', what_suffix='"', show_two_window=True)
#Fia
define f = Character('Fia', color="#CC5252", what_prefix='"', what_suffix='"', show_two_window=True)
#The MC
#As his inner thoughts will be handled by the narrator we need to add quotes
define mc = Character("Mike", color="#F0FFFF", what_prefix='"', what_suffix='"', show_two_window=True)
#NVL mode narrator
$ narrator = Character(None, kind=nvl)
#########################################
#Transitions, variables, and what not
init:
$ circirisout = ImageDissolve("assets/backgrounds/circleiris.png", 1.5, 8)
$ circirisin = ImageDissolve("assets/backgrounds/circleiris.png", 1.5, 8, reverse=True)
image rain:
"assets/backgrounds/rain.png"
0.3
"assets/backgrounds/rain2.png"
0.3
"assets/backgrounds/rain3.png"
0.3
repeat
#init python:
#Messes with the default dissolve
#define.move_transitions("dissolve", 0.5)
#########################################
# The actual game starts here.
label start:
#Start the VN with Fia & the MC outside
scene black
"..."
"It's funny how quickly one loses track of time when you're sleeping under the sun."
"Thirty minutes, an hour, three hours... I honestly can't say how long I've been here."
"Not that that bothers me."
"After all, Fia and I chose to stay in town over Spring Break for this very reason."
"None of the usual hustle and bustle, none of the interruptions."
"Just the two of us taking it easy and relaxing any way we can."
un "MIKE."
"Perhaps I spoke too soon about being able to relax..."
#Open his eyes
scene bg outside with circirisout
#Anytime she changes sprites add "with dissolve"
show fia c_pout with dissolve
"As I open my eyes I'm greeted with the sight of my girlfriend looming over me."
"I didn't even notice her leave my side under our tree."
"There's plenty of good napping spots this far out in the hills, but we always come back to this one in particular."
"I'm not entirely sure why we do at this point. Force of habit maybe?"
"In any case, it seems Fia's tired of laying about."
f "Jeez."
f "I've been trying to wake you up for at least five minutes now!"
"I let out a yawn and stretch before standing."
mc "Did something come up?"
show fia c_neutral with dissolve
f "Maybe. Take a look over there."
"She points over my shoulder at the sky behind us."
"The normally crystal-blue horizon seems to be growing ever darker. An assemblage of clouds, thunderheads even."
mc "Huh. I didn't think we were due for rain until tomorrow?"
f "And we were having such a lovely time too..."
"Her eyes shift downwards while her tail and ears droop."
"It's understandable. One of her quirks is that she hates getting wet outside of showers and baths."
"Those, for some reason, she absolutely adores."
"Between that the obvious and it's no wonder she's anxious."
mc "Well I suppose we'd better head back then."
f "Yeah..."
show fia c_puppy with dissolve
"And there it is. Her infamous \"kicked puppy\" look."
"I know she's not doing it on purpose but it still makes me feel awful."
"I should probably try and get her mind off of it."
#Removed dialogue choice. Just go to the next scene.
jump tease
#We should never get here, but just in case...
return
#Scene where the MC teases Fia for being anxious about the rain
label tease:
"The quickest way to get her to perk up might be to rib her a little."
"A little tease there, a little sexual innuendo there..."
mc "Well look on the bright side: At least now you have an excuse to pounce me when we get back."
show fia c_neutral with dissolve
f "Huh?"
"I give her a wink."
mc "I may have been out cold but I could still tell you were groping at my..."
show fia c_pout with dissolve
f "I did no such thing!"
"And yet your tail is wagging. Seems I guessed right."
mc "Uh huh. It's kinda hard to miss your paws, Fia."
"She lets out a little whine in response. Curse her adorableness."
f "Okay fine. I maybe sorta thought about waking you up in a different way."
mc "Oh?"
show fia c_fire with dissolve
f "It involved forcing you up against the tree, tearing your clothes off, and..."
"I wave her off before she gets too fired up."
mc "Alright alright, I get it. Let's getting walking before you decide to change your mind and we get caught in the rain."
"With that I start walking down the road back towards our apartment. Fia's quick to catch up."
show fia c_smile with dissolve
f "That's why you were playing possum, weren't you?"
"It's my turn to be confused."
mc "Eh?"
f "You were secretly hoping I would wake you up like that~"
"I let out a playful sigh. I walked right into that one."
mc "You know how much I love it when you do."
show fia c_fire with dissolve
f "I'll have to do it more often then~"
mc "I'm going to have to start wearing a chastity belt to bed, aren't I?"
mc "Otherwise I'll never get any sleep."
show fia c_smile with dissolve
f "It wouldn't last five seconds."
"She winks at me when I turn to raise an eyebrow at her."
"It's her way of letting me know that if she's pushing too hard I can tell her."
"But honestly?"
"I don't mind in the slightest."
"I'm just happy she's not dwelling on the storm gathering behind us."
#Move to the Wet T-Shirt Scene
jump wetshirt
#The obligatory wet t-shirt scene
label wetshirt:
#Thunder sound here?
with vpunch
"Speaking of..."
"A crack of thunder peels across the land, stopping me from replying."
"Seconds later the windfront reaches us, carrying with it the first drops of rain."
"It seems the storm was closer than either of us thought."
show fia c_pout with dissolve
f "Ugh. We're going to get soaking wet..."
mc "Probably. But then again, you're always wet around me."
"That earns me a playful punch on the shoulder."
show fia c_smile with dissolve
f "Come on, we'd better hurry."
#Need to make a rain overlay of sorts
scene bg outside_rain with dissolve
show rain
"..."
"It's times like this that make me consider always carrying an umbrella around."
"Even my underwear is drenched. And not in the good way."
"We're still a good mile and a half away from our apartment too."
show fia cw_pout with dissolve
"Not that I mind in the slightest."
"Fia, on the other hand..."
f "Grrrrr..."
"I'm torn between wanting to laugh and to ogle her."
"I know she doesn't mind the latter. Hell she encourages it normally."
"The former, though, I should at least address. Even if it earns me another love tap."
mc "You know, Fia..."
f "What?"
"She sounds absolutely dejected. I hope this works."
mc "If I didn't know any better, I'd say you planned this whole thing from the start."
"It takes her a moment to realize I'm talking about her outfit."
show fia cw_smile with dissolve
f "Not really. But it's a nice bonus, I guess?"
mc "I'd say so."
f "Too bad the effect doesn't work so well on your clothes, though."
"I chuckle at that."
mc "First the groping, now this... I'm starting to wonder if I'm just a piece of meat to you."
"Again, it's all in good fun. And I make she knows for sure by winking."
"It seems to have the intended effect as wisps of flames begin to seep from the corners of her eyes."
"These aren't the flames of pure lust like from before, though."
"These are ones of passion and adoration."
show fia cw_fire with dissolve
f "Never. But even if you were a literal steak I'd love you all the same."
mc "Plus, if I were, you'd have more fun eating me."
"She stops, dead in her tracks. I follow suit shortly thereafter."
f "I don't think I'll ever get tired of devouring you, hun."
mc "And I you, dear."
"For a moment we just stare at each other in silence."
"We're both trying not to laugh at our sappiness."
"There's always so much sexual tension that sometimes we need moments like this for perspective."
"Luckily for me, Fia cracks first."
show fia cw_fire_smile with dissolve
f "Maybe I should get caught in the rain more often~"
"I can't resist reaching out and pulling her close."
#zoom in
"Both of our clothes makes a sort of squelching noise as the wet fabrics rub against each other."
"Even through the chilling rain, I can feel Fia's swelling body heat as she presses against me."
mc "I certainly wouldn't mind."
"We look into each others eyes. Then, at the same time, our lips meet in midair."
"It's a kiss of affection. Short, sweet, and just long enough to mean something."
"Though, for all it matters, we could remain like this for hours. Rain and all."
"Fia's the first to pull away."
#zoom out
show fia cw_fire with dissolve
f "I hope you know what you've started~"
"I let out a hearty laugh."
mc "Of course I do. And I know you know I do because your hand wandered again."
"It's true. My fly seems to be suspiciously down."
show fia cw_pout with dissolve
f "Oh I am SO going to tear into you when we get home."
"I wink at her and start walking again."
mc "We'd better stop flirting shamelessly and hurry then."
show fia cw_smile with dissolve
"As we plod onwards, Fia whispers something under her breath."
"I don't quite catch all of it though."
f "...so hard~"
jump apartment
#MC & Fia enter the apartment. They quickly strip and hit the shower.
label apartment:
scene bg apartment with fade
"We arrive at our apartment some ten minutes later."
"Seemingly just in time too, as the rain starts coming down even harder."
show fia cw_smile with dissolve
f "I guess it's a good thing we didn't do it out there."
mc "Yeah, we'd probably be drowning right about now if we did."
"She flashes a grin at me, then strips off her wet clothes."
show fia nw_smile with dissolve
f "Come on. Before we get too riled up let's at least try to wash up."
"I follow her example and ditch my soggy, drenched coverings."
"Now both nude, we both take a moment to drink the other in. Fia even bites her lip in anticipation."
show fia nw_bite with dissolve
mc "So about that shower..."
f "Huh...?"
"I repeat myself. Which is luckily all it takes to snap her out of it."
show fia nw_fire with dissolve
f "Oh, right. Come on, you~"
"She turns deftly on her heels and starts leading the way to the bathroom."
"All along the way she puts extra emphasis into her hip movements."
"Even her tail gets into the seductive action: swishing side to side to accent her stride."
"...have I mentioned how lucky I am yet?"
scene bg shower with fade
#Originally we had a choice here. But fuck that noise, Fia's switch has been flipped!
jump shower_sex
#Soapy fun times followed by some slippery sex
label shower_sex:
show fia nws_smile with dissolve
f "Hurry up and write this so I can play with my bone!"
jump post_shower
#After the shower scene
label post_shower:
show fia s_bite with dissolve
f "The sooner you write this the sooner I can love you tenderly in our fort~"
return
|
ELHmk1/TouchFluffyTail
|
HellhoundVN/game/scripts/Old Script.py
|
Python
|
gpl-3.0
| 13,460
|
[
"CRYSTAL"
] |
b24c5c7a0452e3d4f560d1a050c93abfde4169ad3680358c85165a9e70b8ac22
|
__author__ = 'zeek'
import time
import notify2
from splinter import Browser
import html
browser = Browser()
browser.visit('http://www.douyutv.com/zeek')
last_list = 0
last_yuwan = 0
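# Poll the live page in a loop: collect chat messages, nicknames and yuwan (gift) counts,
# and raise a desktop notification for each new chat entry; the page is reloaded periodically.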
while True:
if last_list % 20 == 10:
browser.reload()
time.sleep(2)
try:
element_list_chat = browser.find_by_xpath('//span[@class="text_cont"]')
element_list_uid = browser.find_by_xpath('//a[@class="nick js_nick"]')
element_list_yuwan = browser.find_by_xpath('//p[@class="text_cont"]/i')
except Exception as err:
print(err)
continue
if element_list_uid and last_list != len(element_list_uid):
if last_yuwan != len(element_list_yuwan):
element_list_chat.append('送了' + element_list_yuwan[-1].html + '鱼丸')  # "sent <count> yuwan (fish-ball gifts)"
last_yuwan += 1
else:
for i in range(last_yuwan):
element_list_chat.insert(0, 0)
print(element_list_chat[last_list].html)
notify2.init("douyu")
header = html.unescape(element_list_uid[last_list].html)
content = html.unescape(element_list_chat[last_list].html)
n = notify2.Notification(header, content)
n.show()
last_list += 1
else:
print('empty')
|
zephyrzoom/douyu
|
test/splinter_t.py
|
Python
|
mit
| 1,248
|
[
"VisIt"
] |
92488c0e65943c1b92fc70676e0faf67ae730efd23f131aad6562744b3ef93d0
|
# -*- coding: utf-8 -*-
"""
This script opens data files and extracts relevant data. Then, using a sklearn
gaussian process package, fits a gaussian to the crosswind (1d) temperature
measurements and interpolates temparture values (relative, not absolute) at a
2 mm interval.
Created on Mon Dec 01 11:51:44 2014
@authors: Sharri
"""
from scipy.optimize import curve_fit
from sklearn import gaussian_process#,grid_search,svm
import numpy as np
import matplotlib.pyplot as plt
#SECTION 1 (not sure if this should include these imports)
import scipy.io as io
import glob
allfiles = glob.glob(r'C:\Users\Sharri\Dropbox\Le grand dossier du Sharri\Data\Temperature Data\python temp data\*csv')
datafiles = []
for i in range(len(allfiles)):
datafiles.append(np.genfromtxt(allfiles[i],delimiter = ','))
#load data
xyz_observed = datafiles[5]#np.genfromtxt('lh_temp_data_complete.csv', delimiter = ' ')
T_raw = datafiles[7] #np.genfromtxt('lh50temp_raw_data.csv',delimiter = ' ')
T_sd = np.nanstd(T_raw,axis=0)
#move origin to corner; this may be causing problems.
xyz_observed[:,1] = xyz_observed[:,1]+0.127;
if np.any(np.isnan(xyz_observed[:,3])):
NaN_locations = np.where(np.isnan(xyz_observed[:,3])==1) #find the indices of Nans
# T_time_avg_3d = np.delete(T_time_avg_3d, NaN_locations)
xyz_observed = np.delete(xyz_observed, NaN_locations, axis = 0)
T_raw = np.delete(T_raw, NaN_locations,axis = 1)
T_sd = np.delete(T_sd, NaN_locations)
#SECTION 3
#TODO: move everything below this to a function and/or separate script
#calculate noise term (the 'nugget'): relative standard deviation of each temperature measurement
nugget = T_sd/xyz_observed[:,3]
sweep = xyz_observed  #data to train gridsearch
check = np.arange(100,200)
sweep = np.delete(sweep, check, axis = 0) #remove some points to test values
nugget = np.delete(nugget, check)
#TODO: make into function
gp = gaussian_process.GaussianProcess(regr = 'linear',
corr = 'squared_exponential',
theta0 = 4e-2,
thetaL = 1e-4, #best = 1e-6
thetaU = 1, #best = 1
normalize = True,
nugget = nugget)
#when height = 1, thetaL = .1, thetaU = .3
#when height = 0, thetaL = 10e-2,thetaU = .3
#
#param_grid = [{'thetaL':[1e-10 1e-8 1e-]
#
#gp = grid_search.ParameterGrid()
#gp.fit(xyz_observed[:,:3], xyz_observed[:,3])
gp.fit(sweep[:,:3],sweep[:,3])
#make prediction location grid
#x_predict = np.atleast_2d(np.linspace(.20, .900, 20)) #(-0.127, 0.127, 25)) #2 mm prediction sites
#y_predict = np.atleast_2d(np.linspace(0., 0.254, 25))
#z_predict = np.atleast_2d(np.linspace(.1, .240, 25))
x_predict = np.atleast_2d(xyz_observed[check,0])
y_predict = np.atleast_2d(xyz_observed[check,1])
z_predict = np.atleast_2d(xyz_observed[check,2])
#x1,x2,x3 = np.meshgrid(x_predict, y_predict, z_predict) #for proper ordering use z-y-x
#xyz_predict = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size), x3.reshape(x3.size)]).T
T_prediction, y_prediction_MSE = gp.predict(xyz_observed[check, :3], eval_MSE = True) #produce predicted y values
sigma = np.sqrt(y_prediction_MSE) #get SD of fit at each x_predicted location (for confidence interval)
plt.figure()
plt.plot(xyz_observed[check,3], T_prediction, '.')
plt.plot(np.arange(19,25),np.arange(19,25),'k')
plt.xlabel('Observed Temp')
plt.ylabel('Predicted Temp')
#predicted_draw = np.random.normal(T_prediction, sigma)
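# NOTE: sklearn's GaussianProcess class used above was removed in scikit-learn 0.20.
# The block below is a minimal sketch of an equivalent fit with the modern
# GaussianProcessRegressor API; the RBF length scale and the per-point noise passed via
# `alpha` are assumptions (rough analogues of theta0 and the nugget), not tuned values.
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel
kernel = ConstantKernel(1.0) * RBF(length_scale=0.04)  # rough analogue of theta0 = 4e-2
gpr = GaussianProcessRegressor(kernel=kernel, alpha=nugget**2, normalize_y=True)
gpr.fit(sweep[:, :3], sweep[:, 3])
T_pred_new, T_std_new = gpr.predict(xyz_observed[check, :3], return_std=True)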
|
sazamore/GP-temperature-interpolation
|
gptempdata-3d.py
|
Python
|
mit
| 3,540
|
[
"Gaussian"
] |
bd18ce3925c30140974d604a425696cf058c45a5779700dea9daed13ae833fb2
|
import numpy as np
from flatdict import FlatDict
from xml.etree import ElementTree as ET
from openpnm.io import GenericIO, Dict
from openpnm.utils import logging, Workspace
logger = logging.getLogger(__name__)
ws = Workspace()
class VTK(GenericIO):
r"""
The Visualization Toolkit (VTK) format defined by Kitware and used by
Paraview.
Notes
-----
Because OpenPNM data is unstructured, the actual output format is VTP,
not VTK.
"""
_TEMPLATE = """
<?xml version="1.0" ?>
<VTKFile byte_order="LittleEndian" type="PolyData" version="0.1">
<PolyData>
<Piece NumberOfLines="0" NumberOfPoints="0">
<Points>
</Points>
<Lines>
</Lines>
<PointData>
</PointData>
<CellData>
</CellData>
</Piece>
</PolyData>
</VTKFile>
""".strip()
@classmethod
def export_data(cls, network, phases=[], filename="", delim=" | ",
fill_nans=None, fill_infs=None):
r"""
Save network and phase data to a single vtp file for visualizing in
Paraview.
Parameters
----------
network : GenericNetwork
The Network containing the data to be written
phases : list, optional
A list containing GenericPhase(s) containing data to be
written
filename : str, optional
Filename to write data. If no name is given the file is named
after the network
delim : str
Specify which character is used to delimit the data names. The
default is ' | ' which creates a nice clean output in the Paraview
pipeline viewer (e.g. net | property | pore | diameter)
fill_nans : scalar
The value to use to replace NaNs with. The VTK file format does
not work with NaNs, so they must be dealt with. The default is
`None` which means property arrays with NaNs are not written to the
file. Other useful options might be 0 or -1, but the user must
be aware that these are not real values, only place holders.
fill_infs : scalar
The value to use to replace infs with. The default is ``None``
which means that property arrays containing infs will *not*
be written to the file, and a warning will be issued. A useful
value is 0 or -1, but as with ``fill_nans`` the user must be
aware that these are only place holders, not real values.
project, network, phases = cls._parse_args(network=network, phases=phases)
# Check if any of the phases has time series
transient = GenericIO._is_transient(phases=phases)
if transient:
logger.warning(
"vtp format does not support transient data, " + "use xdmf instead"
)
if filename == "":
filename = project.name
filename = cls._parse_filename(filename=filename, ext="vtp")
am = Dict.to_dict(
network=network,
phases=phases,
interleave=True,
categorize_by=["object", "data"],
)
am = FlatDict(am, delimiter=delim)
key_list = list(sorted(am.keys()))
network = network[0]
points = network["pore.coords"]
pairs = network["throat.conns"]
num_points = np.shape(points)[0]
num_throats = np.shape(pairs)[0]
root = ET.fromstring(VTK._TEMPLATE)
piece_node = root.find("PolyData").find("Piece")
piece_node.set("NumberOfPoints", str(num_points))
piece_node.set("NumberOfLines", str(num_throats))
points_node = piece_node.find("Points")
coords = VTK._array_to_element("coords", points.T.ravel("F"), n=3)
points_node.append(coords)
lines_node = piece_node.find("Lines")
connectivity = VTK._array_to_element("connectivity", pairs)
lines_node.append(connectivity)
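# Offsets mark where each cell's connectivity ends: each line (throat) uses 2 points, so offsets run 2, 4, 6, ...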
offsets = VTK._array_to_element("offsets", 2 * np.arange(len(pairs)) + 2)
lines_node.append(offsets)
point_data_node = piece_node.find("PointData")
cell_data_node = piece_node.find("CellData")
for key in key_list:
array = am[key]
if array.dtype == "O":
logger.warning(key + " has dtype object," + " will not write to file")
else:
if array.dtype == bool:
array = array.astype(int)
if np.any(np.isnan(array)):
if fill_nans is None:
logger.warning(key + " has nans," + " will not write to file")
continue
else:
array[np.isnan(array)] = fill_nans
if np.any(np.isinf(array)):
if fill_infs is None:
logger.warning(key + " has infs," + " will not write to file")
continue
else:
array[np.isinf(array)] = fill_infs
element = VTK._array_to_element(key, array)
if array.size == num_points:
point_data_node.append(element)
elif array.size == num_throats:
cell_data_node.append(element)
tree = ET.ElementTree(root)
tree.write(filename)
with open(filename, "r+") as f:
string = f.read()
string = string.replace("</DataArray>", "</DataArray>\n\t\t\t")
f.seek(0)
# consider adding header: '<?xml version="1.0"?>\n'+
f.write(string)
@classmethod
def import_data(cls, filename, project=None, delim=" | "):
r"""
Read in pore and throat data from a saved VTK file.
Parameters
----------
filename : str (optional)
The name of the file containing the data to import. The formatting
of this file is outlined below.
project : Project
A GenericNetwork is created and added to the specified Project.
If no Project is supplied then one will be created and returned.
"""
net = {}
filename = cls._parse_filename(filename, ext="vtp")
tree = ET.parse(filename)
piece_node = tree.find("PolyData").find("Piece")
# Extract connectivity
conn_element = piece_node.find("Lines").find("DataArray")
conns = VTK._element_to_array(conn_element, 2)
# Extract coordinates
coord_element = piece_node.find("Points").find("DataArray")
coords = VTK._element_to_array(coord_element, 3)
# Extract pore data
for item in piece_node.find("PointData").iter("DataArray"):
key = item.get("Name")
array = VTK._element_to_array(item)
net[key] = array
# Extract throat data
for item in piece_node.find("CellData").iter("DataArray"):
key = item.get("Name")
array = VTK._element_to_array(item)
net[key] = array
if project is None:
project = ws.new_project()
project = Dict.from_dict(dct=net, project=project, delim=delim)
# Clean up data values, if necessary, like convert array's of
# 1's and 0's into boolean.
project = cls._convert_data(project)
# Add coords and conns to network
network = project.network
network.update({"throat.conns": conns})
network.update({"pore.coords": coords})
return project
@classmethod
def _array_to_element(cls, name, array, n=1):
dtype_map = {
"int8": "Int8",
"int16": "Int16",
"int32": "Int32",
"int64": "Int64",
"uint8": "UInt8",
"uint16": "UInt16",
"uint32": "UInt32",
"uint64": "UInt64",
"float32": "Float32",
"float64": "Float64",
"str": "String",
}
element = None
if str(array.dtype) in dtype_map.keys():
element = ET.Element("DataArray")
element.set("Name", name)
element.set("NumberOfComponents", str(n))
element.set("type", dtype_map[str(array.dtype)])
element.text = "\t".join(map(str, array.ravel()))
return element
@classmethod
def _element_to_array(cls, element, n=1):
string = element.text
dtype = element.get("type")
array = np.fromstring(string, sep="\t")
array = array.astype(dtype.lower())
if n != 1:
array = array.reshape(array.size // n, n)
return array
def from_vtk(filename, project=None, delim=' | '):
project = VTK.import_data(filename=filename, project=project, delim=delim)
return project
from_vtk.__doc__ = VTK.import_data.__doc__
def to_vtk(network, phases=[], filename="", delim=" | ",
fill_nans=None, fill_infs=None):
VTK.export_data(network=network, phases=phases, filename=filename,
delim=delim, fill_nans=fill_nans, fill_infs=fill_infs)
to_vtk.__doc__ = VTK.export_data.__doc__
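# A minimal round-trip usage sketch (hedged: "demo_net" is a hypothetical file name,
# and the Cubic network constructor is assumed to be available as in OpenPNM 2.x):
if __name__ == "__main__":
    import openpnm as op
    pn = op.network.Cubic(shape=[3, 3, 3])        # small demo network
    to_vtk(network=pn, filename="demo_net")       # writes demo_net.vtp
    proj = from_vtk(filename="demo_net.vtp")      # reads it back into a new Project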
|
PMEAL/OpenPNM
|
openpnm/io/_vtk.py
|
Python
|
mit
| 9,179
|
[
"ParaView",
"VTK"
] |
f5fe0b8be2a3fae6b1d9f1adf95b4fd388eaf53608a4854705c4c2e332874c46
|
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from nose.plugins.attrib import attr
from textwrap import dedent
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.tests.helpers import EventsTestMixin
class ProblemsTest(UniqueCourseTest):
"""
Base class for tests of problems in the LMS.
"""
def setUp(self):
super(ProblemsTest, self).setUp()
self.username = "test_student_{uuid}".format(uuid=self.unique_id[0:8])
self.email = "{username}@example.com".format(username=self.username)
self.password = "keep it secret; keep it safe."
self.xqueue_grade_response = None
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with a hierarchy and problems
course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
problem = self.get_problem()
sequential = self.get_sequential()
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
sequential.add_children(problem)
)
).install()
# Auto-auth register for the course.
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
password=self.password,
course_id=self.course_id,
staff=True
).visit()
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
def get_sequential(self):
""" Subclasses can override this to add a sequential with metadata """
return XBlockFixtureDesc('sequential', 'Test Subsection')
@attr(shard=9)
class ProblemClarificationTest(ProblemsTest):
"""
Tests the <clarification> element that can be used in problem XML.
"""
def get_problem(self):
"""
Create a problem with a <clarification>
"""
xml = dedent("""
<problem markdown="null">
<text>
<p>
Given the data in Table 7 <clarification>Table 7: "Example PV Installation Costs",
Page 171 of Roberts textbook</clarification>, compute the ROI
<clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
</p>
<numericalresponse answer="6.5">
<label>Enter the annual ROI</label>
<textline trailing_text="%" />
</numericalresponse>
</text>
</problem>
""")
return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)
def test_clarification(self):
"""
Test that we can see the <clarification> tooltips.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
problem_page.click_clarification(0)
self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
problem_page.click_clarification(1)
tooltip_text = problem_page.visible_tooltip_text
self.assertIn('Return on Investment', tooltip_text)
self.assertIn('per year', tooltip_text)
self.assertNotIn('strong', tooltip_text)
@attr(shard=9)
class ProblemHintTest(ProblemsTest, EventsTestMixin):
"""
Base test class for problem hint tests.
"""
def verify_check_hint(self, answer, answer_text, expected_events):
"""
Verify clicking Check shows the extended hint in the problem message.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_text[0], u'question text')
problem_page.fill_answer(answer)
problem_page.click_submit()
self.assertEqual(problem_page.message_text, answer_text)
# Check for corresponding tracking event
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
number_of_matches=1
)
self.assert_events_match(expected_events, actual_events)
def verify_demand_hints(self, first_hint, second_hint, expected_events):
"""
Test clicking through the demand hints and verify the events sent.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# The hint notification should not be visible on load
self.assertFalse(problem_page.is_hint_notification_visible())
# The two Hint buttons should be enabled: one visible, one present in the DOM but not visible
self.assertEqual([None, None], problem_page.get_hint_button_disabled_attr())
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertTrue(problem_page.is_hint_notification_visible())
self.assertEqual(problem_page.hint_text, first_hint)
# Now there are two "hint" buttons, as there is also one in the hint notification.
self.assertEqual([None, None], problem_page.get_hint_button_disabled_attr())
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, second_hint)
# Now both "hint" buttons should be disabled, as there are no more hints.
self.assertEqual(['true', 'true'], problem_page.get_hint_button_disabled_attr())
# Now click on "Review" and make sure the focus goes to the correct place.
problem_page.click_review_in_notification(notification_type='hint')
problem_page.wait_for_focus_on_problem_meta()
# Check corresponding tracking events
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
number_of_matches=2
)
self.assert_events_match(expected_events, actual_events)
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
@attr(shard=9)
class ProblemNotificationTests(ProblemsTest):
"""
Tests that the notifications are visible when expected.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 10},
grader_type='Final Exam')
def test_notification_updates(self):
"""
Verifies that notifications are removed and hidden when they should be
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.click_choice("choice_2")
self.assertFalse(problem_page.is_success_notification_visible())
problem_page.click_submit()
problem_page.wait_success_notification()
self.assertEqual('Question 1: correct', problem_page.status_sr_text)
# Clicking Save should clear the submit notification
problem_page.click_save()
self.assertFalse(problem_page.is_success_notification_visible())
problem_page.wait_for_save_notification()
# Changing the answer should clear the save notification
problem_page.click_choice("choice_1")
self.assertFalse(problem_page.is_save_notification_visible())
problem_page.click_save()
problem_page.wait_for_save_notification()
# Submitting the problem again should clear the save notification
problem_page.click_submit()
problem_page.wait_incorrect_notification()
self.assertEqual('Question 1: incorrect', problem_page.status_sr_text)
self.assertFalse(problem_page.is_save_notification_visible())
@attr(shard=9)
class ProblemFeedbackNotificationTests(ProblemsTest):
"""
Tests that the feedback notifications are visible when expected.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 10},
grader_type='Final Exam')
def test_feedback_notification_hides_after_save(self):
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.click_choice("choice_0")
problem_page.click_submit()
problem_page.wait_for_feedback_message_visibility()
problem_page.click_choice("choice_1")
problem_page.click_save()
self.assertFalse(problem_page.is_feedback_message_notification_visible())
@attr(shard=9)
class ProblemSaveStatusUpdateTests(ProblemsTest):
"""
Tests the problem status updates correctly with an answer change and save.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 10},
grader_type='Final Exam')
def test_status_removed_after_save_before_submit(self):
"""
Scenario: User should see the status removed when saving after submitting an answer and reloading the page.
Given that I have loaded the problem page
And a choice has been selected and submitted
When I change the choice
And Save the problem
And reload the problem page
Then I should see the save notification and I should not see any indication of problem status
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.click_choice("choice_1")
problem_page.click_submit()
problem_page.wait_incorrect_notification()
problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')
problem_page.click_choice("choice_2")
self.assertFalse(problem_page.is_expected_status_visible('label.choicegroup_incorrect'))
problem_page.click_save()
problem_page.wait_for_save_notification()
# Refresh the page and the status should not be added
self.courseware_page.visit()
self.assertFalse(problem_page.is_expected_status_visible('label.choicegroup_incorrect'))
self.assertTrue(problem_page.is_save_notification_visible())
@attr(shard=9)
class ProblemSubmitButtonMaxAttemptsTest(ProblemsTest):
"""
Tests that the Submit button disables after the number of max attempts is reached.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 2},
grader_type='Final Exam')
def test_max_attempts(self):
"""
Verifies that the Submit button disables when the max number of attempts is reached.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Submit first answer (correct)
problem_page.click_choice("choice_2")
self.assertFalse(problem_page.is_submit_disabled())
problem_page.click_submit()
problem_page.wait_success_notification()
# Submit second and final answer (incorrect)
problem_page.click_choice("choice_1")
problem_page.click_submit()
problem_page.wait_incorrect_notification()
# Make sure that the Submit button disables.
problem_page.wait_for_submit_disabled()
@attr(shard=9)
class ProblemSubmitButtonPastDueTest(ProblemsTest):
"""
Tests that the Submit button is disabled if it is past the due date.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 2},
grader_type='Final Exam')
def get_sequential(self):
""" Subclasses can override this to add a sequential with metadata """
return XBlockFixtureDesc('sequential', 'Test Subsection', metadata={'due': "2016-10-01T00"})
def test_past_due(self):
"""
Verifies that the Submit button is disabled when the problem is past its due date.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Should have Submit button disabled on original rendering.
problem_page.wait_for_submit_disabled()
# Select a choice, and make sure that the Submit button remains disabled.
problem_page.click_choice("choice_2")
problem_page.wait_for_submit_disabled()
@attr(shard=9)
class ProblemExtendedHintTest(ProblemHintTest, EventsTestMixin):
"""
Test that extended hint features plumb through to the page html and tracking log.
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="B">hint</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>demand-hint1</hint>
<hint>demand-hint2</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'TITLE', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.verify_check_hint(
'B',
u'Answer\nIncorrect: hint',
[
{
'event':
{
'hint_label': u'Incorrect:',
'trigger_type': 'single',
'student_answer': [u'B'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': 'hint'}]
}
}
]
)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hint in its div.
"""
self.verify_demand_hints(
u'Hint (1 of 2): demand-hint1',
u'Hint (1 of 2): demand-hint1\nHint (2 of 2): demand-hint2',
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}}
]
)
@attr(shard=9)
class ProblemHintWithHtmlTest(ProblemHintTest, EventsTestMixin):
"""
Tests that hints containing html get rendered properly
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="C"><a href="#">aa bb</a> cc</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>aa <a href="#">bb</a> cc</hint>
<hint><a href="#">dd ee</a> ff</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'PROBLEM HTML HINT TEST', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.verify_check_hint(
'C',
u'Answer\nIncorrect: aa bb cc',
[
{
'event':
{
'hint_label': u'Incorrect:',
'trigger_type': 'single',
'student_answer': [u'C'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': '<a href="#">aa bb</a> cc'}]
}
}
]
)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hints in a notification area.
"""
self.verify_demand_hints(
u'Hint (1 of 2): aa bb cc',
u'Hint (1 of 2): aa bb cc\nHint (2 of 2): dd ee ff',
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa <a href="#">bb</a> cc'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'<a href="#">dd ee</a> ff'}}
]
)
@attr(shard=9)
class ProblemWithMathjax(ProblemsTest):
"""
Tests MathJax rendering in a problem and its hints.
"""
def get_problem(self):
"""
Create a problem with a <MathJax> in body and hint
"""
xml = dedent(r"""
<problem>
<p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
<multiplechoiceresponse>
<label>Answer this?</label>
<choicegroup type="MultipleChoice">
<choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
<choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>mathjax should work1 \(E=mc^2\) </hint>
<hint>mathjax should work2 [mathjax]E=mc^2[/mathjax]</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'MATHJAX TEST PROBLEM', data=xml)
def test_mathjax_in_hint(self):
"""
Test that MathJax has rendered successfully in the problem hint
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, "MATHJAX TEST PROBLEM")
problem_page.verify_mathjax_rendered_in_problem()
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertEqual(
["<strong>Hint (1 of 2): </strong>mathjax should work1"],
problem_page.extract_hint_text_from_html
)
problem_page.verify_mathjax_rendered_in_hint()
# Rotate the hint and check the problem hint
problem_page.click_hint()
self.assertEqual(
[
"<strong>Hint (1 of 2): </strong>mathjax should work1",
"<strong>Hint (2 of 2): </strong>mathjax should work2"
],
problem_page.extract_hint_text_from_html
)
problem_page.verify_mathjax_rendered_in_hint()
@attr(shard=9)
class ProblemPartialCredit(ProblemsTest):
"""
Makes sure that the partial credit is appearing properly.
"""
def get_problem(self):
"""
Create a problem with partial credit.
"""
xml = dedent("""
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<label>How many miles away from Earth is the sun? Use scientific notation to answer.</label>
<formulaequationinput/>
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
def test_partial_credit(self):
"""
Test that we can see the partial credit value and feedback.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
problem_page.fill_answer_numerical('-1')
problem_page.click_submit()
problem_page.wait_for_status_icon()
self.assertTrue(problem_page.simpleprob_is_partially_correct())
@attr(shard=9)
class LogoutDuringAnswering(ProblemsTest):
"""
Tests for the scenario where a user is logged out (their session expires
or is revoked) just before they click "check" on a problem.
"""
def get_problem(self):
"""
Create a problem.
"""
xml = dedent("""
<problem>
<numericalresponse answer="1">
<label>The answer is 1</label>
<formulaequationinput/>
<responseparam type="tolerance" default="0.01" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml)
def log_user_out(self):
"""
Log the user out by deleting their session cookie.
"""
self.browser.delete_cookie('sessionid')
def test_logout_after_click_redirect(self):
"""
1) User goes to a problem page.
2) User fills out an answer to the problem.
3) User is logged out because their session id is invalidated or removed.
4) User clicks "check", and sees a confirmation modal asking them to
re-authenticate, since they've just been logged out.
5) User clicks "ok".
6) User is redirected to the login page.
7) User logs in.
8) User is redirected back to the problem page they started out on.
9) User is able to submit an answer
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
problem_page.fill_answer_numerical('1')
self.log_user_out()
with problem_page.handle_alert(confirm=True):
problem_page.click_submit()
login_page = CombinedLoginAndRegisterPage(self.browser)
login_page.wait_for_page()
login_page.login(self.email, self.password)
problem_page.wait_for_page()
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
problem_page.fill_answer_numerical('1')
problem_page.click_submit()
self.assertTrue(problem_page.simpleprob_is_correct())
def test_logout_cancel_no_redirect(self):
"""
1) User goes to a problem page.
2) User fills out an answer to the problem.
3) User is logged out because their session id is invalidated or removed.
4) User clicks "check", and sees a confirmation modal asking them to
re-authenticate, since they've just been logged out.
5) User clicks "cancel".
6) User is not redirected to the login page.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
problem_page.fill_answer_numerical('1')
self.log_user_out()
with problem_page.handle_alert(confirm=False):
problem_page.click_submit()
problem_page.wait_for_page()
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
@attr(shard=9)
class ProblemQuestionDescriptionTest(ProblemsTest):
"""TestCase Class to verify question and description rendering."""
descriptions = [
"A vegetable is an edible part of a plant in tuber form.",
"A fruit is a fertilized ovary of a plant and contains seeds."
]
def get_problem(self):
"""
Create a problem with question and description.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>Eggplant is a _____?</label>
<description>{}</description>
<description>{}</description>
<checkboxgroup>
<choice correct="true">vegetable</choice>
<choice correct="false">fruit</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(*self.descriptions))
return XBlockFixtureDesc('problem', 'Label with Description', data=xml)
def test_question_with_description(self):
"""
Scenario: Test that question and description are rendered as expected.
Given I am enrolled in a course.
When I visit a unit page with a CAPA question.
Then label and description should be rendered correctly.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'Label with Description')
self.assertEqual(problem_page.problem_question, 'Eggplant is a _____?')
self.assertEqual(problem_page.problem_question_descriptions, self.descriptions)
class CAPAProblemA11yBaseTestMixin(object):
"""Base TestCase Class to verify CAPA problem accessibility."""
def test_a11y(self):
"""
Verifies that there are no accessibility issues for a particular problem type
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Set the scope to the problem question
problem_page.a11y_audit.config.set_scope(
include=['.wrapper-problem-response']
)
# Run the accessibility audit.
problem_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class CAPAProblemChoiceA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for checkboxes and multiplechoice CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>question 1 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<checkboxgroup>
<choice correct="true">True</choice>
<choice correct="false">False</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<label>question 2 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<choicegroup type="MultipleChoice">
<choice correct="false">Alpha <choicehint>A hint</choicehint></choice>
<choice correct="true">Beta</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemTextInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify TextInput problem accessibility."""
def get_problem(self):
"""
TextInput problem XML.
"""
xml = dedent("""
<problem>
<stringresponse answer="fight" type="ci">
<label>who wishes to _____ must first count the cost.</label>
<description>Appear weak when you are strong, and strong when you are weak.</description>
<description>In the midst of chaos, there is also opportunity.</description>
<textline size="40"/>
</stringresponse>
<stringresponse answer="force" type="ci">
<label>A leader leads by example not by _____.</label>
<description>The supreme art of war is to subdue the enemy without fighting.</description>
<description>Great results, can be achieved with small forces.</description>
<textline size="40"/>
</stringresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'TEXTINPUT PROBLEM', data=xml)
@attr('a11y')
class CAPAProblemDropDownA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for dropdowns(optioninput) CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for
dropdown problems. Edit this component to replace this template with your own assessment.</p>
<label>Which of the following is a fruit</label>
<description>Choose wisely</description>
<optioninput>
<option correct="False">radish</option>
<option correct="True">appple</option>
<option correct="False">carrot</option>
</optioninput>
</optionresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemNumericalInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests NumericalInput accessibility."""
def get_problem(self):
"""NumericalInput problem XML."""
xml = dedent("""
<problem>
<numericalresponse answer="10*i">
<label>The square of what number is -100?</label>
<description>Use scientific notation to answer.</description>
<formulaequationinput/>
</numericalresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'NUMERICALINPUT PROBLEM', data=xml)
@attr('a11y')
class ProblemMathExpressionInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests MathExpressionInput accessibility."""
def get_problem(self):
"""MathExpressionInput problem XML."""
xml = dedent(r"""
<problem>
<script type="loncapa/python">
derivative = "n*x^(n-1)"
</script>
<formularesponse type="ci" samples="x,n@1,2:3,4#10" answer="$derivative">
<label>Let \( x\) be a variable, and let \( n\) be an arbitrary constant. What is the derivative of \( x^n\)?</label>
<description>Enter the equation</description>
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40"/>
</formularesponse>
</problem>""")
return XBlockFixtureDesc('problem', 'MATHEXPRESSIONINPUT PROBLEM', data=xml)
class ProblemMetaGradedTest(ProblemsTest):
"""
TestCase Class to verify that the graded variable is passed
"""
def get_problem(self):
"""
Problem structure
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml, grader_type='Final Exam')
def test_grader_type_displayed(self):
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
self.assertEqual(problem_page.problem_progress_graded_value, "1 point possible (graded)")
class ProblemMetaUngradedTest(ProblemsTest):
"""
TestCase Class to verify that the ungraded variable is passed
"""
def get_problem(self):
"""
Problem structure
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml)
def test_grader_type_displayed(self):
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
self.assertEqual(problem_page.problem_progress_graded_value, "1 point possible (ungraded)")
|
synergeticsedx/deployment-wipro
|
common/test/acceptance/tests/lms/test_lms_problems.py
|
Python
|
agpl-3.0
| 37,760
|
[
"VisIt"
] |
6347a2910aec178d96ecb3397eacade31e1900dbb125e3e882e7a2c519f57f1d
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse=True)
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.b_path == collection_artifact[0]
assert actual.api is None
assert actual.skip is True
assert actual.versions == set([u'*'])
assert actual.latest_version == u'*'
assert actual.dependencies == {}
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.b_path == collection_artifact[0]
assert actual.api is None
assert actual.skip is True
assert actual.versions == set([to_text(version)])
assert actual.latest_version == to_text(version)
assert actual.dependencies == {'ansible_namespace.collection': '*'}
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
expected = "Collection file at '%s' does not contain a valid json string." % to_native(manifest_path)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_path(collection_artifact[0], True)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.b_path == collection_artifact[0]
assert actual.api is None
assert actual.skip is True
assert actual.versions == set(['*'])
assert actual.latest_version == u'*'
assert actual.dependencies == {}
assert mock_display.call_count == 1
actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
expected_warn = "Collection at '%s' does not have a valid version set, falling back to '*'. Found version: ''" \
% to_text(collection_artifact[0])
assert expected_warn in actual_warn
def test_build_requirement_from_tar(collection_artifact):
actual = collection.CollectionRequirement.from_tar(collection_artifact[1], True, True)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.b_path == collection_artifact[1]
assert actual.api is None
assert actual.skip is False
assert actual.versions == set([u'0.1.0'])
assert actual.latest_version == u'0.1.0'
assert actual.dependencies == {}
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(test_file, True, True)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = "Collection at '%s' does not contain the required file FILES.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_name(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.1.9', u'2.1.10'])
assert actual.latest_version == u'2.1.10'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'1.0.1', u'2.0.1'])
assert actual.latest_version == u'2.0.1'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch):
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.1-beta.1', True,
True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.1-beta.1'])
assert actual.latest_version == u'2.0.1-beta.1'
assert actual.dependencies == {}
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_404 = MagicMock()
mock_404.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
StringIO()), "custom msg")
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_404)
actual = collection.CollectionRequirement.from_name('namespace.collection', [broken_server, galaxy_server],
'>1.0.1', False, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
# assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'1.0.2', u'1.0.3'])
assert actual.latest_version == u'1.0.3'
assert actual.dependencies == {}
assert mock_404.call_count == 1
assert mock_404.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
StringIO()), "")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
expected = "Failed to find collection namespace.collection:*"
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False,
True)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch):
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.0', True,
True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.0'])
assert actual.latest_version == u'2.0.0'
assert actual.dependencies == {}
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '>=2.0.1,<2.0.2',
True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.1'])
assert actual.latest_version == u'2.0.1'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '!=2.0.2',
True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.0', u'2.0.1', u'2.0.3', u'2.0.4', u'2.0.5'])
assert actual.latest_version == u'2.0.5'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
@pytest.mark.parametrize('versions, requirement, expected_filter, expected_latest', [
[['1.0.0', '1.0.1'], '*', ['1.0.0', '1.0.1'], '1.0.1'],
[['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<1.1.0', ['1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<=1.0.5', ['1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '>=1.1.0', ['1.1.0'], '1.1.0'],
[['1.0.0', '1.0.5', '1.1.0'], '!=1.1.0', ['1.0.0', '1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '==1.0.5', ['1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '1.0.5', ['1.0.5'], '1.0.5'],
[['1.0.0', '2.0.0', '3.0.0'], '>=2', ['2.0.0', '3.0.0'], '3.0.0'],
])
def test_add_collection_requirements(versions, requirement, expected_filter, expected_latest):
req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', versions, requirement,
False)
assert req.versions == set(expected_filter)
assert req.latest_version == expected_latest
def test_add_collection_requirement_to_unknown_installed_version(monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
skip=True)
req.add_requirement('parent.collection', '1.0.0')
assert req.latest_version == '*'
assert mock_display.call_count == 1
actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
assert "Failed to validate the collection requirement 'namespace.name:1.0.0' for parent.collection" in actual_warn
def test_add_collection_wildcard_requirement_to_unknown_installed_version():
req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
skip=True)
req.add_requirement(str(req), '*')
assert req.versions == set('*')
assert req.latest_version == '*'
def test_add_collection_requirement_with_conflict(galaxy_server):
expected = "Cannot meet requirement ==1.0.2 for dependency namespace.name from source '%s'. Available versions " \
"before last requirement added: 1.0.0, 1.0.1\n" \
"Requirements from:\n" \
"\tbase - 'namespace.name:==1.0.2'" % galaxy_server.api_server
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '==1.0.2',
False)
def test_add_requirement_to_existing_collection_with_conflict(galaxy_server):
req = collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '*', False)
expected = "Cannot meet dependency requirement 'namespace.name:1.0.2' for collection namespace.collection2 from " \
"source '%s'. Available versions before last requirement added: 1.0.0, 1.0.1\n" \
"Requirements from:\n" \
"\tbase - 'namespace.name:*'\n" \
"\tnamespace.collection2 - 'namespace.name:1.0.2'" % galaxy_server.api_server
with pytest.raises(AnsibleError, match=re.escape(expected)):
req.add_requirement('namespace.collection2', '1.0.2')
def test_add_requirement_to_installed_collection_with_conflict():
source = 'https://galaxy.ansible.com'
req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
skip=True)
expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
"Use --force to overwrite"
with pytest.raises(AnsibleError, match=re.escape(expected)):
req.add_requirement(None, '1.0.2')
def test_add_requirement_to_installed_collection_with_conflict_as_dep():
source = 'https://galaxy.ansible.com'
req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
skip=True)
expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
"Use --force-with-deps to overwrite"
with pytest.raises(AnsibleError, match=re.escape(expected)):
req.add_requirement('namespace.collection2', '1.0.2')
def test_install_skipped_collection(monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
req = collection.CollectionRequirement('namespace', 'name', None, 'source', ['1.0.0'], '*', False, skip=True)
req.install(None, None)
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == "Skipping 'namespace.name' as it is already installed"
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
req = collection.CollectionRequirement.from_tar(collection_tar, True, True)
req.install(to_text(output_path), temp_path)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_tar = collection_artifact[1]
output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(collection, '_download_file', mock_download)
monkeypatch.setattr(galaxy_server, '_available_api_versions', {'v2': 'v2/'})
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
meta = api.CollectionVersionMetadata('ansible_namespace', 'collection', '0.1.0', 'https://downloadme.com',
'myhash', {})
req = collection.CollectionRequirement('ansible_namespace', 'collection', None, galaxy_server,
['0.1.0'], '*', False, metadata=meta)
req.install(to_text(output_path), temp_path)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0] == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][1] == temp_path
assert mock_download.mock_calls[0][1][2] == 'myhash'
assert mock_download.mock_calls[0][1][3] is True
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection.install_collections([(to_text(collection_tar), '*', None,)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 3
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# If we don't delete collection_path it will think the original build skeleton is installed so we expect a skip
collection.install_collections([(to_text(collection_tar), '*', None,)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
    # Msg1 is the warning about the missing MANIFEST.json; we cannot reliably check its
    # content because the line breaks vary with the length of the path.
assert display_msgs[1] == "Process install dependency map"
assert display_msgs[2] == "Starting collection install process"
assert display_msgs[3] == "Skipping 'ansible_namespace.collection' as it is already installed"
# Make sure we don't get stuck in a recursive dependency loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection.install_collections([(to_text(collection_tar), '*', None,)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 3
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
|
ilpianista/ansible
|
test/units/galaxy/test_collection_install.py
|
Python
|
gpl-3.0
| 33,974
|
[
"Galaxy"
] |
7a959739b5c38b4f3d5a7d807818a881845abd210acbabf5aaa6736576752c39
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities used by time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def clip_covariance(
covariance_matrix, maximum_variance_ratio, minimum_variance):
"""Enforce constraints on a covariance matrix to improve numerical stability.
Args:
covariance_matrix: A [..., N, N] batch of covariance matrices.
maximum_variance_ratio: The maximum allowed ratio of two diagonal
entries. Any entries lower than the maximum entry divided by this ratio
will be set to that value.
minimum_variance: A floor for diagonal entries in the returned matrix.
Returns:
A new covariance matrix with the requested constraints enforced. If the
input was positive definite, the output will be too.
"""
# TODO(allenl): Smarter scaling here so that correlations are preserved when
# fiddling with diagonal elements.
diagonal = array_ops.matrix_diag_part(covariance_matrix)
maximum = math_ops.reduce_max(diagonal, axis=-1, keepdims=True)
new_diagonal = gen_math_ops.maximum(
diagonal, maximum / maximum_variance_ratio)
return array_ops.matrix_set_diag(
covariance_matrix, math_ops.maximum(new_diagonal, minimum_variance))
def block_diagonal(matrices, dtype=dtypes.float32, name="block_diagonal"):
r"""Constructs block-diagonal matrices from a list of batched 2D tensors.
Args:
matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
matrices with the same batch dimension).
dtype: Data type to use. The Tensors in `matrices` must match this dtype.
name: A name for the returned op.
Returns:
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
"""
matrices = [ops.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tensor_shape.Dimension(0)
blocked_cols = tensor_shape.Dimension(0)
batch_shape = tensor_shape.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = math_ops.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
row_before_length = current_column
current_column += matrix_shape[-1]
row_after_length = ret_columns - current_column
row_blocks.append(
array_ops.pad(
tensor=matrix,
paddings=array_ops.concat(
[
array_ops.zeros(
[array_ops.rank(matrix) - 1, 2], dtype=dtypes.int32), [(
row_before_length, row_after_length)]
],
axis=0)))
blocked = array_ops.concat(row_blocks, -2, name=name)
blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
return blocked
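# Illustrative sketch (added for clarity; not part of the original module): combining a
# 2x2 block and a 3x3 block yields a 5x5 matrix with the blocks on the main diagonal
# and zeros elsewhere. The helper below only builds the graph and is never called here.
def _block_diagonal_example():
  small = linalg_ops.eye(2)              # [2, 2]
  large = 2. * linalg_ops.eye(3)         # [3, 3]
  return block_diagonal([small, large])  # [5, 5]; `small` top-left, `large` bottom-right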
def power_sums_tensor(array_size, power_matrix, multiplier):
r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).
Args:
array_size: The number of non-trivial sums to pre-compute.
power_matrix: The "A" matrix above.
multiplier: The "B" matrix above
Returns:
A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
S[0] is the zero matrix
S[1] is B
S[2] is A B A^T + B
...and so on
"""
array_size = math_ops.cast(array_size, dtypes.int32)
power_matrix = ops.convert_to_tensor(power_matrix)
identity_like_power_matrix = linalg_ops.eye(
array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
identity_like_power_matrix.set_shape(
ops.convert_to_tensor(power_matrix).get_shape())
transition_powers = functional_ops.scan(
lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
math_ops.range(array_size - 1),
initializer=identity_like_power_matrix)
summed = math_ops.cumsum(
array_ops.concat([
array_ops.expand_dims(multiplier, 0), math_ops.matmul(
batch_times_matrix(transition_powers, multiplier),
transition_powers,
adjoint_b=True)
], 0))
return array_ops.concat(
[array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
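# Worked check (added for clarity): with A = power_matrix and B = multiplier, the cumsum
# above produces B, then A B A^T + B, then A^2 B (A^2)^T + A B A^T + B, and so on;
# prepending the zero matrix gives S[0] = 0, S[1] = B, S[2] = A B A^T + B as documented.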
def matrix_to_powers(matrix, powers):
"""Raise a single matrix to multiple powers."""
matrix_tiled = array_ops.tile(
array_ops.expand_dims(matrix, 0), [array_ops.size(powers), 1, 1])
return batch_matrix_pow(matrix_tiled, powers)
def batch_matrix_pow(matrices, powers):
"""Compute powers of matrices, e.g. A^3 = matmul(matmul(A, A), A).
Uses exponentiation by squaring, with O(log(p)) matrix multiplications to
compute A^p.
Args:
matrices: [batch size x N x N]
powers: Which integer power to raise each matrix to [batch size]
Returns:
The matrices raised to their respective powers, same dimensions as the
"matrices" argument.
"""
def terminate_when_all_zero(current_argument, residual_powers, accumulator):
del current_argument, accumulator # not used for condition
do_exit = math_ops.reduce_any(
math_ops.greater(residual_powers, array_ops.ones_like(residual_powers)))
return do_exit
def do_iteration(current_argument, residual_powers, accumulator):
"""Compute one step of iterative exponentiation by squaring.
The recursive form is:
power(A, p) = { power(matmul(A, A), p / 2) for even p
{ matmul(A, power(matmul(A, A), (p - 1) / 2)) for odd p
power(A, 0) = I
The power(A, 0) = I case is handled by starting with accumulator set to the
identity matrix; matrices with zero residual powers are passed through
unchanged.
Args:
current_argument: On this step, what is the first argument (A^2..^2) to
the (unrolled) recursive function? [batch size x N x N]
residual_powers: On this step, what is the second argument (residual p)?
[batch_size]
accumulator: Accumulates the exterior multiplications from the odd
powers (initially the identity matrix). [batch_size x N x N]
Returns:
Updated versions of each argument for one step of the unrolled
computation. Does not change parts of the batch which have a residual
power of zero.
"""
is_even = math_ops.equal(residual_powers % 2,
array_ops.zeros(
array_ops.shape(residual_powers),
dtype=dtypes.int32))
new_accumulator = array_ops.where(is_even, accumulator,
math_ops.matmul(accumulator,
current_argument))
new_argument = math_ops.matmul(current_argument, current_argument)
do_update = math_ops.greater(residual_powers, 1)
new_residual_powers = residual_powers - residual_powers % 2
new_residual_powers //= 2
# Stop updating if we've reached our base case; some batch elements may
# finish sooner than others
accumulator = array_ops.where(do_update, new_accumulator, accumulator)
current_argument = array_ops.where(do_update, new_argument,
current_argument)
residual_powers = array_ops.where(do_update, new_residual_powers,
residual_powers)
return (current_argument, residual_powers, accumulator)
matrices = ops.convert_to_tensor(matrices)
powers = math_ops.cast(powers, dtype=dtypes.int32)
ident = array_ops.expand_dims(
array_ops.diag(
array_ops.ones([array_ops.shape(matrices)[1]], dtype=matrices.dtype)),
0)
ident_tiled = array_ops.tile(ident, [array_ops.shape(matrices)[0], 1, 1])
(final_argument,
final_residual_power, final_accumulator) = control_flow_ops.while_loop(
terminate_when_all_zero, do_iteration, [matrices, powers, ident_tiled])
return array_ops.where(
math_ops.equal(final_residual_power,
array_ops.zeros_like(
final_residual_power, dtype=dtypes.int32)),
ident_tiled, math_ops.matmul(final_argument, final_accumulator))
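# Illustrative sketch (added for clarity; not part of the original module): the same
# square-and-multiply recursion for a single scalar, to make the while_loop above
# easier to follow. `matmul` becomes ordinary multiplication in the scalar case.
def _scalar_pow_by_squaring(base, power):
  accumulator = 1.0
  while power > 0:
    if power % 2:        # odd power: fold one copy of the argument into the result
      accumulator *= base
    base *= base         # square the argument and halve the remaining power
    power //= 2
  return accumulator     # equals the original base raised to the original power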
# TODO(allenl): would be useful if this was built into batch_matmul
def batch_times_matrix(batch, matrix, adj_x=False, adj_y=False):
"""Multiply a batch of matrices by a single matrix.
Functionally equivalent to:
tf.matmul(batch, array_ops.tile(gen_math_ops.expand_dims(matrix, 0),
[array_ops.shape(batch)[0], 1, 1]),
adjoint_a=adj_x, adjoint_b=adj_y)
Args:
batch: [batch_size x N x M] after optional transpose
matrix: [M x P] after optional transpose
adj_x: If true, transpose the second two dimensions of "batch" before
multiplying.
adj_y: If true, transpose "matrix" before multiplying.
Returns:
[batch_size x N x P]
"""
batch = ops.convert_to_tensor(batch)
matrix = ops.convert_to_tensor(matrix)
assert batch.get_shape().ndims == 3
assert matrix.get_shape().ndims == 2
if adj_x:
batch = array_ops.transpose(batch, [0, 2, 1])
batch_dimension = batch.get_shape()[0].value
first_dimension = batch.get_shape()[1].value
tensor_batch_shape = array_ops.shape(batch)
if batch_dimension is None:
batch_dimension = tensor_batch_shape[0]
if first_dimension is None:
first_dimension = tensor_batch_shape[1]
matrix_first_dimension, matrix_second_dimension = matrix.get_shape().as_list()
batch_reshaped = array_ops.reshape(batch, [-1, tensor_batch_shape[2]])
if adj_y:
if matrix_first_dimension is None:
matrix_first_dimension = array_ops.shape(matrix)[0]
result_shape = [batch_dimension, first_dimension, matrix_first_dimension]
else:
if matrix_second_dimension is None:
matrix_second_dimension = array_ops.shape(matrix)[1]
result_shape = [batch_dimension, first_dimension, matrix_second_dimension]
return array_ops.reshape(
math_ops.matmul(batch_reshaped, matrix, adjoint_b=adj_y), result_shape)
def matrix_times_batch(matrix, batch, adj_x=False, adj_y=False):
"""Like batch_times_matrix, but with the multiplication order swapped."""
return array_ops.transpose(
batch_times_matrix(
batch=batch, matrix=matrix, adj_x=not adj_y, adj_y=not adj_x),
[0, 2, 1])
def make_toeplitz_matrix(inputs, name=None):
"""Make a symmetric Toeplitz matrix from input array of values.
Args:
inputs: a 3-D tensor of shape [num_blocks, block_size, block_size].
name: the name of the operation.
Returns:
a symmetric Toeplitz matrix of shape
[num_blocks*block_size, num_blocks*block_size].
"""
num_blocks = array_ops.shape(inputs)[0]
block_size = array_ops.shape(inputs)[1]
output_size = block_size * num_blocks
lags = array_ops.reshape(math_ops.range(num_blocks), shape=[1, -1])
indices = math_ops.abs(lags - array_ops.transpose(lags))
output = array_ops.gather(inputs, indices)
output = array_ops.reshape(
array_ops.transpose(output, [0, 2, 1, 3]), [output_size, output_size])
return array_ops.identity(output, name=name)
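# Illustrative note (added for clarity): for num_blocks = 3 the gather indices
# abs(lags - lags^T) form the pattern
#   [[0, 1, 2],
#    [1, 0, 1],
#    [2, 1, 0]]
# so inputs[k] fills the k-th block off-diagonal in both directions, which is what
# makes the result a symmetric (block) Toeplitz matrix.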
# TODO(allenl): Investigate alternative parameterizations.
def sign_magnitude_positive_definite(
raw, off_diagonal_scale=0., overall_scale=0.):
"""Constructs a positive definite matrix from an unconstrained input matrix.
We want to keep the whole matrix on a log scale, but also allow off-diagonal
elements to be negative, so the sign of off-diagonal elements is modeled
separately from their magnitude (using the lower and upper triangles
respectively). Specifically:
for i < j, we have:
output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)
output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)
output = output_cholesky^T * output_cholesky
where raw, off_diagonal_scale, and overall_scale are
un-constrained real-valued variables. The resulting values are stable
around zero due to the exponential (and the softsign keeps the function
smooth).
Args:
raw: A [..., M, M] Tensor.
off_diagonal_scale: A scalar or [...] shaped Tensor controlling the relative
scale of off-diagonal values in the output matrix.
overall_scale: A scalar or [...] shaped Tensor controlling the overall scale
of the output matrix.
Returns:
The `output` matrix described above, a [..., M, M] positive definite matrix.
"""
raw = ops.convert_to_tensor(raw)
diagonal = array_ops.matrix_diag_part(raw)
def _right_pad_with_ones(tensor, target_rank):
# Allow broadcasting even if overall_scale and off_diagonal_scale have batch
# dimensions
tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
return array_ops.reshape(tensor,
array_ops.concat(
[
array_ops.shape(tensor), array_ops.ones(
[target_rank - array_ops.rank(tensor)],
dtype=target_rank.dtype)
],
axis=0))
# We divide the log values by 2 to compensate for the squaring that happens
# when transforming Cholesky factors into positive definite matrices.
sign_magnitude = (gen_math_ops.exp(
(raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
_right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
nn.softsign(array_ops.matrix_transpose(raw)))
sign_magnitude.set_shape(raw.get_shape())
cholesky_factor = array_ops.matrix_set_diag(
input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
overall_scale, array_ops.rank(diagonal))) / 2.))
return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)
def transform_to_covariance_matrices(input_vectors, matrix_size):
"""Construct covariance matrices via transformations from input_vectors.
Args:
input_vectors: A [batch size x input size] batch of vectors to transform.
matrix_size: An integer indicating one dimension of the (square) output
matrix.
Returns:
A [batch size x matrix_size x matrix_size] batch of covariance matrices.
"""
combined_values = layers.fully_connected(
input_vectors, matrix_size**2 + 2, activation_fn=None)
return sign_magnitude_positive_definite(
raw=array_ops.reshape(combined_values[..., :-2],
array_ops.concat([
array_ops.shape(combined_values)[:-1],
[matrix_size, matrix_size]
], 0)),
off_diagonal_scale=combined_values[..., -2],
overall_scale=combined_values[..., -1])
def variable_covariance_matrix(
size, name, dtype, initial_diagonal_values=None,
initial_overall_scale_log=0.):
"""Construct a Variable-parameterized positive definite matrix.
Useful for parameterizing covariance matrices.
Args:
size: The size of the main diagonal, the returned matrix having shape [size
x size].
name: The name to use when defining variables and ops.
dtype: The floating point data type to use.
initial_diagonal_values: A Tensor with shape [size] with initial values for
the diagonal values of the returned matrix. Must be positive.
initial_overall_scale_log: Initial value of the bias term for every element
of the matrix in log space.
Returns:
A Variable-parameterized covariance matrix with shape [size x size].
"""
raw_values = variable_scope.get_variable(
name + "_pre_transform",
dtype=dtype,
shape=[size, size],
initializer=init_ops.zeros_initializer())
if initial_diagonal_values is not None:
raw_values += array_ops.matrix_diag(math_ops.log(initial_diagonal_values))
return array_ops.identity(
sign_magnitude_positive_definite(
raw=raw_values,
off_diagonal_scale=variable_scope.get_variable(
name + "_off_diagonal_scale",
dtype=dtype,
initializer=constant_op.constant(-5., dtype=dtype)),
overall_scale=ops.convert_to_tensor(
initial_overall_scale_log, dtype=dtype) +
variable_scope.get_variable(
name + "_overall_scale",
dtype=dtype,
shape=[],
initializer=init_ops.zeros_initializer())),
name=name)
def batch_start_time(times):
return times[:, 0]
def batch_end_time(times):
return times[:, -1]
def log_noninformative_covariance_prior(covariance):
"""Compute a relatively uninformative prior for noise parameters.
Helpful for avoiding noise over-estimation, where noise otherwise decreases
very slowly during optimization.
See:
Villegas, C. On the A Priori Distribution of the Covariance Matrix.
Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.
Args:
covariance: A covariance matrix.
Returns:
For a [p x p] matrix:
log(det(covariance)^(-(p + 1) / 2))
"""
# Avoid zero/negative determinants due to numerical errors
covariance += array_ops.diag(1e-8 * array_ops.ones(
shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
covariance.dtype) / 2.)
return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
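# Worked instance (added for clarity): for a 1x1 covariance matrix [[v]] the exponent is
# -(1 + 1) / 2 = -1, so the returned value is -log(v), i.e. an (improper) 1/v prior on
# the variance.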
def entropy_matched_cauchy_scale(covariance):
"""Approximates a similar Cauchy distribution given a covariance matrix.
  Since Cauchy distributions do not have moments, entropy matching provides one
  way to choose a Cauchy scale parameter that yields a similarly spread
  distribution. The effect is to divide the standard deviation of an independent
  Gaussian by a constant very near 3.
To set the scale of the Cauchy distribution, we first select the diagonals of
`covariance`. Since this ignores cross terms, it overestimates the entropy of
the Gaussian. For each of these variances, we solve for the Cauchy scale
parameter which gives the same entropy as the Gaussian with that
variance. This means setting the (univariate) Gaussian entropy
0.5 * ln(2 * variance * pi * e)
equal to the Cauchy entropy
ln(4 * pi * scale)
Solving, we get scale = sqrt(variance * (e / (8 pi))).
Args:
covariance: A [batch size x N x N] batch of covariance matrices to produce
Cauchy scales for.
Returns:
A [batch size x N] set of Cauchy scale parameters for each part of the batch
and each dimension of the input Gaussians.
"""
return math_ops.sqrt(math.e / (8. * math.pi) *
array_ops.matrix_diag_part(covariance))
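# Numerical check (added for clarity): for unit variance the scale is
# sqrt(e / (8 * pi)) ~= 0.329, so the Gaussian standard deviation is divided by roughly
# 3.04 -- the "constant very near 3" mentioned in the docstring.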
class TensorValuedMutableDenseHashTable(lookup.MutableDenseHashTable):
"""A version of MutableDenseHashTable which stores arbitrary Tensor shapes.
  Since MutableDenseHashTable only allows vector values right now, this class
  simply adds reshape ops on both ends.
"""
def __init__(self, key_dtype, value_dtype, default_value, *args, **kwargs):
self._non_vector_value_shape = array_ops.shape(default_value)
super(TensorValuedMutableDenseHashTable, self).__init__(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=array_ops.reshape(default_value, [-1]),
*args,
**kwargs)
def insert(self, keys, values, name=None):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype)
keys_flat = array_ops.reshape(keys, [-1])
return super(TensorValuedMutableDenseHashTable, self).insert(
keys=keys_flat,
# Each key has one corresponding value, so the shape of the tensor of
# values for every key is key_shape + value_shape
values=array_ops.reshape(values, [array_ops.shape(keys_flat)[0], -1]),
name=name)
def lookup(self, keys, name=None):
keys_flat = array_ops.reshape(
ops.convert_to_tensor(keys, dtype=self._key_dtype), [-1])
return array_ops.reshape(
super(TensorValuedMutableDenseHashTable, self).lookup(
keys=keys_flat, name=name),
array_ops.concat([array_ops.shape(keys), self._non_vector_value_shape],
0))
class TupleOfTensorsLookup(lookup.LookupInterface):
"""A LookupInterface with nested tuples of Tensors as values.
Creates one MutableDenseHashTable per value Tensor, which has some unnecessary
overhead.
"""
def __init__(
self, key_dtype, default_values, empty_key, name, checkpoint=True):
default_values_flat = nest.flatten(default_values)
self._hash_tables = nest.pack_sequence_as(
default_values,
[TensorValuedMutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=default_value.dtype.base_dtype,
default_value=default_value,
empty_key=empty_key,
name=name + "_{}".format(table_number),
checkpoint=checkpoint)
for table_number, default_value
in enumerate(default_values_flat)])
self._name = name
def lookup(self, keys):
return nest.pack_sequence_as(
self._hash_tables,
[hash_table.lookup(keys)
for hash_table in nest.flatten(self._hash_tables)])
def insert(self, keys, values):
nest.assert_same_structure(self._hash_tables, values)
# Avoid race conditions by requiring that all inputs are computed before any
# inserts happen (an issue if one key's update relies on another's value).
values_flat = [array_ops.identity(value) for value in nest.flatten(values)]
with ops.control_dependencies(values_flat):
insert_ops = [hash_table.insert(keys, value)
for hash_table, value
in zip(nest.flatten(self._hash_tables),
values_flat)]
return control_flow_ops.group(*insert_ops)
def check_table_dtypes(self, key_dtype, value_dtype):
# dtype checking is done in the objects in self._hash_tables
pass
def replicate_state(start_state, batch_size):
"""Create batch versions of state.
Takes a list of Tensors, adds a batch dimension, and replicates
batch_size times across that batch dimension. Used to replicate the
non-batch state returned by get_start_state in define_loss.
Args:
start_state: Model-defined state to replicate.
batch_size: Batch dimension for data.
Returns:
Replicated versions of the state.
"""
flattened_state = nest.flatten(start_state)
replicated_state = [
array_ops.tile(
array_ops.expand_dims(state_nonbatch, 0),
array_ops.concat([[batch_size], array_ops.ones(
[array_ops.rank(state_nonbatch)], dtype=dtypes.int32)], 0))
for state_nonbatch in flattened_state
]
return nest.pack_sequence_as(start_state, replicated_state)
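# Shape example (added for clarity): with batch_size = 4, a state Tensor of shape
# [3, 3] is expanded to [1, 3, 3] and tiled to [4, 3, 3]; nested structures are
# flattened, tiled element-wise, and repacked into the original structure.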
Moments = collections.namedtuple("Moments", ["mean", "variance"])
# Currently all of these statistics are computed incrementally (i.e. are updated
# every time a new mini-batch of training data is presented) when this object is
# created in InputStatisticsFromMiniBatch.
InputStatistics = collections.namedtuple(
"InputStatistics",
["series_start_moments", # The mean and variance of each feature in a chunk
# (with a size configured in the statistics
# object) at the start of the series. A tuple of
# (mean, variance), each with shape [number of
# features], floating point. One use is in state
# space models, to keep priors calibrated even as
# earlier parts of the series are presented. If
# this object was created by
# InputStatisticsFromMiniBatch, these moments are
# computed based on the earliest chunk of data
# presented so far. However, there is a race
# condition in the update, so these may reflect
# statistics later in the series, but should
# eventually reflect statistics in a chunk at the
# series start.
"overall_feature_moments", # The mean and variance of each feature over
# the entire series. A tuple of (mean,
# variance), each with shape [number of
# features]. If this object was created by
# InputStatisticsFromMiniBatch, these moments
# are estimates based on the data seen so far.
"start_time", # The first (lowest) time in the series, a scalar
# integer. If this object was created by
# InputStatisticsFromMiniBatch, this is the lowest time seen
# so far rather than the lowest time that will ever be seen
# (guaranteed to be at least as low as the lowest time
# presented in the current minibatch).
"total_observation_count", # Count of data points, a scalar integer. If
# this object was created by
# InputStatisticsFromMiniBatch, this is an
# estimate of the total number of observations
# in the whole dataset computed based on the
# density of the series and the minimum and
# maximum times seen.
])
# TODO(allenl): It would be nice to do something with full series statistics
# when the user provides that.
class InputStatisticsFromMiniBatch(object):
"""Generate statistics from mini-batch input."""
def __init__(self, num_features, dtype, starting_variance_window_size=16):
"""Configure the input statistics object.
Args:
num_features: Number of features for the time series
dtype: The floating point data type to use.
starting_variance_window_size: The number of datapoints to use when
computing the mean and variance at the start of the series.
"""
self._starting_variance_window_size = starting_variance_window_size
self._num_features = num_features
self._dtype = dtype
def initialize_graph(self, features, update_statistics=True):
"""Create any ops needed to provide input statistics.
Should be called before statistics are requested.
Args:
features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
update_statistics: Whether `features` should be used to update adaptive
statistics. Typically True for training and false for evaluation.
Returns:
An InputStatistics object composed of Variables, which will be updated
based on mini-batches of data if requested.
"""
if (TrainEvalFeatures.TIMES in features
and TrainEvalFeatures.VALUES in features):
times = features[TrainEvalFeatures.TIMES]
values = features[TrainEvalFeatures.VALUES]
else:
# times and values may not be available, for example during prediction. We
# still need to retrieve our variables so that they can be read from, even
# if we're not going to update them.
times = None
values = None
# Create/retrieve variables representing input statistics, initialized
# without data to avoid deadlocking if variables are initialized before
# queue runners are started.
with variable_scope.variable_scope("input_statistics", use_resource=True):
statistics = self._create_variable_statistics_object()
with variable_scope.variable_scope(
"input_statistics_auxiliary", use_resource=True):
# Secondary statistics, necessary for the incremental computation of the
# primary statistics (e.g. counts and sums for computing a mean
# incrementally).
auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
num_features=self._num_features, dtype=self._dtype)
if update_statistics and times is not None and values is not None:
# If we have times and values from mini-batch input, create update ops to
# take the new data into account.
assign_op = self._update_statistics_from_mini_batch(
statistics, auxiliary_variables, times, values)
with ops.control_dependencies([assign_op]):
stat_variables = nest.pack_sequence_as(statistics, [
array_ops.identity(tensor) for tensor in nest.flatten(statistics)
])
# Since start time updates have a race condition, ensure that the
# reported start time is at least as low as the lowest time in this
# mini-batch. The start time should converge on the correct value
# eventually even with the race condition, but for example state space
# models have an assertion which could fail without this
# post-processing.
return stat_variables._replace(start_time=gen_math_ops.minimum(
stat_variables.start_time, math_ops.reduce_min(times)))
else:
return statistics
class _AdaptiveInputAuxiliaryStatistics(collections.namedtuple(
"_AdaptiveInputAuxiliaryStatistics",
["max_time_seen", # The maximum time seen (best effort if updated from
# multiple workers; see notes about race condition
# below).
"chunk_count", # The number of chunks seen.
"inter_observation_duration_sum", # The sum across chunks of their "time
# density" (number of times per
# example).
"example_count", # The number of examples seen (each example has a
# single time associated with it and one or more
# real-valued features).
"overall_feature_sum", # The sum of values for each feature. Shape
# [number of features].
"overall_feature_sum_of_squares", # The sum of squared values for each
# feature. Shape [number of features]
])):
"""Extra statistics used to incrementally update InputStatistics."""
def __new__(cls, num_features, dtype):
return super(
InputStatisticsFromMiniBatch # pylint: disable=protected-access
._AdaptiveInputAuxiliaryStatistics,
cls).__new__(
cls,
max_time_seen=variable_scope.get_variable(
name="max_time_seen",
initializer=dtypes.int64.min,
dtype=dtypes.int64,
trainable=False),
chunk_count=variable_scope.get_variable(
name="chunk_count",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int64,
trainable=False),
inter_observation_duration_sum=variable_scope.get_variable(
name="inter_observation_duration_sum",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtype,
trainable=False),
example_count=variable_scope.get_variable(
name="example_count",
shape=[],
dtype=dtypes.int64,
trainable=False),
overall_feature_sum=variable_scope.get_variable(
name="overall_feature_sum",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
overall_feature_sum_of_squares=variable_scope.get_variable(
name="overall_feature_sum_of_squares",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False))
def _update_statistics_from_mini_batch(
self, statistics, auxiliary_variables, times, values):
"""Given mini-batch input, update `statistics` and `auxiliary_variables`."""
values = math_ops.cast(values, self._dtype)
# The density (measured in times per observation) that we see in each part
# of the mini-batch.
batch_inter_observation_duration = (math_ops.cast(
math_ops.reduce_max(times, axis=1) - math_ops.reduce_min(times, axis=1),
self._dtype) / math_ops.cast(
array_ops.shape(times)[1] - 1, self._dtype))
# Co-locate updates with their variables to minimize race conditions when
# updating statistics.
with ops.colocate_with(auxiliary_variables.max_time_seen):
# There is a race condition if this value is being updated from multiple
# workers. However, it should eventually reach the correct value if the
# last chunk is presented enough times.
max_time_seen_assign = state_ops.assign(
auxiliary_variables.max_time_seen,
gen_math_ops.maximum(auxiliary_variables.max_time_seen,
math_ops.reduce_max(times)))
with ops.colocate_with(auxiliary_variables.chunk_count):
chunk_count_assign = state_ops.assign_add(auxiliary_variables.chunk_count,
array_ops.shape(
times,
out_type=dtypes.int64)[0])
with ops.colocate_with(auxiliary_variables.inter_observation_duration_sum):
inter_observation_duration_assign = state_ops.assign_add(
auxiliary_variables.inter_observation_duration_sum,
math_ops.reduce_sum(batch_inter_observation_duration))
with ops.colocate_with(auxiliary_variables.example_count):
example_count_assign = state_ops.assign_add(
auxiliary_variables.example_count,
array_ops.size(times, out_type=dtypes.int64))
# Note: These mean/variance updates assume that all points are equally
# likely, which is not true if _chunks_ are sampled uniformly from the space
# of all possible contiguous chunks, since points at the start and end of
# the series are then members of fewer chunks. For series which are much
# longer than the chunk size (the usual/expected case), this effect becomes
# irrelevant.
with ops.colocate_with(auxiliary_variables.overall_feature_sum):
overall_feature_sum_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum,
math_ops.reduce_sum(values, axis=[0, 1]))
with ops.colocate_with(auxiliary_variables.overall_feature_sum_of_squares):
overall_feature_sum_of_squares_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum_of_squares,
math_ops.reduce_sum(values**2, axis=[0, 1]))
per_chunk_aux_updates = control_flow_ops.group(
max_time_seen_assign, chunk_count_assign,
inter_observation_duration_assign, example_count_assign,
overall_feature_sum_assign, overall_feature_sum_of_squares_assign)
with ops.control_dependencies([per_chunk_aux_updates]):
example_count_float = math_ops.cast(auxiliary_variables.example_count,
self._dtype)
new_feature_mean = (auxiliary_variables.overall_feature_sum /
example_count_float)
overall_feature_mean_update = state_ops.assign(
statistics.overall_feature_moments.mean, new_feature_mean)
overall_feature_var_update = state_ops.assign(
statistics.overall_feature_moments.variance,
# De-biased n / (n - 1) variance correction
example_count_float / (example_count_float - 1.) *
(auxiliary_variables.overall_feature_sum_of_squares /
example_count_float - new_feature_mean**2))
# TODO(b/35675805): Remove this cast
min_time_batch = math_ops.cast(math_ops.argmin(times[:, 0]), dtypes.int32)
def series_start_updates():
# If this is the lowest-time chunk that we have seen so far, update
# series start moments to reflect that. Note that these statistics are
# "best effort", as there are race conditions in the update (however,
# they should eventually converge if the start of the series is
# presented enough times).
mean, variance = nn.moments(
values[min_time_batch, :self._starting_variance_window_size],
axes=[0])
return control_flow_ops.group(
state_ops.assign(statistics.series_start_moments.mean, mean),
state_ops.assign(statistics.series_start_moments.variance,
variance))
with ops.colocate_with(statistics.start_time):
series_start_update = control_flow_ops.cond(
# Update the moments whenever we match or beat the lowest time seen so
# far, to ensure that the series start statistics eventually reach their
# correct values despite race conditions (i.e. statistics.start_time will
# eventually reflect the global lowest time, and the series start moments
# will eventually be recomputed for that time).
math_ops.less_equal(times[min_time_batch, 0],
statistics.start_time),
series_start_updates,
control_flow_ops.no_op)
with ops.control_dependencies([series_start_update]):
# There is a race condition if this update is performed in parallel on
# multiple workers. Since models may be sensitive to being presented
# with times before the putative start time, the value of this
# variable is post-processed above to guarantee that each worker is
# presented with a start time which is at least as low as the lowest
# time in its current mini-batch.
start_time_update = state_ops.assign(statistics.start_time,
gen_math_ops.minimum(
statistics.start_time,
math_ops.reduce_min(times)))
inter_observation_duration_estimate = (
auxiliary_variables.inter_observation_duration_sum / math_ops.cast(
auxiliary_variables.chunk_count, self._dtype))
# Estimate the total number of observations as:
# (end time - start time + 1) * average intra-chunk time density
total_observation_count_update = state_ops.assign(
statistics.total_observation_count,
math_ops.cast(
gen_math_ops.round(
math_ops.cast(auxiliary_variables.max_time_seen -
statistics.start_time + 1, self._dtype) /
inter_observation_duration_estimate), dtypes.int64))
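# Editorial note (not in the original source): as a concrete example of the
# estimate above, max_time_seen = 99, start_time = 0 and an average
# inter-observation duration of 2.0 give round((99 - 0 + 1) / 2.0) = 50
# estimated observations.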
per_chunk_stat_updates = control_flow_ops.group(
overall_feature_mean_update, overall_feature_var_update,
series_start_update, start_time_update,
total_observation_count_update)
return per_chunk_stat_updates
def _create_variable_statistics_object(self):
"""Creates non-trainable variables representing input statistics."""
series_start_moments = Moments(
mean=variable_scope.get_variable(
name="series_start_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="series_start_variance",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
overall_feature_moments = Moments(
mean=variable_scope.get_variable(
name="overall_feature_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="overall_feature_var",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
start_time = variable_scope.get_variable(
name="start_time",
dtype=dtypes.int64,
initializer=dtypes.int64.max,
trainable=False)
total_observation_count = variable_scope.get_variable(
name="total_observation_count",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.ones_initializer(),
trainable=False)
return InputStatistics(
series_start_moments=series_start_moments,
overall_feature_moments=overall_feature_moments,
start_time=start_time,
total_observation_count=total_observation_count)
|
nburn42/tensorflow
|
tensorflow/contrib/timeseries/python/timeseries/math_utils.py
|
Python
|
apache-2.0
| 43,089
|
[
"Gaussian"
] |
9f0cbbcea03a4f90b16a01827ab3fbc6a9bac0899e67fca4c7294b73837b2279
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGlmnet(RPackage):
"""Extremely efficient procedures for fitting the entire lasso or
elastic-net regularization path for linear regression, logistic and
multinomial regression models, Poisson regression and the Cox model. Two
recent additions are the multiple-response Gaussian, and the grouped
multinomial. The algorithm uses cyclical coordinate descent in a path-wise
fashion, as described in the paper linked to via the URL below."""
homepage = "https://cran.rstudio.com/web/packages/glmnet/index.html"
url = "https://cran.rstudio.com/src/contrib/glmnet_2.0-13.tar.gz"
list_url = homepage
version('2.0-13', '1dd5636388df5c3a29207d0bf1253343')
version('2.0-5', '049b18caa29529614cd684db3beaec2a')
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
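# Editorial note (illustrative, not part of the original recipe): once this
# package file is available in a Spack repo, the library can typically be
# installed and made available with, e.g.:
#   spack install r-glmnet@2.0-13
#   spack load r-glmnet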
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/r-glmnet/package.py
|
Python
|
lgpl-2.1
| 2,113
|
[
"Gaussian"
] |
5ba4105dfed9d856d056478f8c405a187f0b5253c6e3e778f477821ce9dbb21c
|
#! /usr/bin/env python
# PBS cluster job submission in Python
# BWA alignment to Painted Turtle Genome
# By Jean P. Elbers
# jean.elbers@gmail.com
# Last modified 22 Jan 2015
###############################################################################
Usage = """
03-bwa.py - version 1.0
Command:
1.Uses BWA Mem to align paired reads to Painted Turtle Genome
~/bin/bwa-0.7.12/bwa mem \
-M \
-t16 \
RefDir/GCF_000241765.3_Chrysemys_picta_bellii-3.0.3_genomic \
Sample-R1-unmerged.trim.fastq.gz \
Sample-R2-unmerged.trim.fastq.gz > ../bwa-alignment/Sample-paired.bwa.sam
2.Uses BWA Mem to align single and merged reads to Painted Turtle Genome
~/bin/bwa-0.7.12/bwa mem \
-M \
-t16 \
RefDir/GCF_000241765.3_Chrysemys_picta_bellii-3.0.3_genomic \
Sample-singlesANDmerged.trim.fastq.gz > ../bwa-alignment/Sample-singlesANDmerged.bwa.sam
Directory info:
InDir = /work/jelber2/immunome_2014/run2/trimmed-data
Input Files = Sample-R1-unmerged.trim.fastq.gz
Sample-R2-unmerged.trim.fastq.gz
Sample-singlesANDmerged.trim.fastq.gz
OutDir = /work/jelber2/immunome_2014/run2/bwa-alignment
Output Files = *-singlesANDmerged.bwa.sam
*-paired.bwa.sam
Usage (execute following code in InDir):
~/scripts/immunome_2014/03-bwa.py *.trim.fastq.gz
"""
###############################################################################
import os, sys, subprocess, re #imports os, sys, subprocess, re modules
if len(sys.argv)<2:
print Usage
else:
FileList = sys.argv[1:]
RefDir = "/work/jelber2/reference"
InDir = "/work/jelber2/immunome_2014/run2/trimmed-data/"
OutDir = "/work/jelber2/immunome_2014/run2/bwa-alignment"
NewFolderName = "bwa-alignment"
os.chdir(InDir)
os.chdir("..") # go up one directory
if not os.path.exists(NewFolderName):
os.mkdir(NewFolderName) # if NewFolderName does not exist, then make it
os.chdir(InDir)
R1 = re.compile(r"\w+\-R1\-unmerged\.trim\.fastq\.gz") # search string for obtaining only -R1-unmerged.trim.fastq.gz files
FileList = [f for f in FileList if R1.match(f)] # keeps only R1 files in FileList
for InFileName in FileList: # so the bwa commands run only once for each R1 and R2 pair
FileSuffix = "-R1-unmerged.trim.fastq.gz" # string to remove from InFileName
Sample = InFileName.replace(FileSuffix,'') # creates Sample string
# Customize your options here
Queue = "workq"
Allocation = "hpc_gopo02"
Processors = "nodes=1:ppn=16"
WallTime = "01:30:00"
LogOut = OutDir
LogMerge = "oe"
JobName = "BWA-%s" % (Sample)
Command ="""
~/bin/bwa-0.7.12/bwa mem \
-M \
-t16 \
%s/GCF_000241765.3_Chrysemys_picta_bellii-3.0.3_genomic \
%s-R1-unmerged.trim.fastq.gz \
%s-R2-unmerged.trim.fastq.gz > ../bwa-alignment/%s-paired.bwa.sam
~/bin/bwa-0.7.12/bwa mem \
-M \
-t16 \
%s/GCF_000241765.3_Chrysemys_picta_bellii-3.0.3_genomic \
%s-singlesANDmerged.trim.fastq.gz > ../bwa-alignment/%s-singlesANDmerged.bwa.sam""" % \
(RefDir, Sample, Sample, Sample,
RefDir, Sample, Sample)
JobString = """
#!/bin/bash
#PBS -q %s
#PBS -A %s
#PBS -l %s
#PBS -l walltime=%s
#PBS -o %s
#PBS -j %s
#PBS -N %s
cd %s
%s\n""" % (Queue, Allocation, Processors, WallTime, LogOut, LogMerge, JobName, InDir, Command)
#Create pipe to qsub
proc = subprocess.Popen(['qsub'], shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)
(child_stdout, child_stdin) = (proc.stdout, proc.stdin)
#Print JobString
jobname = proc.communicate(JobString)[0]
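#Editorial note: qsub normally echoes the identifier of the submitted job on
#stdout, so jobname above should contain that job ID (assuming submission
#succeeded)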
print JobString
print jobname
|
jelber2/immunome_2014
|
03-bwa-run2.py
|
Python
|
mit
| 3,867
|
[
"BWA"
] |
cff8848b5e8df9c2856e2c1f51bc8a39626736eba16718eab6d5405e2321f28c
|
"""
___ _
/ _ \ ___| |_ ___ _ __ _ _ ___
| | | |/ __| __/ _ \| '_ \| | | / __|
| |_| | (__| || (_) | |_) | |_| \__ \
\___/ \___|\__\___/| .__/ \__,_|___/
|_|
<HSC-Herve Schauer Consultants 2015>
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from twisted.protocols.basic import LineReceiver
from twisted.internet import protocol, reactor, defer
from collections import deque
from chunkify import chunkify, getChunk, saveChunk
from params import api, masternode, secondarynode, dictpath
from md5sum import md5sum
import shutil
import os
import json
import re
import urlparse
import urllib2
import optparse
import random
import logs
#import zlib
api_host = api["host"]
api_port = api["port"]
OCTOPUS = """
___ _
/ _ \\ ___| |_ ___ _ __ _ _ ___
| | | |/ __| __/ _ \\| '_ \\| | | / __|
| |_| | (__| || (_) | |_) | |_| \\__ \\
\\___/ \\___|\\__\\___/| .__/ \\__,_|___/
|_|
"""
print OCTOPUS
print "Masternode started"
def httprequest(data):
#data = zlib.compress(data)
request = "GET /?cmd=%s HTTP/1.1\r\n" %urllib2.quote(data)
request += "User-Agent: Octopus"
return request
def parse_args():
"""
Arguments parser
"""
usage = """usage: %prog [options]"""
parser = optparse.OptionParser(usage)
help = "Use as secondary masternode"
parser.add_option("-s", "--secondary", action = "store_true", help = help)
help = "Debug mode"
parser.add_option("-d", "--debug", action = "store_true", help = help)
options, args = parser.parse_args()
return options
debug = parse_args().debug
is_master = not parse_args().secondary
type = "master" if is_master else "secondary"
if is_master:
config = masternode
else:
config = secondarynode
home = config["vars"]["HOME"]
HOST = config["host"]
PORT = config["port"]
logger = logs.Logger("%s/master.log" %(home), reactor)
class Job(object):
def __init__(self, type, job, father, hashtypes, id):
self.type = type
self.job = job
self.father = father
self.hashtypes = hashtypes
self.id = id
def __eq__(self, other):
sameType = self.type == other.type
sameJob = self.job == other.job
sameFather = self.father == other.father
sameId = self.id == other.id
return sameType and sameJob and sameId and sameFather
def __ne__(self, other):
diffType = self.type != other.type
diffJob = self.job != other.job
diffFather = self.father != other.father
diffId = self.id != other.id
return diffType or diffJob or diffId or diffFather
def getJob(self):
type, job, hashtypes, id = self.type, self.job, self.hashtypes, self.id
father = self.father
return [type, job, father, hashtypes, id]
def getOneJob(self):
try:
type, job, hashtype = self.type, self.job, self.hashtypes.pop()
id, empty = self.id, len(self.hashtypes)==0
return [type, job, hashtype, id], empty
except IndexError:
raise IndexError("no more hashtype to test")
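# Editorial usage sketch (hypothetical values, not from the original source):
#   job = Job("dict", "part1", "dict:rockyou", ["md5", "sha1"], "42")
#   job.getOneJob()  # -> (["dict", "part1", "sha1", "42"], False)  hashtypes.pop() takes the last entry
#   job.getOneJob()  # -> (["dict", "part1", "md5", "42"], True)    no hashtypes left afterwards
#   job.getOneJob()  # raises IndexError("no more hashtype to test")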
class MasterProtocol(object, LineReceiver):
delimiter = "\r\n\r\n"
id = None
hashtype = None
# ======================= PROTOCOL FUNCTIONS ======================= #
def connectionMade(self):
self.buffer = ""
peer = self.transport.getPeer()
host, port = peer.host, peer.port
logger.log("Connection made with %s:%d"%(host, port))
if debug:
print "Connection made with %s:%d" %(host, port)
self.proto = {"NODETYPE": self.nodetype,
"DO": self.do,
"RESULT": self.result,
"RESULTP": self.resultp,
"PUT": self.put,
"GET": self.get,
"STOP": self.stop,
"PURGE": self.purge,
"ERROR": (lambda msg: None),
"WAIT": self.wait}
self.nodes = {"api": self.api,
"master": self.master,
"secondary": self.secondary,
"slave": self.slave,
"slavesync": self.slavesync}
self.putActions = {"fathers": self.putFathers,
"jobs": self.putJobs,
"nbjobs": self.putNbJobs,
"hashes": self.putHashes}
self.getActions = {"results": self.getResults,
"chunk": self.getChunk,
"dictionaries": self.getDictionaries,
"dictlist": self.getDictlist}
self.sendNodetype()
def connectionLost(self, reason):
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "Connection lost with %s:%d"%(host, port)
logger.log("Connection lost with %s:%d"%(host, port))
if self == self.factory.master:
self.factory.master = None
elif self == self.factory.secondary:
self.factory.secondary = None
elif self in self.factory.slavenodes:
self.factory.slavenodes.remove(self)
peer = self.transport.getPeer()
ipport = "%s:%d" %(peer.host, peer.port)
if ipport in self.factory.deferreds:
self.factory.deferreds.pop(ipport)
if self.job:
if not self.id in self.factory.jobs.keys():
self.factory.jobs[self.id] = deque()
if self.hashtype:
self.job.hashtypes.append(self.hashtype)
if not self.job in self.factory.jobs[self.id]:
self.factory.jobs[self.id].append(self.job)
elif self == self.factory.api:
self.factory.api = None
def lineReceived(self, data):
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "MasterProtocol.lineReceived(%s) from %s:%s" %(data, host, port)
lines = data.split("\r\n")
request = dict()
header = lines[0]
if "HTTP/1.1" not in header:
self.transport.loseConnection()
return
r = re.compile("^([^:]+): (.*?)$", re.MULTILINE)
request = dict((head, value) for (head, value) in r.findall(data[1:]))
addr = header.split()[1]
p = urlparse.parse_qs(urlparse.urlparse(addr).query)
if "cmd" in p.keys() and p['cmd']:
self.requestReceived(p['cmd'][0])
else:
self.transport.loseConnection()
def requestReceived(self, data):
"""
Maps a command with its corresponding function
"""
#data = zlib.decompress(data)
if debug:
print "MasterProtocol.requestReceived(%s)" %(data)
peer = self.transport.getPeer()
host, port = peer.host, peer.port
logger.log("Recv from %s:%d: %s" %(host, port, data))
if data[-1] == "\x03":
self.buffer += data[:-1]
try:
cmd = json.loads(self.buffer)
except ValueError as e:
self.sendError("Your request is not a JSON")
else:
self.buffer += data
return
# self.buffer += data
# try:
# cmd = json.loads(self.buffer)
# except ValueError as e:
# return
self.buffer = ""
if cmd:
if cmd[0] in self.proto.keys():
try:
self.proto[cmd[0]](*cmd[1:])
except TypeError as e:
raise
#self.sendError(str(e))
#return
else:
self.sendError("Function %s() does not exist" %cmd[0].lower())
def sendRequest(self, data):
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "MasterProtocol.sendRequest(%s) to %s:%d" %(data, host, port)
logger.log("Sent to %s:%d: %s"%(host, port, data))
data += "\x03"
while data:
self.sendLine(httprequest(data[:1000]))
data = data[1000:]
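# Editorial note (not in the original source): sendRequest() terminates the
# JSON payload with "\x03" and ships it as consecutive HTTP GET requests of at
# most 1000 characters each (see httprequest()). The peer's requestReceived()
# accumulates the pieces in self.buffer and only json.loads() them once the
# chunk carrying the trailing "\x03" arrives.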
# ===================== END PROTOCOL FUNCTIONS ===================== #
# ========================= RECV FUNCTIONS ========================= #
def nodetype(self, type):
"""
Answer to a NODETYPE command.
"""
if debug:
print "MasterProtocol.nodetype(%s)" %(type)
try:
self.nodes[type]()
except KeyError as e:
self.sendError("Nodetype %s does not exists"%str(e))
return
def do(self, type, job, hashtypes, id):
"""
Answer to a DO command. Used to send a job.
"""
if debug:
print "MasterProtocol.do(id=%s)" %(id)
chunks = chunkify(type, job, id)
if not id in self.factory.fathers:
self.factory.fathers[id] = dict()
nb = len(chunks)
father = "%s:%s" %(type, job)
self.factory.fathers[id][father] = nb
self.factory.api.sendFather(father, nb, id)
for chunk in chunks:
self.addTask(type, chunk, father, hashtypes, id)
def result(self, results):
"""
Answer to a RESULT command. Used to send the result of a request.
"""
if debug:
print "MasterProtocol.result(%s)" %(str(results[:3]))
if self.id:
if self.id in self.factory.protocols:
self.factory.protocols[self.id].remove(self)
self.factory.api.sendResult(results, self.id)
if self.id in self.factory.jobs.keys():
if not self.job in self.factory.jobs[self.id]:
self.factory.api.sendRemove(self.job.getJob())
self.job = None
self.father = None
self.hashtype = None
self.removeHashes(results)
else:
self.factory.api.sendRemove(self.job.getJob())
try:
shutil.rmtree("%s/tmp/%s" %(home, self.id))
except OSError as e:
print e.message
self.work()
def resultp(self, results):
"""
Answer to a RESULTP command. Used to send the result of a request.
"""
if debug:
print "MasterProtocol.resultp(%s)" %(str(results[:3]))
id = self.id
if id:
self.factory.api.sendResult(results, id)
self.removeHashes(results)
if not self.id in self.factory.jobs.keys():
try:
shutil.rmtree("%s/tmp/%s" %(home, self.id))
except OSError as e:
print e.message
def put(self, type, objects):
"""
Answer to a PUT command. Used to upload files such as dictionaries and
hashfiles.
"""
if debug:
print "MasterProtocol.put(%s, ...)" %(type)
try:
self.putActions[type](*objects)
except KeyError as e:
self.sendError("Cannot put %s objects" %str(e))
def get(self, type, objects):
"""
Answer to a GET command. Used to get information about slavenodes and
job requests (results, state).
"""
if debug:
print "MasterProtocol.get(%s, ...)" %(type)
try:
self.getActions[type](*objects)
except KeyError as e:
self.sendError("Cannot get %s objects" %str(e))
def stop(self, id):
if debug:
print "MasterProtocol.stop(%s)" %(id)
try:
self.factory.jobs.pop(id)
shutil.rmtree("%s/tmp/%s" %(home, id))
protocols = self.factory.protocols.pop(id)
for slave in protocols:
slave.id = None
slave.sendStop()
except KeyError as e:
self.sendError(str(e))
def purge(self, id):
if debug:
print "MasterProtocol.purge(%s)" %(id)
try:
self.factory.jobs.pop(id)
self.factory.fathers.pop(id)
except KeyError as e:
self.sendError(str(e))
if os.path.isdir("%s/tmp/%s" %(home, id)):
shutil.rmtree("%s/tmp/%s" %(home, id))
def api(self):
"""
Answer to a NODETYPE api command. Sent by API when a connection
is made.
"""
if debug:
print "MasterProtocol.api()"
self.factory.api = self
def master(self):
"""
Answer to a NODETYPE master command. Sent by Masternode when a
connection is made.
"""
if debug:
print "MasterProtocol.master()"
self.factory.master = self
def secondary(self):
"""
Answer to a NODETYPE secondary command. Sent by Secondarynode when a
connection is made.
"""
if debug:
print "MasterProtocol.secondary()"
self.factory.secondary = self
def slave(self):
"""
Answer to a NODETYPE slave command. Sent by a slavenode when a
connection is made.
"""
if debug:
print "MasterProtocol.slave()"
self.factory.slavenodes.append(self)
self.job = None
self.father = None
self.id = None
self.work()
def slavesync(self):
"""
Answer to a NODETYPE slavesync command. Sent by a slavenode when a
connection is made and dictionary synchronization is requested.
"""
if debug:
print "MasterProtocol.slave()"
self.job = None
self.father = None
self.id = None
def putFathers(self, fathers, id):
"""
Answer to a PUT father command.
"""
self.factory.fathers[id] = fathers
def putJobs(self, job, id):
if debug:
print "MasterProtocol.putJobs(id=%s)" %(id)
if not id in self.factory.jobs.keys():
self.factory.jobs[id] = deque()
if not os.path.isdir("%s/tmp/%s" %(home, id)):
os.mkdir("%s/tmp/%s" %(home, id))
count = self.factory.nbjobs[id]
self.factory.nbjobs[id] += 1
path = "%s/tmp/%s/part%d" %(home, id, count)
if os.path.isfile(path):
os.remove(path)
chunk = saveChunk[job[0]](job[1], path)
father = job[2]
newjob = Job(job[0], chunk, father, job[3], job[4])
self.factory.jobs[id].append(newjob)
self.fire()
def putNbJobs(self, nbjobs, id):
self.factory.nbjobs[id] = nbjobs
def putHashes(self, hashes, id):
"""
Answer to a PUT hashes command. Used to store a hashfile.
"""
if debug:
print "MasterProtocol.putHashes(id=%s)" %(id)
if not os.path.isdir("%s/tmp/%s" %(home, id)):
os.mkdir("%s/tmp/%s" %(home, id))
with open("%s/tmp/%s/hashes"%(home, id), "a") as f:
dico = "\n".join(hash for hash in hashes)
try:
f.write(dico + "\n")
except UnicodeEncodeError:
pass
self.sendOK("Hashes uploaded")
def getDictionaries(self, dictlist):
for dname in dictlist:
print "Sending %s..." %(dname)
with open("%s/%s.txt" %(dictpath, dname), "r") as g:
dict = g.read().decode('latin-1').encode('utf-8')
dict = dict.split("\n")
while dict:
self.sendPut("dictionary", [dict[:100000], dname])
dict = dict[100000:]
print "%s sent!" %(dname)
self.factory.slavenodes.append(self)
self.work()
def getChunk(self, dic, nb, step):
with open("%s/%s.txt" %(dictpath, dic), "r") as f:
chunk = f.readlines()[nb*step:(nb+1)*step]
chunk = [w.replace("\n", "") for w in chunk]
chunk = "\n".join(chunk)
self.sendPut("chunk", [chunk])
def getDictlist(self):
dictlist = dict()
with open("%s/dictionaries" %(dictpath), "r") as f:
for line in f:
line = line.replace("\n", "")
md5digest = md5sum("%s/%s.txt" %(dictpath, line))
dictlist[md5digest] = line
self.sendPut("dictlist", [dictlist])
def getResults(self, id):
"""
Answer to a GET results command. Returns the list of results
corresponding to a given client.
"""
pass
def wait(self):
if debug:
print "MasterProtocol.wait()"
if not self.id in self.factory.jobs.keys():
self.factory.jobs[self.id] = deque()
self.job.hashtypes.append(self.hashtype)
if self.job in self.factory.jobs[self.id]:
self.factory.jobs[self.id].remove(self.job)
self.factory.jobs[self.id].appendleft(self.job)
self.id = None
self.job = None
# ======================= END RECV FUNCTIONS ======================= #
# ========================= SEND FUNCTIONS ========================= #
def sendNodetype(self):
self.sendRequest(json.dumps(["NODETYPE", self.factory.type]))
def sendOK(self, msg):
self.sendRequest(json.dumps(["OK", msg]))
def sendError(self, msg):
self.sendRequest(json.dumps(["ERROR", msg]))
def sendDo(self, type, job, hashtype, id):
self.id = id
self.sendRequest(json.dumps(["DO", type, job, hashtype]))
def sendFather(self, father, nb, id):
self.sendRequest(json.dumps(["FATHER", father, nb, id]))
def sendGet(self, type, object):
pass
def sendPut(self, type, object):
# TODO: UTF-8 conv
try:
self.sendRequest(json.dumps(["PUT", type, object]))
except UnicodeDecodeError as e:
print e.message
def sendRemove(self, job):
self.sendRequest(json.dumps(["REMOVE", "job", [job]]))
def sendResult(self, result, id=0):
self.sendRequest(json.dumps(["RESULT", result, id]))
def sendStop(self):
self.sendRequest(json.dumps(["STOP"]))
# ======================= END RECV FUNCTIONS ======================= #
# ========================= MISC FUNCTIONS ========================= #
def removeHashes(self, results):
if debug:
print "MasterProtocol.removeHashes(%s)" %(str(results[:3]))
try:
with open("%s/tmp/%s/hashes" %(home, self.id), "r") as f:
hashes = [h.strip("\n") for h in f.xreadlines()]
for result in results:
for hash in hashes:
if result.split(":", 1)[1] in hash:
hashes.remove(hash)
if not hashes:
os.remove("%s/tmp/%s/hashes" %(home, self.id))
else:
os.remove("%s/tmp/%s/hashes" %(home, self.id))
with open("%s/tmp/%s/hashes" %(home, self.id), "a") as f:
dico = "\n".join(hash for hash in hashes)
f.write(dico)
except IOError as e:
print e.message
def work(self):
if debug:
print "MasterProtocol.work()"
if self.factory.jobs.keys():
if debug:
print " There is a task to perform"
ind = random.randint(0, len(self.factory.jobs.keys())-1)
key = self.factory.jobs.keys()[ind]
if debug:
print " ID:", key
if not self.factory.jobs[key]:
if debug:
print " No job for that key!"
self.factory.jobs.pop(key)
self.work()
return
job = self.factory.jobs[key].popleft()
self.doJob(job)
if not self.factory.jobs[key]:
self.factory.jobs.pop(key)
else:
d = defer.Deferred()
d.addCallback(self.doJob)
peer = self.transport.getPeer()
self.factory.deferreds["%s:%d" %(peer.host, peer.port)] = d
def fire(self):
if debug:
print "MasterProtocol.fire()"
if self.factory.deferreds and self.factory.jobs.keys():
d = self.factory.deferreds.popitem()[1]
ind = random.randint(0, len(self.factory.jobs.keys())-1)
key = self.factory.jobs.keys()[ind]
d.callback(self.factory.jobs[key].popleft())
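# Editorial note (not in the original source): work() and fire() implement a
# simple dispatch queue. When an idle slave finds no queued job, work() parks a
# Deferred keyed by the slave's "host:port" in self.factory.deferreds; when a
# new job arrives (putJobs()/addTask()), fire() pops one of those Deferreds and
# fires it with a Job, which invokes doJob() on the idle slave's protocol.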
def addTask(self, type, chunk, father, hashtypes, id):
if debug:
print "MasterProtocol.addTask(%s)" %(id)
job = Job(type, chunk, father, list(hashtypes), id)
chunk = getChunk[type](chunk)
toSend = [type, chunk, father, list(hashtypes), id]
self.factory.api.sendPut("jobs", [toSend, id])
if not id in self.factory.jobs.keys():
self.factory.jobs[id] = deque()
self.factory.jobs[id].append(job)
if self.factory.deferreds:
job = self.factory.jobs[id].popleft()
d = self.factory.deferreds.popitem()[1]
d.callback(job)
def doJob(self, job):
if debug:
print "MasterProtocol.doJob(%s)" %(str(job))
id = job.id
if job.father in self.factory.fathers[id]:
nb = self.factory.fathers[id].pop(job.father)
self.factory.api.sendFather(job.father, nb, id)
if debug:
print "Step 1"
chunk = getChunk[job.type](job.job)
if debug:
print "Step 2"
self.job = job
self.father = job.father
if not id in self.factory.protocols:
self.factory.protocols[id] = set()
if debug:
print "Step 3"
self.factory.protocols[id].add(self)
try:
with open("%s/tmp/%s/hashes" %(home, id), "r") as f:
hashes = [h.strip("\n") for h in f.xreadlines()]
if debug:
print "Step 4"
toSend, empty = job.getOneJob()
if debug:
print "Step 5"
if not empty:
if not id in self.factory.jobs:
self.factory.jobs[id] = deque()
if job in self.factory.jobs[id]:
self.factory.jobs[id].remove(job)
self.factory.jobs[id].appendleft(job)
if debug:
print "Step 6"
while hashes:
if debug:
print "Step 7"
self.sendPut("hashes", [hashes[:150], toSend[2]])
hashes = hashes[150:]
if debug:
print "Step 8"
toSend[1] = chunk
self.sendDo(*toSend)
if debug:
print "Step 9"
self.hashtype = toSend[2]
except IOError as e:
print e.message
# ======================= END MISC FUNCTIONS ======================= #
class MasterFactory(object, protocol.ClientFactory, protocol.ServerFactory):
def __init__(self, protocol=MasterProtocol):
self.protocol = protocol
self.protocols = dict()
self.jobs = dict()
self.fathers = dict()
self.results = dict()
self.hashes = dict()
self.nbjobs = dict()
self.type = type
self.slavenodes = []
self.api = None
self.master = None
self.secondary = None
self.deferreds = dict()
factory = MasterFactory()
reactor.listenTCP(PORT, factory, interface=HOST)
reactor.connectTCP(api_host, api_port, factory)
reactor.run()
logger.write()
shutil.rmtree("%s/tmp"%home)
os.mkdir("%s/tmp"%home)
|
hsc-lab/octopus
|
src/masternode.py
|
Python
|
apache-2.0
| 25,227
|
[
"Octopus"
] |
f3cb4a0ad210ea5dc9e868f345b5a87c3a58210f9f4dbe2635e9690d68521ce8
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.241681
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/web/mediaplayerplay.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class mediaplayerplay(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(mediaplayerplay, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_20258864 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
_v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
write(u'''</e2state>
\t<e2statetext>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
write(u'''</e2statetext>
</e2simplexmlresult>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_20258864
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_mediaplayerplay= 'respond'
## END CLASS DEFINITION
if not hasattr(mediaplayerplay, '_initCheetahAttributes'):
templateAPIClass = getattr(mediaplayerplay, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayerplay)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=mediaplayerplay()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/web/mediaplayerplay.py
|
Python
|
gpl-2.0
| 5,233
|
[
"VisIt"
] |
e3bb549dd434820499c5be31916c479a7683c2446e4edc15665a7f21fc3c79eb
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-logging-info.py
# Author : Stuart Paterson
########################################################################
"""
Retrieve logging info of a Grid pilot
"""
__RCSID__ = "$Id$"
# pylint: disable=wrong-import-position
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile] ... PilotID ...' % Script.scriptName,
'Arguments:',
' PilotID: Grid ID of the pilot']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for gridID in args:
result = diracAdmin.getPilotLoggingInfo(gridID)
if not result['OK']:
errorList.append((gridID, result['Message']))
exitCode = 2
else:
print 'Pilot Reference: %s' % gridID
print result['Value']
print
for error in errorList:
print "ERROR %s: %s" % error
DIRACExit(exitCode)
|
andresailer/DIRAC
|
Interfaces/scripts/dirac-admin-get-pilot-logging-info.py
|
Python
|
gpl-3.0
| 1,294
|
[
"DIRAC"
] |
97ea47495746c8c1e1426304d8ce06afd2efc5a25bcebdbcb798dd2b1211fbcd
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDose(RPackage):
"""Disease Ontology Semantic and Enrichment analysis.
This package implements five methods proposed by Resnik, Schlicker,
Jiang, Lin and Wang respectively for measuring semantic similarities
among DO terms and gene products. Enrichment analyses including
hypergeometric model and gene set enrichment analysis are also
implemented for discovering disease associations of high-throughput
biological data."""
homepage = "https://bioconductor.org/packages/DOSE"
git = "https://git.bioconductor.org/packages/DOSE.git"
version('3.10.2', commit='5ea51a2e2a04b4f3cc974cecb4537e14efd6a7e3')
version('3.8.2', commit='4d3d1ca710aa7e4288a412c8d52b054b86a57639')
version('3.6.1', commit='f2967f0482cea39222bfd15767d0f4a5994f241b')
version('3.4.0', commit='dabb70de1a0f91d1767601e871f2f1c16d29a612')
version('3.2.0', commit='71f563fc39d02dfdf65184c94e0890a63b96b86b')
depends_on('r@3.3.1:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r-do-db', type=('build', 'run'))
depends_on('r-fgsea', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-gosemsim@2.0.0:', type=('build', 'run'))
depends_on('r-igraph', when='@3.2.0:3.4.0', type=('build', 'run'))
depends_on('r-qvalue', type=('build', 'run'))
depends_on('r-reshape2', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-scales', when='@3.2.0:3.4.0', type=('build', 'run'))
depends_on('r-rvcheck', when='@3.4.0', type=('build', 'run'))
depends_on('r@3.4.0:', when='@3.6.1:', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-dose/package.py
|
Python
|
lgpl-2.1
| 1,982
|
[
"Bioconductor"
] |
1e7d1e2a5174eac6ed35f3c5cf8e6d625910de87055368a24088accda279c91d
|
"""
Test the about xblock
"""
import datetime
import ddt
import mock
import pytz
import six
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.test.utils import override_settings
from django.urls import reverse
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from six import text_type
from waffle.testutils import override_switch
from course_modes.models import CourseMode
from lms.djangoapps.ccx.tests.factories import CcxFactory
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.features.course_experience import COURSE_ENABLE_UNENROLLED_ACCESS_FLAG
from openedx.features.course_experience.waffle import ENABLE_COURSE_ABOUT_SIDEBAR_HTML
from openedx.features.course_experience.waffle import WAFFLE_NAMESPACE as COURSE_EXPERIENCE_WAFFLE_NAMESPACE
from shoppingcart.models import Order, PaidCourseRegistration
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory, CourseEnrollmentAllowedFactory, UserFactory
from track.tests import EventTrackingTestCase
from util.milestones_helpers import get_prerequisite_courses_display, set_prerequisite_courses
from xmodule.course_module import (
CATALOG_VISIBILITY_ABOUT,
CATALOG_VISIBILITY_NONE,
COURSE_VISIBILITY_PRIVATE,
COURSE_VISIBILITY_PUBLIC,
COURSE_VISIBILITY_PUBLIC_OUTLINE
)
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_MODULESTORE,
TEST_DATA_SPLIT_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.utils import TEST_DATA_DIR
from xmodule.modulestore.xml_importer import import_course_from_xml
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@ddt.ddt
class AboutTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase, EventTrackingTestCase, MilestonesTestCaseMixin):
"""
Tests about xblock.
"""
@classmethod
def setUpClass(cls):
super(AboutTestCase, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
cls.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
cls.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_without_about.location,
data="WITHOUT ABOUT", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_with_about.location,
data="WITH ABOUT", display_name="overview"
)
def setUp(self):
super(AboutTestCase, self).setUp()
self.course_mode = CourseMode(
course_id=self.purchase_course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10
)
self.course_mode.save()
def test_anonymous_user(self):
"""
This test asserts that a non-logged in user can visit the course about page
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "OOGIE BLOOGIE")
# Check that registration button is present
self.assertContains(resp, REG_STR)
def test_logged_in(self):
"""
This test asserts that a logged-in user can visit the course about page
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "OOGIE BLOOGIE")
def test_already_enrolled(self):
"""
Asserts that the end user sees the appropriate messaging
when they visit the course about page while already enrolled
"""
self.setup_user()
self.enroll(self.course, True)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "You are enrolled in this course")
self.assertContains(resp, "View Course")
@override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
def test_visible_about_page_settings(self):
"""
Verify that the About Page honors the permission settings in the course module
"""
url = reverse('about_course', args=[text_type(self.course_with_about.id)])
resp = self.client.get(url)
self.assertContains(resp, "WITH ABOUT")
url = reverse('about_course', args=[text_type(self.course_without_about.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 404)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing(self):
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
# should be redirected
self.assertEqual(resp.status_code, 302)
# follow this time, and check we're redirected to the course home page
resp = self.client.get(url, follow=True)
target_url = resp.redirect_chain[-1][0]
course_home_url = reverse('openedx.course_experience.course_home', args=[text_type(self.course.id)])
self.assertTrue(target_url.endswith(course_home_url))
@patch.dict(settings.FEATURES, {'ENABLE_COURSE_HOME_REDIRECT': False})
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing_without_course_home_redirect(self):
"""
Verify user is not redirected to course home page when
ENABLE_COURSE_HOME_REDIRECT is set to False
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
# should not be redirected
self.assertContains(resp, "OOGIE BLOOGIE")
@patch.dict(settings.FEATURES, {'ENABLE_COURSE_HOME_REDIRECT': True})
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': False})
def test_logged_in_marketing_without_mktg_site(self):
"""
Verify user is not redirected to course home page when
ENABLE_MKTG_SITE is set to False
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
# should not be redirected
self.assertContains(resp, "OOGIE BLOOGIE")
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course(self):
pre_requisite_course = CourseFactory.create(org='edX', course='900', display_name='pre requisite course')
course = CourseFactory.create(pre_requisite_courses=[text_type(pre_requisite_course.id)])
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[text_type(pre_requisite_courses[0]['key'])])
self.assertIn(u"<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.decode(resp.charset).strip('\n'))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_about_page_unfulfilled_prereqs(self):
pre_requisite_course = CourseFactory.create(
org='edX',
course='901',
display_name='pre requisite course',
)
pre_requisite_courses = [text_type(pre_requisite_course.id)]
# for this failure to occur, the enrollment window needs to be in the past
course = CourseFactory.create(
org='edX',
course='1000',
# closed enrollment
enrollment_start=datetime.datetime(2013, 1, 1),
enrollment_end=datetime.datetime(2014, 1, 1),
start=datetime.datetime(2013, 1, 1),
end=datetime.datetime(2030, 1, 1),
pre_requisite_courses=pre_requisite_courses,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
self.setup_user()
self.enroll(self.course, True)
self.enroll(pre_requisite_course, True)
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[text_type(pre_requisite_courses[0]['key'])])
self.assertIn(u"<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.decode(resp.charset).strip('\n'))
url = reverse('about_course', args=[six.text_type(pre_requisite_course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@ddt.data(
[COURSE_VISIBILITY_PRIVATE],
[COURSE_VISIBILITY_PUBLIC_OUTLINE],
[COURSE_VISIBILITY_PUBLIC],
)
@ddt.unpack
def test_about_page_public_view(self, course_visibility):
"""
Assert that anonymous or unenrolled users see View Course option
when unenrolled access flag is set
"""
with mock.patch('xmodule.course_module.CourseDescriptor.course_visibility', course_visibility):
with override_waffle_flag(COURSE_ENABLE_UNENROLLED_ACCESS_FLAG, active=True):
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
if course_visibility == COURSE_VISIBILITY_PUBLIC or course_visibility == COURSE_VISIBILITY_PUBLIC_OUTLINE:
self.assertContains(resp, "View Course")
else:
self.assertContains(resp, "Enroll Now")
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the course about page
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Set up the tests
"""
super(AboutTestCaseXML, self).setUp()
# The following test course (which lives at common/test/data/2014)
# is closed; we're testing that an about page still appears when
# the course is already closed
self.xml_course_id = self.store.make_course_key('edX', 'detached_pages', '2014')
import_course_from_xml(
self.store,
'test_user',
TEST_DATA_DIR,
source_dirs=['2014'],
static_content_store=None,
target_id=self.xml_course_id,
raise_on_failure=True,
create_if_not_present=True,
)
# this text appears in that course's about page
# common/test/data/2014/about/overview.html
self.xml_data = "about page 463139"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('about_course', args=[text_type(self.xml_course_id)])
resp = self.client.get(url)
self.assertContains(resp, self.xml_data)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('about_course', args=[text_type(self.xml_course_id)])
resp = self.client.get(url)
self.assertContains(resp, self.xml_data)
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test case will check the About page when a course has a capped enrollment
"""
@classmethod
def setUpClass(cls):
super(AboutWithCappedEnrollmentsTestCase, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_enrollment_cap(self):
"""
This test will make sure that enrollment caps are enforced
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, '<a href="#" class="register">')
self.enroll(self.course, verify=True)
# pylint: disable=attribute-defined-outside-init
# create a new account since the first account is already enrolled in the course
self.email = 'foo_second@test.com'
self.password = 'bar'
self.username = 'test_second'
self.create_account(self.username, self.email, self.password)
self.activate_user(self.email)
self.login(self.email, self.password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertContains(resp, "Course is full")
# Try to enroll as well
result = self.enroll(self.course)
self.assertFalse(result)
# Check that registration button is not present
self.assertNotContains(resp, REG_STR)
class AboutWithInvitationOnly(SharedModuleStoreTestCase):
"""
This test case will check the About page when a course is invitation only.
"""
@classmethod
def setUpClass(cls):
super(AboutWithInvitationOnly, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"invitation_only": True})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
display_name="overview"
)
def test_invitation_only(self):
"""
Test for user not logged in, invitation only course.
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enrollment in this course is by invitation only")
# Check that registration button is not present
self.assertNotContains(resp, REG_STR)
def test_invitation_only_but_allowed(self):
"""
Test for user logged in and allowed to enroll in invitation only course.
"""
# Course is invitation only, student is allowed to enroll and logged in
user = UserFactory.create(username='allowed_student', password='test', email='allowed_student@test.com')
CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
self.client.login(username=user.username, password='test')
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, u"Enroll Now")
# Check that registration button is present
self.assertContains(resp, REG_STR)
class AboutWithClosedEnrollment(ModuleStoreTestCase):
"""
This test case will check the About page for a course that has enrollment start/end
set but it is currently outside of that period.
"""
def setUp(self):
super(AboutWithClosedEnrollment, self).setUp()
self.course = CourseFactory.create(metadata={"invitation_only": False})
# Setup enrollment period to be in future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
display_name="overview"
)
def test_closed_enrollmement(self):
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enrollment is Closed")
# Check that registration button is not present
self.assertNotContains(resp, REG_STR)
def test_course_price_is_not_visble_in_sidebar(self):
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
# course price is not visible on the course_about page when the course
# mode is not set to honor
self.assertNotContains(resp, '<span class="important-dates-item-text">$10</span>')
@ddt.ddt
class AboutSidebarHTMLTestCase(SharedModuleStoreTestCase):
"""
This test case will check the About page for the content in the HTML sidebar.
"""
def setUp(self):
super(AboutSidebarHTMLTestCase, self).setUp()
self.course = CourseFactory.create()
@ddt.data(
("", "", False),
("about_sidebar_html", "About Sidebar HTML Heading", False),
("about_sidebar_html", "", False),
("", "", True),
("about_sidebar_html", "About Sidebar HTML Heading", True),
("about_sidebar_html", "", True),
)
@ddt.unpack
def test_html_sidebar_enabled(self, itemfactory_display_name, itemfactory_data, waffle_switch_value):
with override_switch(
'{}.{}'.format(
COURSE_EXPERIENCE_WAFFLE_NAMESPACE,
ENABLE_COURSE_ABOUT_SIDEBAR_HTML
),
active=waffle_switch_value
):
if itemfactory_display_name:
ItemFactory.create(
category="about",
parent_location=self.course.location,
display_name=itemfactory_display_name,
data=itemfactory_data,
)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
if waffle_switch_value and itemfactory_display_name and itemfactory_data:
self.assertContains(resp, '<section class="about-sidebar-html">')
self.assertContains(resp, itemfactory_data)
else:
self.assertNotContains(resp, '<section class="about-sidebar-html">')
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True})
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test class runs through a suite of verifications regarding
purchaseable courses
"""
@classmethod
def setUpClass(cls):
super(AboutPurchaseCourseTestCase, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
cls.closed_course = CourseFactory.create(
org='MITx',
number='closed',
display_name='Closed Course To Buy',
enrollment_start=tomorrow,
enrollment_end=nextday
)
def setUp(self):
super(AboutPurchaseCourseTestCase, self).setUp()
self._set_ecomm(self.course)
self._set_ecomm(self.closed_course)
def _set_ecomm(self, course):
"""
Helper method to turn on ecommerce on the course
"""
course_mode = CourseMode(
course_id=course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10,
)
course_mode.save()
def test_anonymous_user(self):
"""
Make sure an anonymous user sees the purchase button
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Add buyme to Cart <span>($10 USD)</span>")
def test_logged_in(self):
"""
Make sure a logged in user sees the purchase button
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Add buyme to Cart <span>($10 USD)</span>")
def test_already_in_cart(self):
"""
This makes sure that if a user has this course in the cart, the expected message
appears
"""
self.setup_user()
cart = Order.get_cart_for_user(self.user)
PaidCourseRegistration.add_to_order(cart, self.course.id)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "This course is in your")
self.assertNotContains(resp, "Add buyme to Cart <span>($10 USD)</span>")
def test_already_enrolled(self):
"""
This makes sure that the already enrolled message appears for paywalled courses
"""
self.setup_user()
# note that we can't call self.enroll here since that goes through
# the Django student views, which doesn't allow for enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, self.course.id)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertContains(resp, "You are enrolled in this course")
self.assertContains(resp, "View Course")
self.assertNotContains(resp, "Add buyme to Cart <span>($10 USD)</span>")
def test_closed_enrollment(self):
"""
This makes sure that paywalled courses also honor the registration
window
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.closed_course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enrollment is Closed")
self.assertNotContains(resp, "Add closed to Cart <span>($10 USD)</span>")
# course price is visible on the course_about page when the course
# mode is set to honor and its price is set
self.assertContains(resp, '<span class="important-dates-item-text">$10</span>')
def test_invitation_only(self):
"""
This makes sure that the invitation-only restriction takes precedence over
any purchase enablement
"""
course = CourseFactory.create(metadata={"invitation_only": True})
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Enrollment in this course is by invitation only")
def test_enrollment_cap(self):
"""
Make sure that capped enrollments work even with
paywalled courses
"""
course = CourseFactory.create(
metadata={
"max_student_enrollments_allowed": 1,
"display_coursenumber": "buyme",
}
)
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertContains(resp, "Add buyme to Cart <span>($10 USD)</span>")
# note that we can't call self.enroll here since that goes through
# the Django student views, which doesn't allow for enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, course.id)
# create a new account since the first account is already enrolled in the course
email = 'foo_second@test.com'
password = 'bar'
username = 'test_second'
self.create_account(username,
email, password)
self.activate_user(email)
self.login(email, password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertContains(resp, "Course is full")
self.assertNotContains(resp, "Add buyme to Cart ($10)")
def test_free_course_display(self):
"""
Make sure other courses that don't have shopping cart enabled don't display the add-to-cart button
and don't display the course_price field if Cosmetic Price is disabled.
"""
course = CourseFactory.create(org='MITx', number='free', display_name='Course For Free')
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertNotContains(resp, "Add free to Cart (Free)")
self.assertNotContains(resp, '<p class="important-dates-item-title">Price</p>')
class CourseAboutTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test that an unenrolled student cannot access a CCX.
Note: only a CCX coach can enroll a student in a CCX; self-registration is not allowed.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(CourseAboutTestCaseCCX, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(CourseAboutTestCaseCCX, self).setUp()
# Create ccx coach account
self.coach = coach = AdminFactory.create(password="test")
self.client.login(username=coach.username, password="test")
def test_redirect_to_dashboard_unenrolled_ccx(self):
"""
Assert that when an unenrolled user tries to access a CCX, self-registration is not allowed
and the user is redirected to the student dashboard.
"""
# create ccx
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
ccx_locator = CCXLocator.from_course_locator(self.course.id, six.text_type(ccx.id))
self.setup_user()
url = reverse('openedx.course_experience.course_home', args=[ccx_locator])
response = self.client.get(url)
expected = reverse('dashboard')
self.assertRedirects(response, expected, status_code=302, target_status_code=200)
|
cpennington/edx-platform
|
lms/djangoapps/courseware/tests/test_about.py
|
Python
|
agpl-3.0
| 26,979
|
[
"VisIt"
] |
6978c812696188c66fd91d5b9e4d8e98c9ea604895c80d3d7ac495aae70a0d77
|
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import traceback
from alis import almsgs
import sys
msgs=almsgs.msgs()
class Base :
"""
Returns a 1-dimensional gaussian of form:
p[0] = amplitude
p[1] = x offset
p[2] = dispersion (sigma)
"""
def __init__(self, prgname="", getinst=False, atomic=None, verbose=2):
self._idstr = 'base' # ID string for this class
self._pnumr = 3 # Total number of parameters fed in
self._keywd = dict({'specid':[], 'continuum':False, 'blind':False, 'wave':-1.0}) # Additional arguments to describe the model --- 'input' cannot be used as a keyword
self._keych = dict({'specid':0, 'continuum':0, 'blind':0, 'wave':1}) # Require keywd to be changed (1 for yes, 0 for no)
self._keyfm = dict({'specid':"", 'continuum':"", 'blind':"", 'wave':""}) # Format for the keyword. "" is the Default setting
self._parid = ['amplitude', 'redshift', 'dispersion'] # Name of each parameter
self._defpar = [ 0.0, 0.0, 100.0 ] # Default values for parameters that are not provided
self._fixpar = [ None, None, None ] # By default, should these parameters be fixed?
self._limited = [ [1 ,0 ], [0 ,0 ], [1 ,0 ] ] # Should any of these parameters be limited from below or above
self._limits = [ [0.0,0.0], [0.0,0.0], [1.0E-20,0.0] ] # What should these limiting values be
self._svfmt = [ "{0:.8g}", "{0:.8g}", "{0:.8g}"] # Specify the format used to print or save output
self._prekw = [] # Specify the keywords to print out before the parameters
# DON'T CHANGE THE FOLLOWING --- it tells ALIS what parameters are provided by the user.
tempinput = self._parid+list(self._keych.keys()) #
self._keywd['input'] = dict(zip((tempinput),([0]*np.size(tempinput)))) #
########################################################################
self._verbose = verbose
# Set the atomic data
self._atomic = atomic
if getinst: return
def call_CPU(self, x, p, ae='em', mkey=None, ncpus=1):
"""
Define the functional form of the model for the CPU
--------------------------------------------------------
x : array of wavelengths
p : array of parameters for this model
--------------------------------------------------------
"""
def model():
"""
Define the model here
"""
return p[0]*np.exp(-(x-p[1])**2/(2.0*(p[2]**2)))
#############
yout = np.zeros((p.shape[0], x.size))  # one row of model values per parameter set
for i in range(p.shape[0]):
yout[i,:] = model()
if ae == 'em': return yout.sum(axis=0)
else: return yout.prod(axis=0)
def call_GPU(self, x, p, ae='em'):
"""
Define the functional form of the model for the GPU
--------------------------------------------------------
x : array of wavelengths
p : array of parameters for this model
--------------------------------------------------------
"""
self.call_CPU(x, p, ae=ae)
def adjust_fix(self, mp, cntr, jval, parj):
"""
Adjust the parameters in the input model file
--------------------------------------------------------
cntr : The line number of the model (e.g. if it's the
first model line, cntr=0)
mp : modpass --> A dictionary with the details of all
models read in so far.
--------------------------------------------------------
Nothing should be changed here when writing a new function.
--------------------------------------------------------
"""
# Determine the adjustment that needs to be made.
value = None
if self._fixpar[parj] not in ['False','false','FALSE']:
adj = 1
if self._fixpar[parj] not in ['True','true','TRUE']:
if self._fixpar[parj] is True or self._fixpar[parj] is False: return mp # This means the default values were used and parameters are fixed
try:
value = float(self._fixpar[parj])
except:
msgs.error("Argument of 'fix' only takes True/False/None or float")
else: adj = 0
if mp['mfix'][cntr][jval] == adj and value is None: return mp # No adjustment is necessary
# Determine if the parameter is tied, if it is, store tpnum.
adjarr=[]
pradd=0
if mp['mtie'][cntr][jval] != -1:
tpnum = mp['mtie'][cntr][jval]
else:
tpnum = -1
for i in range(cntr+1):
for j in range(len(mp['mtie'][i])):
if i == cntr and j == jval:
for t in range(len(mp['tpar'])):
if mp['tpar'][t][1] == pradd:
tpnum = t
break
break
elif mp['mtie'][i][j] == -1: pradd += 1
if tpnum != -1: break
if tpnum == -1: # This parameter is not tied to anything else
mp['mfix'][cntr][jval] = adj
if value is not None:
mp['mpar'][cntr][jval] = value
mp['p0'][pradd] = value
return mp
# Now tpnum is known, find all associated values
prnum = mp['tpar'][tpnum][1]
prsum=-1
for i in range(len(mp['mtie'])):
for j in range(len(mp['mtie'][i])):
if mp['mtie'][i][j] == -1: prsum += 1
if (prnum == prsum and mp['mtie'][i][j] == -1) or tpnum == mp['mtie'][i][j]: adjarr.append([i,j])
# Apply the adjustments
for i in range(len(adjarr)):
mp['mfix'][adjarr[i][0]][adjarr[i][1]] = adj
if value is not None:
mp['mpar'][adjarr[i][0]][adjarr[i][1]] = value
if value is not None: mp['p0'][prnum] = value
return mp
def adjust_lim(self, mp, cntr, jval, jind, parj):
"""
Adjust the parameters in the input model file
--------------------------------------------------------
cntr : The line number of the model (e.g. if it's the
first model line, cntr=0)
mp : modpass --> A dictionary with the details of all
models read in so far.
--------------------------------------------------------
Nothing should be changed here when writing a new function.
--------------------------------------------------------
"""
# Determine the adjustment that needs to be made.
try:
if self._limited[parj][jind] == 0: value = None
else: value = np.float64(self._limits[parj][jind])
except:
msgs.error("Argument of 'lim' only takes None or float")
# Determine if the parameter is tied, if it is, store tpnum.
adjarr=[]
pradd=0
if mp['mtie'][cntr][jval] != -1:
tpnum = mp['mtie'][cntr][jval]
else:
tpnum = -1
for i in range(cntr+1):
for j in range(len(mp['mtie'][i])):
if i == cntr and j == jval:
for t in range(len(mp['tpar'])):
if mp['tpar'][t][1] == pradd:
tpnum = t
break
break
elif mp['mtie'][i][j] == -1: pradd += 1
if tpnum != -1: break
if tpnum == -1: # This parameter is not tied to anything else
mp['mlim'][cntr][jval][jind] = value
return mp
# Now tpnum is known, find all associated values
prnum = mp['tpar'][tpnum][1]
prsum=-1
for i in range(len(mp['mtie'])):
for j in range(len(mp['mtie'][i])):
if mp['mtie'][i][j] == -1: prsum += 1
if (prnum == prsum and mp['mtie'][i][j] == -1) or tpnum == mp['mtie'][i][j]: adjarr.append([i,j])
# Apply the adjustments
for i in range(len(adjarr)):
mp['mlim'][adjarr[i][0]][adjarr[i][1]][jind] = value
return mp
def getminmax(self, par, fitrng, Nsig=5.0):
"""
This definition is only used for specifying the
FWHM Resolution of the data.
--------------------------------------------------------
This definition will return the additional wavelength range
of the data to be extracted around the user-specified fitrange
to ensure the edges of the model near the min and max of
fitrange aren't affected.
--------------------------------------------------------
par : The input parameters which defines the FWHM of this
function
fitrng : The fitrange specified by the user at input
Nsig : Width in number of sigma to extract either side of
fitrange
"""
return fitrng[0], fitrng[1]
def load(self, instr, cntr, mp, specid):
"""
Load the parameters in the input model file
--------------------------------------------------------
instr: input string for the parameters and keywords of
model to be loaded (ignoring the identifier at
the beginning of the model line).
cntr : The line number of the model (e.g. if it's the
first model line, cntr=0)
mp : modpass --> A dictionary with the details of all
models read in so far.
--------------------------------------------------------
Nothing should be changed here when writing a new function.
--------------------------------------------------------
"""
def check_tied_param(ival, cntr, mps, iind):
havtie = False
tieval=ival.lstrip('+-.0123456789')
if tieval[0:2] in ['E+', 'e+', 'E-', 'e-']: # Scientific Notation is used.
tieval=tieval[2:].lstrip('.0123456789')
inval=float(ival.rstrip(tieval))
if len(tieval) == 0: # Parameter is not tied
mps['mtie'][cntr].append(-1)
mps['mfix'][cntr].append(0)
else: # parameter is tied
# Determine if this parameter is fixed
if tieval[0].isupper(): mps['mfix'][cntr].append(1)
else: mps['mfix'][cntr].append(0)
# Check if this tieval has been used before
if len(mps['tpar']) == 0: # If it's the first known tied parameter in the model
mps['tpar'].append([])
mps['tpar'][0].append(tieval)
mps['tpar'][0].append(len(mps['p0']))
mps['mtie'][cntr].append(-1) # i.e. not tied to anything
else:
for j in range(0,len(mps['tpar'])):
if mps['tpar'][j][0] == tieval:
mps['mtie'][cntr].append(j)
havtie = True
if havtie == False: # create a New tied parameter
mps['tpar'].append([])
mps['tpar'][-1].append(tieval)
mps['tpar'][-1].append(len(mps['p0']))
mps['mtie'][cntr].append(-1) # i.e. not tied to anything
if havtie == False: mps['p0'].append(inval)
mps['mpar'][cntr].append(inval)
mps['mlim'][cntr].append([self._limits[iind][i] if self._limited[iind][i]==1 else None for i in range(2)])
return mps
################
isspl=instr.split()
# Separate the parameters from the keywords
ptemp, kywrd = [], []
keywdk = list(self._keywd.keys())
keywdk[:] = (kych for kych in keywdk if kych[:] != 'input') # Remove the keyword 'input'
numink = 0
param = [None for all in range(self._pnumr)]
parid = [i for i in range(self._pnumr)]
for i in range(len(isspl)):
if "=" in isspl[i]:
kwspl = isspl[i].split('=')
if kwspl[0] in self._parid:
self._keywd['input'][kwspl[0]]=2
for j in range(self._pnumr):
if kwspl[0] == self._parid[j]:
param[j] = kwspl[1]
numink += 1
break
elif kwspl[0] in keywdk:
kywrd.append(isspl[i])
self._keywd['input'][kwspl[0]]=2
else: msgs.error("Keyword '"+isspl[i]+"' is unknown for -"+msgs.newline()+self._idstr+" "+instr)
else: ptemp.append(isspl[i])
# Set parameters that weren't given in param
cntp=0
fixpid = [0 for all in range(self._pnumr)]
for i in range(self._pnumr):
if param[i] is None:
if cntp >= len(ptemp): # Use default values
param[i] = str(self._defpar[i])
fixpid[i] = 1
else:
param[i] = ptemp[cntp]
self._keywd['input'][self._parid[i]]=1
cntp += 1
# Do some quick checks
if len(ptemp)+numink > self._pnumr:
msgs.error("Incorrect number of parameters (should be "+str(self._pnumr)+"):"+msgs.newline()+self._idstr+" "+instr)
if len(specid) > 1: # Force the user to specify a spectrum ID number
self._keych['specid'] = 1
specidset=False # Detect if the user has specified specid
# Set the parameters:
mp['mtyp'].append(self._idstr)
mp['mpar'].append([])
mp['mtie'].append([])
mp['mfix'].append([])
mp['mlim'].append([])
for i in range(self._pnumr):
mp = check_tied_param(param[i], cntr, mp, i)
if any(fixpid): # If default values had to be used, fix the values
for i in range(self._pnumr):
if fixpid[i] == 1: mp['mfix'][-1][i] = 1
# Now load the keywords:
# Make a copy of the default values
cpy_keywd = self._keywd.copy()
cpy_keych = self._keych.copy()
for i in range(len(kywrd)):
kwspl = kywrd[i].split('=')
ksspl = kwspl[1].split(',')
if kwspl[0] == 'specid':
sidlist = []
for j in range(len(ksspl)): # Do some checks
if ksspl[j] in sidlist:
msgs.error("specid: "+ksspl[j]+" is set twice for -"+msgs.newline()+self._idstr+" "+instr.replace('\t',' '))
else: sidlist.append(ksspl[j])
if ksspl[j] not in specid:
msgs.error("There is no data with specid: "+ksspl[j]+" for -"+msgs.newline()+self._idstr+" "+instr.replace('\t',' '))
specidset=True
for j in range(len(ksspl)):
if type(cpy_keywd[kwspl[0]]) is int:
typeval='integer'
cpy_keywd[kwspl[0]] = int(kwspl[1])
elif type(cpy_keywd[kwspl[0]]) is str:
typeval='string'
cpy_keywd[kwspl[0]] = kwspl[1]
elif type(cpy_keywd[kwspl[0]]) is float:
typeval='float'
cpy_keywd[kwspl[0]] = float(kwspl[1])
elif type(cpy_keywd[kwspl[0]]) is list and kwspl[0] == 'specid':
typeval='list'
cpy_keywd[kwspl[0]] = sidlist
elif type(cpy_keywd[kwspl[0]]) is bool:
if kwspl[1] in ['True', 'False']:
typeval='boolean'
cpy_keywd[kwspl[0]] = kwspl[1] in ['True']
else:
typeval='string'
cpy_keywd[kwspl[0]] = kwspl[1]
msgs.warn(kwspl[0]+" should be of type boolean (True/False)", verbose=self._verbose)
elif cpy_keywd[kwspl[0]] is None:
typeval='None'
cpy_keywd[kwspl[0]] = None
else:
msgs.error("I don't understand the format on line:"+msgs.newline()+self._idstr+" "+instr)
cpy_keych[kwspl[0]] = 0 # Set keych for this keyword to zero to show that this has been changed
# Check that all required keywords were changed
for i in range(len(keywdk)):
if cpy_keych[keywdk[i]] == 1: msgs.error(keywdk[i]+" must be set for -"+msgs.newline()+self._idstr+" "+instr)
# Check that we have set a specid
if len(specid) == 1 and not specidset:
if len(cpy_keywd['specid']) == 0: cpy_keywd['specid'].append(specid[0])
# Append the final set of keywords
mp['mkey'].append(cpy_keywd)
return mp, parid
def parin(self, i, par, parb):
"""
This routine converts a parameter in the input model file
to the parameter used in 'call'
--------------------------------------------------------
When writing a new function, one should change how each
input parameter 'par' is converted into a parameter used
in the function specified by 'call'
--------------------------------------------------------
"""
if i == 0: pin = par
elif i == 1: pin = parb['ap_1a'] * (1.0 + par)
elif i == 2: pin = parb['ap_2a'] * par/299792.458
return pin
def parout(self, params, mp, istart, level, errs=None, reletter=False, conv=None):
"""
Convert the parameter list to the input form for
printing to screen or output to a file.
--------------------------------------------------------
pval : List of all parameters
mp : modpass, which includes details of all models
istart : Model number
errs : List of all parameter errors.
reletter : Set to true if you want to reletter the tied
and fixed parameters (a will be used for the
first parameter, b is used for the second...)
conv : If convergence test is being written, conv is
the threshold for convergence (in sigma's).
--------------------------------------------------------
Nothing should be changed here when writing a new function.
--------------------------------------------------------
"""
if errs is None: errors = params
else: errors = errs
add = self._pnumr
havtie = 0
tienum = 0
levadd = 0
outstring = [' %s ' % (self._idstr)]
errstring = ['# %s ' % (self._idstr)]
for i in range(self._pnumr):
if mp['mkey'][istart]['input'][self._parid[i]] == 0: # Parameter not given as input
outstring.append( "" )
errstring.append( "" )
continue
elif mp['mkey'][istart]['input'][self._parid[i]] == 1: pretxt = "" # Parameter is given as input, without parid
else: pretxt = self._parid[i]+"=" # Parameter is given as input, with parid
if mp['mtie'][istart][i] >= 0:
if reletter:
newfmt=pretxt+self.gtoef(params[mp['tpar'][mp['mtie'][istart][i]][1]],self._svfmt[i]+'{1:c}')
outstring.append( (newfmt).format(params[mp['tpar'][mp['mtie'][istart][i]][1]],97+mp['mtie'][istart][i]-32*mp['mfix'][istart][1]) )
if conv is None:
errstring.append( (newfmt).format(errors[mp['tpar'][mp['mtie'][istart][i]][1]],97+mp['mtie'][istart][i]-32*mp['mfix'][istart][1]) )
else:
if params[mp['tpar'][mp['mtie'][istart][i]][1]] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}--{1:c} ').format(cvtxt,97+tienum-32*mp['mfix'][istart][1]) )
else:
newfmt=pretxt+self.gtoef(params[mp['tpar'][mp['mtie'][istart][i]][1]],self._svfmt[i]+'{1:s}')
outstring.append( (newfmt).format(params[mp['tpar'][mp['mtie'][istart][i]][1]],mp['tpar'][mp['mtie'][istart][i]][0]) )
if conv is None:
errstring.append( (newfmt).format(errors[mp['tpar'][mp['mtie'][istart][i]][1]],mp['tpar'][mp['mtie'][istart][i]][0]) )
else:
if params[mp['tpar'][mp['mtie'][istart][i]][1]] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}--{1:s} ').format(cvtxt,mp['tpar'][mp['mtie'][istart][i]][0]) )
add -= 1
else:
if havtie != 2:
if havtie == 0: # First searching for the very first instance of a tied parameter
for tn in range(0,len(mp['tpar'])):
if mp['tpar'][tn][1] == level+levadd:
tienum=tn
havtie=1
break
if len(mp['tpar']) != 0:
if mp['tpar'][tienum][1] == level+levadd:
if reletter:
newfmt=pretxt+self.gtoef(params[level+levadd],self._svfmt[i]+'{1:c}')
outstring.append( (newfmt).format(params[level+levadd],97+tienum-32*mp['mfix'][istart][1]) )
if conv is None:
errstring.append( (newfmt).format(errors[level+levadd],97+tienum-32*mp['mfix'][istart][1]) )
else:
if params[level+levadd] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}--{1:c} ').format(cvtxt,97+tienum-32*mp['mfix'][istart][1]) )
else:
newfmt=pretxt+self.gtoef(params[level+levadd],self._svfmt[i]+'{1:s}')
outstring.append( (newfmt).format(params[level+levadd],mp['tpar'][tienum][0]) )
if conv is None:
errstring.append( (newfmt).format(errors[level+levadd],mp['tpar'][tienum][0]) )
else:
if params[level+levadd] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}--{1:s} ').format(cvtxt,mp['tpar'][tienum][0]) )
tienum += 1
if tienum == len(mp['tpar']): havtie = 2 # Stop searching for 1st instance of tied param
else:
newfmt=pretxt+self.gtoef(params[level+levadd],self._svfmt[i])
outstring.append( (newfmt).format(params[level+levadd]) )
if conv is None:
errstring.append( (newfmt).format(errors[level+levadd]) )
else:
if params[level+levadd] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}-- ').format(cvtxt) )
else: # There are no tied parameters!
newfmt=pretxt+self.gtoef(params[level+levadd],self._svfmt[i])
outstring.append( (newfmt).format(params[level+levadd]) )
if conv is None:
errstring.append( (newfmt).format(errors[level+levadd]) )
else:
if params[level+levadd] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}-- ').format(cvtxt) )
else:
newfmt=pretxt+self.gtoef(params[level+levadd],self._svfmt[i])
outstring.append( (newfmt).format(params[level+levadd]) )
if conv is None:
errstring.append( (newfmt).format(errors[level+levadd]) )
else:
if params[level+levadd] < conv: cvtxt = "CONVERGED"
else: cvtxt = "!!!!!!!!!"
errstring.append( ('--{0:s}-- ').format(cvtxt) )
levadd += 1
level += add
# Now write in the keywords
keys = list(mp['mkey'][istart].keys())
keys[:] = (kych for kych in keys if kych[:] != 'input') # Remove the keyword 'input'
for i in range(len(keys)):
if mp['mkey'][istart]['input'][keys[i]] == 0: # This keyword wasn't provided as user input
outstring.append( "" )
errstring.append( "" )
continue
if type(mp['mkey'][istart][keys[i]]) is list: outkw = ','.join(map(str, mp['mkey'][istart][keys[i]]))
else: outkw = mp['mkey'][istart][keys[i]]
if self._keyfm[keys[i]] != "":
outstring.append( ('{0:s}='+self._keyfm[keys[i]]).format(keys[i],outkw) )
errstring.append( ('{0:s}='+self._keyfm[keys[i]]).format(keys[i],outkw) )
else:
outstring.append( '%s=%s' % (keys[i],outkw) )
errstring.append( '%s=%s' % (keys[i],outkw) )
# outstring.append( '{0:s}={1:s}'.format(keys[i],outkw) )
# errstring.append( '{0:s}={1:s}'.format(keys[i],outkw) )
# Now place the keywords specified in self._prekw at the beginning of the return string:
if len(self._prekw) != 0:
insind = 1
for i in range(len(self._prekw)):
delind = -1
for j in range(len(keys)):
if self._prekw[i] == keys[j]:
delind = self._pnumr+insind+j
del keys[j]
break
if delind == -1: msgs.bug("prekw variable for function "+self._idstr+" contains bad argument", verbose=self._verbose)
outstring.insert(insind,outstring[delind])
errstring.insert(insind,errstring[delind])
del outstring[delind+1]
del errstring[delind+1]
insind += 1
if mp['mkey'][istart]['blind'] and conv is None:
retout = " ------ BLIND MODEL ------\n"
#reterr = " ------ BLIND MODEL ------\n"
reterr = ' '.join(errstring) + '\n'
else:
retout = ' '.join(outstring) + '\n'
reterr = ' '.join(errstring) + '\n'
# Return the strings and the new level
if errs is not None or conv is not None:
return retout, reterr, level
else:
return retout, level
def set_pinfo(self, pinfo, level, mp, lnk, mnum):
"""
Place limits on the functions parameters (as specified in init)
Nothing should be changed here.
"""
add = self._pnumr
levadd = 0
for i in range(self._pnumr):
# First Check if there are any operations/links to perform on this parameter
if len(lnk['opA']) != 0:
breakit=False
for j in range(len(mp['tpar'])):
if breakit: break
if mp['tpar'][j][1] == level+levadd: # then this is a tied parameter
# Check if this tied parameter is to be linked to another parameter
for k in range(len(lnk['opA'])):
if mp['tpar'][j][0] == lnk['opA'][k]:
ttext = lnk['exp'][k]
for l in lnk['opB'][k]:
for m in range(len(mp['tpar'])):
if mp['tpar'][m][0] == l:
pn = mp['tpar'][m][1]
break
ttext = ttext.replace(l,'p[{0:d}]'.format(pn))
pinfo[level+levadd]['tied'] = ttext
breakit = True
break
# Now set limits and fixed values
if mp['mtie'][mnum][i] >= 0: add -= 1
elif mp['mtie'][mnum][i] <= -2:
pinfo[level+levadd]['limited'] = [0 if o is None else 1 for o in mp['mlim'][mnum][i]]
pinfo[level+levadd]['limits'] = [0.0 if o is None else np.float64(o) for o in mp['mlim'][mnum][i]]
mp['mfix'][mnum][i] = -1
levadd += 1
else:
pinfo[level+levadd]['limited'] = [0 if o is None else 1 for o in mp['mlim'][mnum][i]]
pinfo[level+levadd]['limits'] = [0.0 if o is None else np.float64(o) for o in mp['mlim'][mnum][i]]
pinfo[level+levadd]['fixed'] = mp['mfix'][mnum][i]
levadd += 1
return pinfo, add
def set_vars(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None, nexbin=None, ddpid=None, getinfl=False):
"""
Return the parameters for a Gaussian function to be used by 'call'
The only thing that should be changed here is the parb values
"""
levadd=0
params=np.zeros(self._pnumr)
parinf=[]
for i in range(self._pnumr):
lnkprm = None
parb = dict({'ap_1a':self._keywd['wave'], 'ap_2a':params[1]})
if mp['mtie'][ival][i] >= 0:
getid = mp['tpar'][mp['mtie'][ival][i]][1]
elif mp['mtie'][ival][i] <= -2:
if len(mp['mlnk']) == 0:
lnkprm = mp['mpar'][ival][i]
else:
for j in range(len(mp['mlnk'])):
if mp['mlnk'][j][0] == mp['mtie'][ival][i]:
cmd = 'lnkprm = ' + mp['mlnk'][j][1]
namespace = dict({'p': p})
exec(cmd, namespace)
lnkprm = namespace['lnkprm']
levadd += 1
else:
getid = level+levadd
levadd+=1
if lnkprm is None:
params[i] = self.parin(i, p[getid], parb)
if mp['mfix'][ival][i] == 0: parinf.append(getid)
else:
params[i] = lnkprm
if ddpid is not None:
if ddpid not in parinf: return []
if nexbin is not None:
if params[2] == 0.0: msgs.error("Cannot calculate "+self._idstr+" subpixellation -- width = 0.0")
if nexbin[0] == "km/s": return params, 1
elif nexbin[0] == "A" : return params, 1
else: msgs.bug("bintype "+nexbin[0]+" should not have been specified in model function: "+self._idstr, verbose=self._verbose)
elif getinfl: return params, parinf
else: return params
def gtoef(self, num, fmt):
"""
A definition to make the printout much nicer looking
"""
return fmt.replace("g}","f}")+" " if np.abs(np.log10(np.abs(num))) < 3. or num==0.0 else fmt.replace("g","E")
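    # Example of the switch in gtoef (hypothetical values): magnitudes within about three
    # decades of 1 keep a fixed-point format, e.g. gtoef(num=0.001234, fmt="{0:.8g}") returns
    # "{0:.8f} ", while larger or smaller magnitudes switch to exponential notation,
    # e.g. gtoef(num=12345.0, fmt="{0:.8g}") returns "{0:.8E}".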
def tick_info(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None):
"""
For a given model, determine the wavelengths that
tick marks should be plotted, and the label that
should be associated with this tick mark.
By default, return no tick marks and no labels.
"""
return [], []
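# --- Illustrative sketch (not part of ALIS): the 1D Gaussian evaluated by Base.call_CPU ---
# Parameter ordering follows the class docstring: p[0]=amplitude, p[1]=x offset,
# p[2]=dispersion (sigma). The values below are hypothetical and only demonstrate the shape
# of the calculation; in real use, p is assembled from the model file via load/set_vars.
def _gaussian_sketch():
    import numpy as np
    x = np.linspace(-5.0, 5.0, 11)            # wavelength-like grid
    p = np.array([[1.0, 0.0, 1.0],            # one "model line": amplitude, offset, sigma
                  [0.5, 2.0, 0.5]])           # a second, narrower component
    yout = np.zeros((p.shape[0], x.size))
    for i in range(p.shape[0]):
        yout[i, :] = p[i, 0] * np.exp(-(x - p[i, 1])**2 / (2.0 * p[i, 2]**2))
    return yout.sum(axis=0)                   # 'em' (emission) case: sum the components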
#######################################################################################
#######################################################################################
#######################################################################################
# Import all of the functions used in alis
from alis import alfunc_brokenpowerlaw
from alis import alfunc_chebyshev
from alis import alfunc_constant
from alis import alfunc_gaussian
from alis import alfunc_legendre
from alis import alfunc_linear
from alis import alfunc_lineemission
from alis import alfunc_phionxs
from alis import alfunc_polynomial
from alis import alfunc_powerlaw
from alis import alfunc_random
from alis import alfunc_spline
from alis import alfunc_splineabs
from alis import alfunc_thar
from alis import alfunc_tophat
from alis import alfunc_variable
from alis import alfunc_voigt
# Convolution+shifting routines
from alis import alfunc_afwhm
from alis import alfunc_apod
from alis import alfunc_lsf
from alis import alfunc_vfwhm
from alis import alfunc_voigtconv
from alis import alfunc_vsigma
from alis import alshift
from alis import alfunc_user
"""
Initialises the functions that are used in this file.
If you want your function to be recognised, you must
include its idstr value and the function call here.
"""
def call(prgname="",getfuncs=False,getinst=False,atomic=None,verbose=2):
sendatomic = ['voigt', 'lineemission', 'splineabs', 'phionxs']
# Add your new function to the following:
fd = dict({ 'Afwhm' : alfunc_afwhm.AFWHM,
'apod' : alfunc_apod.APOD,
'Ashift' : alshift.Ashift,
'brokenpowerlaw' : alfunc_brokenpowerlaw.BrokenPowerLaw,
'chebyshev' : alfunc_chebyshev.Chebyshev,
'constant' : alfunc_constant.Constant,
'gaussian' : alfunc_gaussian.Gaussian,
'gaussianconst' : alfunc_gaussian.GaussianConstant,
'gaussianconsthelium' : alfunc_gaussian.GaussianConstantHelium,
'legendre' : alfunc_legendre.Legendre,
'linear' : alfunc_linear.Linear,
'lineemission' : alfunc_lineemission.LineEmission,
'phionxs' : alfunc_phionxs.PhotIon_CrossSection,
'polynomial' : alfunc_polynomial.Polynomial,
'polyshift' : alshift.polyshift,
'powerlaw' : alfunc_powerlaw.PowerLaw,
'random' : alfunc_random.Random,
'spline' : alfunc_spline.Spline,
'splineabs' : alfunc_splineabs.SplineAbs,
'thar' : alfunc_thar.ThAr,
'tophat' : alfunc_tophat.TopHat,
'variable' : alfunc_variable.Variable,
'vfwhm' : alfunc_vfwhm.vFWHM,
'lsf' : alfunc_lsf.LSF,
'voigt' : alfunc_voigt.Voigt,
'voigtconv' : alfunc_voigtconv.VoigtConv,
'vshift' : alshift.vshift,
'vshiftscale' : alshift.vshiftscale,
'vsigma' : alfunc_vsigma.vSigma
})
# Load the user-specified functions
msgs.info("Loading user functions")
try:
usr_fd, usr_atm = alfunc_user.load_user_functions()
except Exception:
msgs.warn("There appears to be a problem loading the user functions")
et, ev, tb = sys.exc_info()
while tb:
co = tb.tb_frame.f_code
filename = str(co.co_filename)
line_no = str(traceback.tb_lineno(tb))
tb = tb.tb_next
filename=filename.split('/')[-1]
msgs.bug("A bug has been spotted on Line "+line_no+" of "+filename+" with error:"+msgs.newline()+str(ev))
sys.exit()
# Incorporate the user-defined functions
kvals = list(usr_fd.keys())
if len(kvals) == 0:
msgs.info("No user functions to load!")
fdk = list(fd.keys())
# Check there is no overlap in function names
for i in range(len(kvals)):
if kvals[i] in fdk:
msgs.error("There is already a built-in function called '{0:s}'".format(kvals[i])+msgs.newline()+"Please give your function a new name.")
else:
fd[kvals[i]] = usr_fd[kvals[i]]
msgs.info("Successfully loaded user function: {0:s}".format(kvals[i]))
for i in range(len(usr_atm)):
if usr_atm[i] in kvals:
sendatomic.append(usr_atm[i])
else:
msgs.error("with user-defined function {0:s}".format(usr_atm[i])+msgs.newline()+"Atomic data request for a function that does not exist!")
# Don't touch anything below
if getfuncs and getinst:
msgs.bug("Two keywords in alfunc_base.py unexpectedly set to 'True' ...", verbose=2)
sys.exit()
if getinst:
keys = list(fd.keys()) # Python 3
for i in range(len(keys)):
if keys[i] in sendatomic:
fd[keys[i]] = fd[keys[i]](prgname=prgname, getinst=getinst, verbose=verbose, atomic=atomic)
else:
fd[keys[i]] = fd[keys[i]](prgname=prgname, getinst=getinst, verbose=verbose)
if getfuncs:
return list(fd.keys())
else:
return fd
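# --- Usage sketch (assumption: the alis package is importable in your environment) ---
# call(getfuncs=True) returns only the list of recognised model-function names, while
# call(getinst=True, atomic=...) returns a dict of instantiated function objects keyed by
# idstr; both behaviours follow directly from the body of call() above.
if __name__ == '__main__':
    available = call(getfuncs=True, verbose=2)
    print("Known ALIS model functions:", sorted(available))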
|
rcooke-ast/ALIS
|
alis/alfunc_base.py
|
Python
|
gpl-3.0
| 37,539
|
[
"Gaussian"
] |
8408b0c0c4a44b48f862ed3b8eec9ab1a52cc4ed398c859cef728d945cdd2af8
|
# -*- coding: utf-8 -*-
import pprint
from collections import defaultdict
from string import punctuation
# Get a word count for each word in a pair of wordlists that appear in a block of text.
# Exclude the appearance of a word from the count if any of the 3 words before or after the word
# in question is a member of the negator set (no, not, never).
def main():
wordlist1 = ['mickey', 'pluto', 'goofy', 'minnie', 'donald']
wordlist2 = ['bugs', 'daffy', 'elmer', 'foghorn', 'porky']
# Whether to ensure the words in the wordlists are lowercase depends on your use-case
wordlist1 = [element.lower() for element in wordlist1]
wordlist2 = [element.lower() for element in wordlist2]
mergedwordset = set(wordlist1 + wordlist2)
negatorset = set(['no', 'not', 'never'])
# Using collections.defaultdict here so that we can add a key with the value of 1
# if it doesn't already exist and increment the value of the key if it does exist.
countincludingneg = defaultdict(int)
countexcludingneg = defaultdict(int)
# Using a multi-line string here just to simplify this example.
# This will be parsed for the word count.
# Adapt it to your own uses.
# Text excerpts from wikipedia:
# http://en.wikipedia.org/wiki/Pluto_(Disney)
textblock = '''
Pluto, also called Pluto the Pup, is a cartoon character created in 1930 by Walt Disney Productions. He is a red-colored, medium-sized, short-haired dog with black ears. Unlike most Disney characters, Pluto is not anthropomorphic beyond some characteristics such as facial expression, though he did speak for a short portion of his history. He is Mickey Mouse's pet. Officially a mixed-breed dog, he made his debut as a bloodhound in the Mickey Mouse cartoon The Chain Gang. Together with Mickey Mouse, Minnie Mouse, Donald Duck, Daisy Duck, and Goofy, Pluto is one of the "Sensational Six"—the biggest stars in the Disney universe. Though all six are non-human animals, Pluto alone is not dressed as a human.
Pluto debuted in animated cartoons and appeared in 24 Mickey Mouse films before receiving his own series in 1937. All together Pluto appeared in 89 short films between 1930 and 1953. Several of these were nominated for an Academy Award, including The Pointer (1939), Squatter's Rights (1946), Pluto's Blue Note (1947), and Mickey and the Seal (1948). One of his films, Lend a Paw (1941), won the award in 1942. Because Pluto does not speak, his films generally rely on physical humor. This made Pluto a pioneering figure in character animation, which is expressing personality through animation rather than dialogue.
Like all of Pluto's co-stars, the dog has appeared extensively in comics over the years, first making an appearance in 1931. He returned to theatrical animation in 1990 with The Prince and the Pauper and has also appeared in several direct-to-video films. Pluto also appears in the television series Mickey Mouse Works (1999–2000), House of Mouse (2001–2003), and Mickey Mouse Clubhouse (2006–2013).
In 1998, Disney's copyright on Pluto, set to expire in several years, was extended by the passage of the Sonny Bono Copyright Term Extension Act. Disney, along with other studios, lobbied for passage of the act to preserve their copyrights on characters such as Pluto for 20 additional years.
Pluto first and most often appears in the Mickey Mouse series of cartoons. On rare occasions he is paired with Donald Duck ("Donald and Pluto", "Beach Picnic", "Window Cleaners", "The Eyes Have It", "Donald's Dog Laundry", & "Put Put Troubles").
The first cartoons to feature Pluto as a solo star were two Silly Symphonies, Just Dogs (1932) and Mother Pluto (1936). In 1937, Pluto appeared in Pluto's Quin-Puplets which was the first instalment of his own film series, then headlined Pluto the Pup. However, they were not produced on a regular basis until 1940, by which time the name of the series was shortened to Pluto.
His first comics appearance was in the Mickey Mouse daily strips in 1931 two months after the release of The Moose Hunt. Pluto Saves the Ship, a comic book published in 1942, was one of the first Disney comics prepared for publication outside newspaper strips. However, not counting a few cereal give-away mini-comics in 1947 and 1951, he did not have his own comics title until 1952.
In 1936 Pluto got an early title feature in a picture book under title "Mickey Mouse and Pluto the Pup" by Whitman Publishing.
Pluto runs his own neighborhood in Disney's Toontown Online. It's called the Brrrgh and it's always snowing there except during Halloween. During April Toons Week, a weekly event that is very silly, Pluto switches playgrounds with Minnie (all other characters do this as well). Pluto actually talks in Minnie's Melodyland.
Pluto has also appeared in the television series Mickey Mouse Works (1999–2000), Disney's House of Mouse (2001–2003) and Mickey Mouse Clubhouse (2006–present). Curiously enough, however, Pluto was the only standard Disney character not included when the whole gang was reunited for the 1983 featurette Mickey's Christmas Carol, although he did return in The Prince and the Pauper (1990) and Runaway Brain (1995). He also had a cameo in Who Framed Roger Rabbit (1988). In 1996, he made a cameo in the Quack Pack episode "The Really Mighty Ducks".
'''
# Removing leading and trailing whitespace.
# Removing new-lines so we can extend the look-aheads / look-behinds across lines.
# Removing punctuation.
# Setting all text to lowercase
# Adjust to your use-cases
textblock = textblock.strip().replace('\n', ' ').translate(None, punctuation).lower()
textblockwords = textblock.split()
# Construct a list of 6-gram (or less) word windows.
# The window will center on each individual word of the textblock
# and include the 3 words before and after its appearance.
windows = n_gram_word_windows(textblockwords, 3)
# Un-comment the following line if you'd like to see a representation of the n-gram word windows
#pprint.pprint(windows)
for windowdict in windows:
for key, ngramlist in windowdict.iteritems():
# Is the word a member of the wordlists?
if key in mergedwordset:
countincludingneg[key] += 1
# Do the words preceding or following appear in the set of negators?
if len(negatorset.intersection(set(ngramlist))) == 0:
countexcludingneg[key] += 1
print "Count including negators"
pprint.pprint(countincludingneg)
print "Count excluding negators"
pprint.pprint(countexcludingneg)
# The idea here is to examine each word in the textblock and
# create a list containing the 3 words before the word, the word itself, and the 3 words following the word.
# This method will return a list of dictionaries.
# The dictionary will be comprised of the examined word as the key, and its n-gram word window as the value.
def n_gram_word_windows(textlist, lookaheadbehind=3):
wordwindows = []
for index, item in enumerate(textlist):
intermediatelist = []
if index < lookaheadbehind:
for preceedingword in textlist[:index]:
intermediatelist.append(preceedingword)
else:
for preceedingword in textlist[index-lookaheadbehind:index]:
intermediatelist.append(preceedingword)
if index < len(textlist):
for lookaheadword in textlist[index:index+lookaheadbehind+1]:
intermediatelist.append(lookaheadword)
wordwindows.append({item: intermediatelist})
return wordwindows
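# A minimal illustration (hypothetical sentence, not part of the original script) of the
# window construction above. With lookaheadbehind=2 every word maps to itself plus up to
# two words on either side, which is exactly the window that main() scans for negators.
def demo_word_windows():
    example = ['never', 'trust', 'mickey']
    windows = n_gram_word_windows(example, 2)
    # windows == [{'never': ['never', 'trust', 'mickey']},
    #             {'trust': ['never', 'trust', 'mickey']},
    #             {'mickey': ['never', 'trust', 'mickey']}]
    pprint.pprint(windows)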
if __name__ == '__main__':
main()
|
jlyoung/stackoverflow_answers
|
negator_parser/negatorparser.py
|
Python
|
mit
| 7,417
|
[
"MOOSE"
] |
e1bafcc9d4de1cc79a684dfefb6af9286858ef5ea278da8240adfee934296bd0
|
# Portions copyright (c) 2014 ZMAN(ZhangNing)
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Name: WebZ轻量级自动化框架
# Description: keyword-driven automated testing framework
# Author: ZMAN(ZhangNing)
#
# This project also used some third-party modules:
#
# selenium: Licensed under the Apache License, Version 2.0 (the "License");
# Copyright 2008-2013 Software Freedom Conservancy.
#
# splinter: Licensed under the BSD License;
# Copyright 2012 splinter authors. All rights reserved.
#
# reportlab: Licensed under the BSD License;
# Copyright ReportLab Europe Ltd. 2000-2014.
#
# xlrd: Licensed under the BSD License;
# Portions copyright 2005-2009, Stephen John Machin, Lingfo Pty Ltd. All rights reserved.
#
import time
class process :
def __init__(self) :
return
def openpage(self, url) :
import splinter
import selenium
browser = splinter.Browser('chrome')
browser.driver.maximize_window()
browser.visit(url)
time.sleep(2)
return browser
def sendcase_click(self, browser, widget, method, name) :
if widget == 'id' and method == 'click' :
cmethod = 'browser.find_by_id("%s").click()' %name
flag2 = self.docase(browser,cmethod)
elif widget == 'css' and method == 'click' :
cmethod = 'browser.find_by_css("%s").click()' %name
flag2 = self.docase(browser,cmethod)
elif widget == 'xpath' and method == 'click' :
cmethod = "browser.find_by_xpath('%s').click()" %name
flag2 = self.docase(browser,cmethod)
return(flag2)
def sendcase_fill(self, browser, widget, method, name, attr) :
if widget == 'id' and method == 'fill' :
cmethod = 'browser.find_by_id("%s").fill("%s")' %(name, attr)
elif widget == 'css' and method == 'fill' :
cmethod = 'browser.find_by_css("%s").fill("%s")' %(name, attr)
elif widget == 'xpath' and method == 'fill' :
cmethod = "browser.find_by_xpath('%s').fill('%s')" %(name, attr)
flag2 = self.docase(browser,cmethod)
return(flag2)
def sendcase_action2(self, browser, method, attr) :
if method == 'alert' :
if attr == '是' :
cmethod = 'browser.get_alert().accept()'
elif attr == '否' :
cmethod = 'browser.get_alert().dismiss()'
elif method == 'switch' :
if attr == '最新' :
cmethod = 'browser.switch_to_window(browser.windows[-1])'
else :
attr = int(attr)-1
cmethod = 'browser.switch_to_window(browser.windows[%d])' %attr
flag2 = self.docase(browser,cmethod)
return(flag2)
def sendcase_action(self, browser, method) :
if method == 'back' :
cmethod = 'browser.back()'
elif method == 'refresh' :
cmethod = 'browser.reload()'
elif method == 'screenshot' :
cmethod = 'browser.screenshot(name="F:/PyTest/WebZ轻量级自动化框架", suffix=".jpg")'
elif method == 'wait' :
cmethod = 'time.sleep(2)'
flag2 = self.docase(browser,cmethod)
return(flag2)
def sendcase_click_text(self, browser, method, name) :
if method == 'click_text' :
cmethod = 'browser.click_link_by_text("%s")' %name
elif method == 'visit' :
if 'http://' not in name :
name = 'http://%s' %name
cmethod = 'browser.visit("%s")' %name
flag2 = self.docase(browser,cmethod)
return(flag2)
def mouse_move(self, browser, method, name) :
if method == 'css' :
cmethod = 'browser.find_by_css("%s").mouse_over()' %name
elif method == 'xpath' :
cmethod = 'browser.find_by_xpath("%s").mouse_over()' %name
flag2 = self.docase(browser,cmethod)
return(flag2)
# retry the check (up to two attempts)
def sendcase_verify(self, browser, widget, attr) :
i = flag2 = 0
if widget == 'xpath' :
for i in range(2) :
try :
a = browser.find_by_xpath('%s' %attr)
if len(a) != 0 :
flag2 = 1
return(flag2)
elif len(a) == 0 :
time.sleep(0.5)
i += 1
if i == 2 :
return(flag2)
except Exception as e :
print(e)
elif widget == 'css' :
for i in range(2) :
try :
a = browser.find_by_css('%s' %attr)
if len(a) != 0 :
flag2 = 1
return(flag2)
elif len(a) == 0 :
time.sleep(0.5)
i += 1
if i == 2 :
return(flag2)
except Exception as e :
print(e)
elif widget == 'url' :
for i in range(2) :
try :
a = browser.url
a = a.rstrip('/')
if "http://" + attr == a :
flag2 = 1
return(flag2)
elif "http://" + attr != a :
time.sleep(0.5)
i += 1
if i == 2 :
return(flag2)
except Exception as e :
print(e)
elif widget == 'text' :
for i in range(2) :
try :
a = browser.is_text_present('%s' %attr)
if a == True :
flag2 = 1
return(flag2)
elif a == False :
time.sleep(0.5)
i += 1
if i == 2 :
return(flag2)
except Exception as e :
print(e)
def docase(self, browser, cmethod) :
i = flag2 = 0
for i in range(3) :
try :
exec(cmethod)
flag2 = 1
return(flag2)
except Exception as e:
if "Element is not currently interactable" in str(e) :
print('something wrong with webdriver')
return(flag2)
else :
print(e)
time.sleep(1.5)
i += 1
if i == 3 :
return(flag2)
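# --- Illustrative driver (not part of the framework): how the keyword methods above might
# --- be chained together. The URL, element ids and expected text are hypothetical.
def example_run():
    p = process()
    browser = p.openpage('http://example.com/login')
    p.sendcase_fill(browser, 'id', 'fill', 'username', 'tester')  # fill the field with id "username"
    p.sendcase_click(browser, 'id', 'click', 'submit')            # click the element with id "submit"
    return p.sendcase_verify(browser, 'text', 'Welcome')          # 1 if the expected text is found, else 0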
if __name__ == '__main__':
input("You can not run me!")
|
lsp84ch83/PyText
|
WebZ轻量级自动化框架/process.py
|
Python
|
gpl-3.0
| 7,006
|
[
"VisIt"
] |
f77814d9e95c25af1318a8c1dbd4cee439a49ff47bd5f38e3d1446229acf6e2d
|
#!/usr/bin/env python
from __future__ import print_function
import itertools
import subprocess
import os.path
import sys
import pickle
import yaml
from parser import *
CACHEFILE="runall_cache.pycache"
readCache = True
writeCache = True
debug = False
OPERDEFSFILE="operdefs.tex"
writeOperDefs = False
RESULTSTABLEFILE="resultstable.tex"
PREDS="numpreds.tex"
outputCache = {}
if readCache:
if os.path.isfile(CACHEFILE):
with open(CACHEFILE, 'rb') as input:
outputCache = pickle.load(input)
if debug: print(yaml.dump(outputCache, default_flow_style=False))
APPENDIXFILE="raw.tex"
FULLRESULTS="fullresults.tex"
#datastructures=['Hashtable']
#datastructures=['Counter']
datastructures=['Counter','Accumulator','Set','HashTable','Stack','Memory']#,'Queue']
labels={'Counter':'counter',
'Counter (lifted, auto-generated)':'counterauto',
'Accumulator':'accumulator',
'Set':'set',
'HashTable':'hashtable',
'Memory':'memory',
'Stack':'stack'}
forceruns=[]
bestopt = '--poke'
#forceruns=['Queue']
# ,'Queue','UniqueID']
def getPrecondition(command, force):
if (command in outputCache) and (not force):
print("Cached: " + command)
return outputCache[command]
else:
print("Running: " + command)
p = subprocess.Popen(command.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
fullinput=""
out, err = p.communicate(fullinput)
if p.returncode != 0:
print("Out:\n" + out + "\nErr:\n" + err)
sys.exit(1)
ret = {}
ret["output"] = out.strip()
for line in err.strip().split('\n'):
print(line)
l = line.split()
print(l)
ret[l[0][:-1]] = l[1]
outputCache[command] = ret
return ret
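# Example (hypothetical synth.py output) of the parsing convention above: stdout becomes
# ret["output"] and every "key: value" line on stderr becomes ret[key] = value, e.g. stderr
#   smtqueries: 12
#   time: 0.84
#   predicates: 30
#   predicatesFiltered: 9
# yields {"output": "...", "smtqueries": "12", "time": "0.84", "predicates": "30",
#         "predicatesFiltered": "9"} -- the keys consumed later by printRow and the
# predicate-count bookkeeping.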
import pyparsing
operSet = set()
words=["zero", "one", "two", "three", "four",
# "five", "six", "seven", "eight", "nine",
# "ten", "eleven", "twelve", "thirteen", "fourteen",
# "fifteen", "sixteen", "seventeen", "eightteen", "nineteen",
# "twenty", "twentyone", "twentytwo", "twentythree", "twentyfour",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots",
"lots", "lots", "lots", "lots", "lots"]
maxwords = 2
operNameMap={ "=":"equal", "+":"plus","-":"minus" }
def latexifySmt2(l, checktype, compress):
varCompress={"contents":"c"} if compress else {}
if isinstance(l, list):
assert(not isinstance(l[0], list))
oper = latexifySmt2(l[0], checktype, compress)
if oper in operNameMap:
oper = operNameMap[oper]
operSet.add(oper)
args = [latexifySmt2(x, checktype, compress) if isinstance(x, list) else
"\CCvar{"+(varCompress[x] if x in varCompress else x)+"}"
for x in l[1:]]
if compress:
if(len(args)>maxwords):
args[maxwords] = "\ldots"
args = args[:maxwords+1]
if oper == "and" or oper == "or": oper += words[len(args)]
return "\CC"+oper+"{"+"}{".join(args)+"}"
else:
if oper == "and":
return " $\\wedge$ ".join(args)
elif oper == "or":
return "["+"]\n\n $\\vee$ [".join(args)+"]"
else:
return "\CC"+oper+"{"+"}{".join(args)+"}"
else:
return l
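# Worked example (hypothetical SMT2 terms, not taken from the case-study output) of the
# conversion performed by latexifySmt2 above: operators become \CC<oper>{...} macros,
# variables become \CCvar{...}, and with compress=False conjunctions are joined by $\wedge$.
def latexify_example():
    eq = latexifySmt2(['=', 'x1', 'y1'], 'bowtie', compress=False)
    # eq == '\CCequal{\CCvar{x1}}{\CCvar{y1}}'
    conj = latexifySmt2(['and', ['=', 'x1', 'y1'], ['=', 'x2', 'y2']], 'bowtie', compress=False)
    # conj == '\CCequal{\CCvar{x1}}{\CCvar{y1}} $\wedge$ \CCequal{\CCvar{x2}}{\CCvar{y2}}'
    return eq, conj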
def printRow(ds, method1, method2, sC, output, checktype, append=None):
precondition = sC[bestopt]["output"]
preconditionNoPoke = sC['--no-poke']["output"]
if debug: print("printRow:", precondition)
queriesPoke = sC['--poke']["smtqueries"]
smttimePoke = sC['--poke']["time"]
queriesNoPoke = sC['--no-poke']["smtqueries"]
smttimeNoPoke = sC['--no-poke']["time"]
if precondition == "false" or precondition == "true":
pc = precondition
pcNoPoke = preconditionNoPoke
pccompress = pc
else:
parsed = pyparsing.OneOrMore(pyparsing.nestedExpr()).parseString(precondition).asList()[0]
pc = latexifySmt2(parsed, checktype, compress=False)
pccompress = latexifySmt2(parsed, checktype, compress=True)
parsedNoPoke = pyparsing.OneOrMore(pyparsing.nestedExpr()).parseString(preconditionNoPoke).asList()[0]
pcNoPoke = latexifySmt2(parsedNoPoke, checktype, compress=False)
#pc = str(len(precondition))+" characters"
if checktype == 'leftmover':
method1, method2 = method2, method1
checktype = 'rightmover'
print("\\\\ ", file=output, end='')
print(" & ".join(["\CCmethod{"+method1+"}",
"\CC"+checktype,
"\CCmethod{"+method2+"}",
"\CCqueries{"+queriesNoPoke+"} "+"(\CCtime{"+smttimeNoPoke+"})",
"\CCqueries{"+queriesPoke+"} "+"(\CCtime{"+smttimePoke+"})",
pccompress,
]), file=output)
print("\item " + "\CCmethod{"+method1+"}" + " " + "\CC"+checktype +"\\ "+ "\CCmethod{"+method2+"}"
+ "\n", file=append)
print("\n Simple:\n\n"+pcNoPoke+"\n", file=append)
print("\n Poke:\n\n"+pc+"\n", file=append)
def areEquivalent(cond1, cond2):
a = cond2
a = a.replace("x1","z1").replace("y1","x1").replace("z1","y1")
a = a.replace("x2","z2").replace("y2","x2").replace("z2","y2")
a = a.replace("x3","z3").replace("y3","x3").replace("z3","y3")
print(a+'\n'+cond1+'\n'+cond2+'\n')
if cond1 == a or cond1 == cond2:
return True
a = a.replace("(= (+ contents y1) contents)", "(= contents (+ contents y1))")
if cond1 == a:
return True
a = a.replace("(= x1 y1)", "(= y1 x1)")
if cond1 == a:
return True
return False
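# Example of the renaming check above (hypothetical conditions): swapping x<i> and y<i> in
# cond2 means areEquivalent("(= y1 x1)", "(= x1 y1)") is True, i.e. the two preconditions
# are treated as the same up to exchanging the arguments of the two methods.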
first = True
predsout = open(PREDS, 'wb')
with open(RESULTSTABLEFILE, 'wb') as output, open(APPENDIXFILE, 'wb') as appendix:
# Data structures case-study.
for ds in datastructures:
if "auto" not in ds:
if first:
print(ds + ":", file=predsout)
#print("\\\\ \hline \hline \n \CCrow{"+ds+"}\\\\", file=output)
first = False
else:
print(", " + ds + ":", file=predsout)
print(" \\vspace{1mm} \n\\\\ \CCrow{"+ds+"}", file=output)
force = ds in forceruns
print("\subsection{"+ds+"}", file=appendix)
print("\\scriptsize", file=appendix)
print("\\label{yml:"+labels[ds]+"}", file=appendix)
print("\\input{listings/"+labels[ds]+".tex}", file=appendix)
# print("\\begin{lstlisting}", file=appendix)
ds=ds.lower()
FILE=ds+".yml"
(datanames, data, methods) = specToV1Format(fileToSpec(FILE))
# print(open(FILE).read(), file=appendix)
# print("\end{lstlisting}\n", file=appendix)
if "auto" in FILE: continue
print("\\begin{itemize}", file=appendix)
minPred, minPredF, maxPred, maxPredF = 10000, 10000, 0, 0
for method1,method2 in itertools.combinations_with_replacement(sorted(methods.keys()), 2):
m1_str = "_".join(methods[method1][0])
m2_str = "_".join(methods[method2][0])
if m1_str == "": m1_str = "empty"
if m2_str == "": m2_str = "empty"
pred_file = "../predicates_empty.txt"
# if not os.path.isfile(pred_file):
# pred_file = ds+"/predicates_"+ds+"_"+m2_str+"_"+m1_str+".txt"
# c = "./synth.py -q " + FILE + " " + method2 + " " + method1 + " " + pred_file
# method2, method1 = method1, method2
# m2_str, m1_str = m1_str, m2_str
synthCall = {}
for check in ['bowtie', 'leftmover', 'rightmover']:
synthCall[check] = {}
for poke in ['--poke', '--no-poke']:
if check == "leftmover":
c = "../src/synth.py -q " + FILE + " " + method2 + " " + method1 + " " + pred_file
cc = c + ' --check ' + "rightmover"
else:
c = "../src/synth.py -q " + FILE + " " + method1 + " " + method2 + " " + pred_file
cc = c + ' --check ' + check
cc += " " + poke
synthCall[check][poke] = getPrecondition(cc, force)
if areEquivalent(synthCall['leftmover'][bestopt]['output'],
synthCall['rightmover'][bestopt]['output']):
#assert(synthCall['leftmover']['output'] == synthCall['bowtie']['output'])
sC = synthCall['bowtie']
printRow(ds, method1, method2, sC, output, 'bowtie', append=appendix)
minPred = min(minPred, int(sC[bestopt]["predicates"]))
maxPred = max(maxPred, int(sC[bestopt]["predicates"]))
minPredF = min(minPredF, int(sC[bestopt]["predicatesFiltered"]))
maxPredF = max(maxPredF, int(sC[bestopt]["predicatesFiltered"]))
else:
#print("Yipeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee")
sC = synthCall['leftmover']
printRow(ds, method1, method2, sC, output, 'leftmover', append=appendix)
sC = synthCall['rightmover']
printRow(ds, method1, method2, sC, output, 'rightmover', append=appendix)
minPred = min(minPred, int(sC[bestopt]["predicates"]))
maxPred = max(maxPred, int(sC[bestopt]["predicates"]))
minPredF = min(minPredF, int(sC[bestopt]["predicatesFiltered"]))
maxPredF = max(maxPredF, int(sC[bestopt]["predicatesFiltered"]))
print(str(minPred)+"-"+str(maxPred)+" ("+str(minPredF)+"-"+str(maxPredF)+")%", file=predsout)
print("\end{itemize}\n", file=appendix)
# Ethereum case-study.
print(getPrecondition("../src/synth.py -v --no-generate-predicates blockking.yml enter enter blockking_predicates.txt", False)['output'])
print(getPrecondition("../src/synth.py -v --no-generate-predicates blockking_fixed.yml enter enter blockking_predicates.txt", False)['output'])
if writeCache:
with open(CACHEFILE, 'wb') as output:
pickle.dump(outputCache, output, pickle.HIGHEST_PROTOCOL)
if writeOperDefs:
with open(OPERDEFSFILE, 'wb') as output:
for o in operSet:
print("\\newcommand{\CC"+o+"}{"+o+" }", file=output)
|
kbansal/servois
|
test/runall.py
|
Python
|
bsd-3-clause
| 10,678
|
[
"Bowtie"
] |
8c8cfff86a8feecd0c5bc798dc342e9375e169b042e26c54a280d9268781aa91
|
import argparse
import torch
import pickle
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Training settings
parser = argparse.ArgumentParser(description='PyTorch semi-supervised MNIST')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
help='number of epochs to train (default: 10)')
args = parser.parse_args()
cuda = torch.cuda.is_available()
seed = 10
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
n_classes = 10
z_dim = 2
X_dim = 784
y_dim = 10
train_batch_size = args.batch_size
valid_batch_size = args.batch_size
N = 1000
epochs = args.epochs
##################################
# Load data and create Data loaders
##################################
def load_data(data_path='../data/'):
print('loading data!')
trainset_labeled = pickle.load(open(data_path + "train_labeled.p", "rb"))
trainset_unlabeled = pickle.load(open(data_path + "train_unlabeled.p", "rb"))
# Set -1 as labels for unlabeled data
trainset_unlabeled.train_labels = torch.from_numpy(np.array([-1] * 47000))
validset = pickle.load(open(data_path + "validation.p", "rb"))
train_labeled_loader = torch.utils.data.DataLoader(trainset_labeled,
batch_size=train_batch_size,
shuffle=True, **kwargs)
train_unlabeled_loader = torch.utils.data.DataLoader(trainset_unlabeled,
batch_size=train_batch_size,
shuffle=True, **kwargs)
valid_loader = torch.utils.data.DataLoader(validset, batch_size=valid_batch_size, shuffle=True)
return train_labeled_loader, train_unlabeled_loader, valid_loader
##################################
# Define Networks
##################################
# Encoder
class Q_net(nn.Module):
def __init__(self):
super(Q_net, self).__init__()
self.lin1 = nn.Linear(X_dim, N)
self.lin2 = nn.Linear(N, N)
# Gaussian code (z)
self.lin3gauss = nn.Linear(N, z_dim)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.2, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.2, training=self.training)
x = F.relu(x)
xgauss = self.lin3gauss(x)
return xgauss
# Decoder
class P_net(nn.Module):
def __init__(self):
super(P_net, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, X_dim)
def forward(self, x):
x = self.lin1(x)
x = F.dropout(x, p=0.2, training=self.training)
x = F.relu(x)
x = self.lin2(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.lin3(x)
return F.sigmoid(x)
class D_net_gauss(nn.Module):
def __init__(self):
super(D_net_gauss, self).__init__()
self.lin1 = nn.Linear(z_dim, N)
self.lin2 = nn.Linear(N, N)
self.lin3 = nn.Linear(N, 1)
def forward(self, x):
x = F.dropout(self.lin1(x), p=0.2, training=self.training)
x = F.relu(x)
x = F.dropout(self.lin2(x), p=0.2, training=self.training)
x = F.relu(x)
return F.sigmoid(self.lin3(x))
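# --- Shape sketch (not part of the training script): one forward pass through the encoder
# --- Q and decoder P on a random batch, using the legacy Variable API for consistency with
# --- the rest of this file (written against an older PyTorch release).
def forward_pass_sketch():
    Q, P = Q_net(), P_net()
    X = Variable(torch.randn(train_batch_size, X_dim))  # batch of flattened 28x28 images
    z = Q(X)                                             # -> (train_batch_size, z_dim) latent codes
    X_rec = P(z)                                         # -> (train_batch_size, X_dim), values in (0, 1)
    return z.size(), X_rec.size()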
####################
# Utility functions
####################
def save_model(model, filename):
print('Best model so far, saving it...')
torch.save(model.state_dict(), filename)
def report_loss(epoch, D_loss_gauss, G_loss, recon_loss):
'''
Print loss
'''
print('Epoch-{}; D_loss_gauss: {:.4}; G_loss: {:.4}; recon_loss: {:.4}'.format(epoch,
D_loss_gauss.data[0],
G_loss.data[0],
recon_loss.data[0]))
def create_latent(Q, loader):
'''
Creates the latent representation for the samples in loader
return:
z_values: numpy array with the latent representations
labels: the labels corresponding to the latent representations
'''
Q.eval()
labels = []
for batch_idx, (X, target) in enumerate(loader):
X = X * 0.3081 + 0.1307
# X.resize_(loader.batch_size, X_dim)
X, target = Variable(X), Variable(target)
labels.extend(target.data.tolist())
if cuda:
X, target = X.cuda(), target.cuda()
# Reconstruction phase
z_sample = Q(X)
if batch_idx > 0:
z_values = np.concatenate((z_values, np.array(z_sample.data.tolist())))
else:
z_values = np.array(z_sample.data.tolist())
labels = np.array(labels)
return z_values, labels
####################
# Train procedure
####################
def train(P, Q, D_gauss, P_decoder, Q_encoder, Q_generator, D_gauss_solver, data_loader):
'''
Train procedure for one epoch.
'''
TINY = 1e-15
# Set the networks in train mode (apply dropout when needed)
Q.train()
P.train()
D_gauss.train()
# Loop through the labeled and unlabeled dataset getting one batch of samples from each
# The batch size has to be a divisor of the size of the dataset or it will return
# invalid samples
for X, target in data_loader:
# Load batch and normalize samples to be between 0 and 1
X = X * 0.3081 + 0.1307
X.resize_(train_batch_size, X_dim)
X, target = Variable(X), Variable(target)
if cuda:
X, target = X.cuda(), target.cuda()
# Init gradients
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
#######################
# Reconstruction phase
#######################
z_sample = Q(X)
X_sample = P(z_sample)
recon_loss = F.binary_cross_entropy(X_sample + TINY, X.resize(train_batch_size, X_dim) + TINY)
recon_loss.backward()
P_decoder.step()
Q_encoder.step()
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
#######################
# Regularization phase
#######################
# Discriminator
Q.eval()
z_real_gauss = Variable(torch.randn(train_batch_size, z_dim) * 5.)
if cuda:
z_real_gauss = z_real_gauss.cuda()
z_fake_gauss = Q(X)
D_real_gauss = D_gauss(z_real_gauss)
D_fake_gauss = D_gauss(z_fake_gauss)
D_loss = -torch.mean(torch.log(D_real_gauss + TINY) + torch.log(1 - D_fake_gauss + TINY))
D_loss.backward()
D_gauss_solver.step()
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
# Generator
Q.train()
z_fake_gauss = Q(X)
D_fake_gauss = D_gauss(z_fake_gauss)
G_loss = -torch.mean(torch.log(D_fake_gauss + TINY))
G_loss.backward()
Q_generator.step()
P.zero_grad()
Q.zero_grad()
D_gauss.zero_grad()
return D_loss, G_loss, recon_loss
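# --- Loss sketch (not part of the script): the two adversarial objectives used inside
# --- train() above, isolated for a single batch of discriminator outputs. TINY guards log(0).
def adversarial_losses(D_real_gauss, D_fake_gauss, TINY=1e-15):
    # discriminator: push D(samples from the Gaussian prior) -> 1 and D(encoded samples) -> 0
    D_loss = -torch.mean(torch.log(D_real_gauss + TINY) + torch.log(1 - D_fake_gauss + TINY))
    # generator (the encoder Q): push D(encoded samples) -> 1
    G_loss = -torch.mean(torch.log(D_fake_gauss + TINY))
    return D_loss, G_loss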
def generate_model(train_labeled_loader, train_unlabeled_loader, valid_loader):
torch.manual_seed(10)
if cuda:
Q = Q_net().cuda()
P = P_net().cuda()
D_gauss = D_net_gauss().cuda()
else:
Q = Q_net()
P = P_net()
D_gauss = D_net_gauss()
# Set learning rates
gen_lr = 0.0001
reg_lr = 0.00005
# Set optimizators
P_decoder = optim.Adam(P.parameters(), lr=gen_lr)
Q_encoder = optim.Adam(Q.parameters(), lr=gen_lr)
Q_generator = optim.Adam(Q.parameters(), lr=reg_lr)
D_gauss_solver = optim.Adam(D_gauss.parameters(), lr=reg_lr)
for epoch in range(epochs):
D_loss_gauss, G_loss, recon_loss = train(P, Q, D_gauss, P_decoder, Q_encoder,
Q_generator,
D_gauss_solver,
train_unlabeled_loader)
if epoch % 10 == 0:
report_loss(epoch, D_loss_gauss, G_loss, recon_loss)
return Q, P
if __name__ == '__main__':
train_labeled_loader, train_unlabeled_loader, valid_loader = load_data()
Q, P = generate_model(train_labeled_loader, train_unlabeled_loader, valid_loader)
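    # Hedged addition (not in the original script): persist the trained
    # networks using the save_model helper defined above; the filenames are
    # placeholders, not names used by the original project.
    save_model(Q, 'Q_encoder.pt')
    save_model(P, 'P_decoder.pt')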
|
fducau/AAE_pytorch
|
script/aae_pytorch_basic.py
|
Python
|
gpl-3.0
| 8,635
|
[
"Gaussian"
] |
4f9314c4e03e70e4d44df09dd6aa1f167de1f53157313621a68a3555c8e21fba
|
import paraview.simple
pxm = paraview.simple.servermanager.ProxyManager()
proxy = pxm.NewProxy('testcase', 'None')
print(proxy)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Catalyst/Editions/Enable-Python/Testing/Base-Python/import.py
|
Python
|
gpl-3.0
| 128
|
[
"ParaView"
] |
e971b82bd6189224ff2fc4c75c99c95e4a382c654c95edc4341b7ac747bf22f3
|
# -*- coding:utf-8 -*-
__author__ = 'q00222219@huawei'
import time
from heat.openstack.common import log as logging
import heat.engine.resources.cloudmanager.commonutils as commonutils
import heat.engine.resources.cloudmanager.constant as constant
import heat.engine.resources.cloudmanager.exception as exception
import pdb
LOG = logging.getLogger(__name__)
class CascadedConfiger(object):
def __init__(self, public_ip_api, api_ip, domain, user, password,
cascading_domain, cascading_api_ip, cascaded_domain,
cascaded_api_ip, cascaded_api_subnet_gateway):
self.public_ip_api = public_ip_api
self.api_ip = api_ip
self.domain = domain
self.user = user
self.password = password
self.cascading_domain = cascading_domain
self.cascading_api_ip = cascading_api_ip
self.cascaded_domain = cascaded_domain
self.cascaded_ip = cascaded_api_ip
self.gateway = cascaded_api_subnet_gateway
def do_config(self):
start_time = time.time()
#pdb.set_trace()
LOG.info("start config cascaded, cascaded: %s" % self.domain)
        # wait until the cascaded host is reachable (tunnel is up)
commonutils.check_host_status(host=self.public_ip_api,
user=self.user,
password=self.password,
retry_time=500, interval=1)
# config cascaded host
self._config_az_cascaded()
cost_time = time.time() - start_time
LOG.info("first config success, cascaded: %s, cost time: %d"
% (self.domain, cost_time))
# check config result
for i in range(3):
try:
# check 90s
commonutils.check_host_status(
host=self.public_ip_api,
user=constant.VcloudConstant.ROOT,
password=constant.VcloudConstant.ROOT_PWD,
retry_time=15,
interval=1)
LOG.info("cascaded api is ready..")
break
except exception.CheckHostStatusFailure:
if i == 2:
LOG.error("check cascaded api failed ...")
break
LOG.error("check cascaded api error, "
"retry config cascaded ...")
self._config_az_cascaded()
cost_time = time.time() - start_time
LOG.info("config cascaded success, cascaded: %s, cost_time: %d"
% (self.domain, cost_time))
def _config_az_cascaded(self):
LOG.info("start config cascaded host, host: %s" % self.api_ip)
# modify dns server address
address = "/%(cascading_domain)s/%(cascading_ip)s,/%(cascaded_domain)s/%(cascaded_ip)s" \
% {"cascading_domain": self.cascading_domain,
"cascading_ip": self.cascading_api_ip,
"cascaded_domain":self.cascaded_domain,
"cascaded_ip":self.cascaded_ip}
for i in range(30):
try:
commonutils.execute_cmd_without_stdout(
host=self.public_ip_api,
user=self.user,
password=self.password,
cmd='cd %(dir)s; source /root/adminrc; sh %(script)s replace %(address)s'
% {"dir": constant.PublicConstant.SCRIPTS_DIR,
"script": constant.PublicConstant.
MODIFY_DNS_SERVER_ADDRESS,
"address": address})
break
except exception.SSHCommandFailure as e:
LOG.error("modify cascaded dns address error, cascaded: "
"%s, error: %s"
% (self.domain, e.format_message()))
time.sleep(1)
LOG.info(
"config cascaded dns address success, cascaded: %s"
% self.public_ip_api)
return True
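# Minimal, hypothetical driver for the class above (not part of the original
# module): every address, domain and credential below is a placeholder; in
# practice the configer is constructed by the surrounding Heat resources.
if __name__ == '__main__':
    configer = CascadedConfiger(
        public_ip_api='203.0.113.10',
        api_ip='192.0.2.10',
        domain='az1.example.com',
        user='root',
        password='********',
        cascading_domain='cascading.example.com',
        cascading_api_ip='192.0.2.1',
        cascaded_domain='cascaded.example.com',
        cascaded_api_ip='192.0.2.20',
        cascaded_api_subnet_gateway='192.0.2.254')
    configer.do_config()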
|
hgqislub/hybird-orchard
|
code/cloudmanager/install/hws/hws_cascaded_configer.py
|
Python
|
apache-2.0
| 4,081
|
[
"VisIt"
] |
e7fa32df50fe543c3a06dc1966fa2a807f0e6d6ab854006839b62ad8a79b7a5b
|
"""
This file contains view functions for wrapping the django-wiki.
"""
import logging
import re
import cgi
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from wiki.core.exceptions import NoRootURL
from wiki.models import URLPath, Article
from courseware.courses import get_course_by_id
from course_wiki.utils import course_wiki_slug
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
def root_create(request): # pylint: disable=unused-argument
"""
In the edX wiki, we don't show the root_create view. Instead, we
just create the root automatically if it doesn't exist.
"""
root = get_or_create_root()
return redirect('wiki:get', path=root.path)
def course_wiki_redirect(request, course_id): # pylint: disable=unused-argument
"""
    This redirects to whatever page on the wiki the course designates
    as its home page. A course's wiki must be an article on the root (for
example, "/6.002x") to keep things simple.
"""
course = get_course_by_id(SlashSeparatedCourseKey.from_deprecated_string(course_id))
course_slug = course_wiki_slug(course)
valid_slug = True
if not course_slug:
log.exception("This course is improperly configured. The slug cannot be empty.")
valid_slug = False
if re.match(r'^[-\w\.]+$', course_slug) is None:
log.exception("This course is improperly configured. The slug can only contain letters, numbers, periods or hyphens.")
valid_slug = False
if not valid_slug:
return redirect("wiki:get", path="")
# The wiki needs a Site object created. We make sure it exists here
try:
Site.objects.get_current()
except Site.DoesNotExist:
new_site = Site()
new_site.domain = settings.SITE_NAME
new_site.name = "edX"
new_site.save()
site_id = str(new_site.id)
if site_id != str(settings.SITE_ID):
raise ImproperlyConfigured("No site object was created and the SITE_ID doesn't match the newly created one. {} != {}".format(site_id, settings.SITE_ID))
try:
urlpath = URLPath.get_by_path(course_slug, select_related=True)
results = list(Article.objects.filter(id=urlpath.article.id))
if results:
article = results[0]
else:
article = None
except (NoRootURL, URLPath.DoesNotExist):
# We will create it in the next block
urlpath = None
article = None
if not article:
# create it
root = get_or_create_root()
if urlpath:
# Somehow we got a urlpath without an article. Just delete it and
            # recreate it.
urlpath.delete()
content = cgi.escape(
# Translators: this string includes wiki markup. Leave the ** and the _ alone.
_("This is the wiki for **{organization}**'s _{course_name}_.").format(
organization=course.display_org_with_default,
course_name=course.display_name_with_default_escaped,
)
)
urlpath = URLPath.create_article(
root,
course_slug,
title=course_slug,
content=content,
user_message=_("Course page automatically created."),
user=None,
ip_address=None,
article_kwargs={'owner': None,
'group': None,
'group_read': True,
'group_write': True,
'other_read': True,
'other_write': True,
})
return redirect("wiki:get", path=urlpath.path)
def get_or_create_root():
"""
Returns the root article, or creates it if it doesn't exist.
"""
try:
root = URLPath.root()
if not root.article:
root.delete()
raise NoRootURL
return root
except NoRootURL:
pass
starting_content = "\n".join((
_("Welcome to the {platform_name} Wiki").format(platform_name=settings.PLATFORM_NAME),
"===",
_("Visit a course wiki to add an article."),
))
root = URLPath.create_root(title=_("Wiki"), content=starting_content)
article = root.article
article.group = None
article.group_read = True
article.group_write = False
article.other_read = True
article.other_write = False
article.save()
return root
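# Hypothetical URL wiring for the views above (illustrative only, not copied
# from edx-platform's actual urls.py): it sketches how course_wiki_redirect
# would be routed with a deprecated slash-separated course_id capture group.
#
#     from django.conf.urls import url
#     from course_wiki import views
#
#     urlpatterns = [
#         url(r'^courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/course_wiki/?$',
#             views.course_wiki_redirect, name='course_wiki'),
#     ]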
|
MakeHer/edx-platform
|
lms/djangoapps/course_wiki/views.py
|
Python
|
agpl-3.0
| 4,668
|
[
"VisIt"
] |
2c6f8be8485ad00a42c735ba3c04a3342920980a34d12297248492ec08ec75f8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2014 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import os
from xml.sax.saxutils import escape
def escxml(string):
"""
Escapes XML special characters.
"""
    return escape(string, { '"' : '&quot;' } )
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .paragraphstyle import ParagraphStyle
from .fontstyle import FontStyle
from .tablestyle import TableStyle, TableCellStyle
from .graphicstyle import GraphicsStyle
from gramps.gen.const import GRAMPS_LOCALE as glocale
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".stylesheet")
#-------------------------------------------------------------------------
#
# SAX interface
#
#-------------------------------------------------------------------------
try:
from xml.sax import make_parser, handler, SAXParseException
except ImportError:
from _xmlplus.sax import make_parser, handler, SAXParseException
#------------------------------------------------------------------------
#
# cnv2color
#
#------------------------------------------------------------------------
def cnv2color(text):
"""
converts a hex value in the form of #XXXXXX into a tuple of integers
representing the RGB values
"""
return (int(text[1:3], 16), int(text[3:5], 16), int(text[5:7], 16))
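# Illustrative only (not part of the original module):
#     cnv2color("#ff8000")  ->  (255, 128, 0)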
#------------------------------------------------------------------------
#
# StyleSheetList
#
#------------------------------------------------------------------------
class StyleSheetList(object):
"""
Interface into the user's defined style sheets. Each StyleSheetList
has a predefined default style specified by the report. Additional
styles are loaded from a specified XML file if it exists.
"""
def __init__(self, filename, defstyle):
"""
Create a new StyleSheetList from the specified default style and
any other styles that may be defined in the specified file.
file - XML file that contains style definitions
defstyle - default style
"""
defstyle.set_name('default')
self.map = { "default" : defstyle }
self.__file = filename
self.parse()
def delete_style_sheet(self, name):
"""
Remove a style from the list. Since each style must have a
unique name, the name is used to delete the stylesheet.
name - Name of the style to delete
"""
del self.map[name]
def get_style_sheet_map(self):
"""
Return the map of names to styles.
"""
return self.map
def get_style_sheet(self, name):
"""
Return the StyleSheet associated with the name
name - name associated with the desired StyleSheet.
"""
return self.map[name]
def get_style_names(self):
"Return a list of all the style names in the StyleSheetList"
return list(self.map.keys())
def set_style_sheet(self, name, style):
"""
Add or replaces a StyleSheet in the StyleSheetList. The
default style may not be replaced.
name - name associated with the StyleSheet to add or replace.
style - definition of the StyleSheet
"""
style.set_name(name)
if name != "default":
self.map[name] = style
def save(self):
"""
Saves the current StyleSheet definitions to the associated file.
"""
xml_file = open(self.__file, "w")
xml_file.write('<?xml version="1.0" encoding="utf-8"?>\n')
xml_file.write('<stylelist>\n')
for name in sorted(self.map.keys()): # enable diff of archived copies
if name == "default":
continue
sheet = self.map[name]
xml_file.write('<sheet name="%s">\n' % escxml(name))
for p_name in sheet.get_paragraph_style_names():
self.write_paragraph_style(xml_file, sheet, p_name)
for t_name in sheet.get_table_style_names():
self.write_table_style(xml_file, sheet, t_name)
for c_name in sheet.get_cell_style_names():
self.write_cell_style(xml_file, sheet, c_name)
xml_file.write('</sheet>\n')
xml_file.write('</stylelist>\n')
xml_file.close()
def write_paragraph_style(self, xml_file, sheet, p_name):
para = sheet.get_paragraph_style(p_name)
# Get variables for substitutions
font = para.get_font()
rmargin = float(para.get_right_margin())
lmargin = float(para.get_left_margin())
findent = float(para.get_first_indent())
tmargin = float(para.get_top_margin())
bmargin = float(para.get_bottom_margin())
padding = float(para.get_padding())
bg_color = para.get_background_color()
# Write out style definition
xml_file.write(
'<style name="%s">\n' % escxml(p_name) +
'<font face="%d" ' % font.get_type_face() +
'size="%d" ' % font.get_size() +
'italic="%d" ' % font.get_italic() +
'bold="%d" ' % font.get_bold() +
'underline="%d" ' % font.get_underline() +
'color="#%02x%02x%02x" ' % font.get_color() +
'/>\n' +
'<para ' +
'description="%s" ' % escxml(para.get_description()) +
'rmargin="%.3f" ' % rmargin +
'lmargin="%.3f" ' % lmargin +
'first="%.3f" ' % findent +
'tmargin="%.3f" ' % tmargin +
'bmargin="%.3f" ' % bmargin +
'pad="%.3f" ' % padding +
'bgcolor="#%02x%02x%02x" ' % bg_color +
'level="%d" ' % para.get_header_level() +
'align="%d" ' % para.get_alignment() +
'tborder="%d" ' % para.get_top_border() +
'lborder="%d" ' % para.get_left_border() +
'rborder="%d" ' % para.get_right_border() +
'bborder="%d" ' % para.get_bottom_border() +
'/>\n' +
'</style>\n'
)
def write_table_style(self, xml_file, sheet, t_name):
t_style = sheet.get_table_style(t_name)
# Write out style definition
xml_file.write(
'<style name="%s">\n' % escxml(t_name) +
'<table width="%d" ' % t_style.get_width() +
'columns="%d"' % t_style.get_columns() +
'>\n')
for col in range(t_style.get_columns()):
column_width = t_style.get_column_width(col)
xml_file.write('<column width="%d" />\n' % column_width)
xml_file.write('</table>\n')
xml_file.write('</style>\n')
def write_cell_style(self, xml_file, sheet, c_name):
cell = sheet.get_cell_style(c_name)
# Write out style definition
xml_file.write(
'<style name="%s">\n' % escxml(c_name) +
'<cell lborder="%d" ' % cell.get_left_border() +
'rborder="%d" ' % cell.get_right_border() +
'tborder="%d" ' % cell.get_top_border() +
'bborder="%d" ' % cell.get_bottom_border() +
'pad="%.3f" ' % cell.get_padding() +
'/>\n' +
'</style>\n'
)
def parse(self):
"""
Loads the StyleSheets from the associated file, if it exists.
"""
try:
if os.path.isfile(self.__file):
parser = make_parser()
parser.setContentHandler(SheetParser(self))
the_file = open(self.__file)
parser.parse(the_file)
the_file.close()
except (IOError, OSError, SAXParseException):
pass
#------------------------------------------------------------------------
#
# StyleSheet
#
#------------------------------------------------------------------------
class StyleSheet(object):
"""
A collection of named paragraph styles.
"""
def __init__(self, obj=None):
"""
Create a new empty StyleSheet.
:param obj: if not None, creates the StyleSheet from the values in
obj, instead of creating an empty StyleSheet
"""
self.para_styles = {}
self.draw_styles = {}
self.table_styles = {}
self.cell_styles = {}
self.name = ""
if obj is not None:
for style_name, style in obj.para_styles.items():
self.para_styles[style_name] = ParagraphStyle(style)
for style_name, style in obj.draw_styles.items():
self.draw_styles[style_name] = GraphicsStyle(style)
for style_name, style in obj.table_styles.items():
self.table_styles[style_name] = TableStyle(style)
for style_name, style in obj.cell_styles.items():
self.cell_styles[style_name] = TableCellStyle(style)
def set_name(self, name):
"""
Set the name of the StyleSheet
:param name: The name to be given to the StyleSheet
"""
self.name = name
def get_name(self):
"""
Return the name of the StyleSheet
"""
return self.name
def clear(self):
"Remove all styles from the StyleSheet"
self.para_styles = {}
self.draw_styles = {}
self.table_styles = {}
self.cell_styles = {}
def is_empty(self):
"Checks if any styles are defined"
style_count = len(self.para_styles) + \
len(self.draw_styles) + \
len(self.table_styles) + \
len(self.cell_styles)
if style_count > 0:
return False
else:
return True
def add_paragraph_style(self, name, style):
"""
Add a paragraph style to the style sheet.
:param name: The name of the :class:`.ParagraphStyle`
:param style: :class:`.ParagraphStyle` instance to be added.
"""
self.para_styles[name] = ParagraphStyle(style)
def get_paragraph_style(self, name):
"""
Return the :class:`.ParagraphStyle` associated with the name
:param name: name of the :class:`.ParagraphStyle` that is wanted
"""
return ParagraphStyle(self.para_styles[name])
def get_paragraph_style_names(self):
"Return the list of paragraph names in the StyleSheet"
return list(self.para_styles.keys())
def add_draw_style(self, name, style):
"""
Add a draw style to the style sheet.
:param name: The name of the :class:`.GraphicsStyle`
:param style: :class:`.GraphicsStyle` instance to be added.
"""
self.draw_styles[name] = GraphicsStyle(style)
def get_draw_style(self, name):
"""
Return the :class:`.GraphicsStyle` associated with the name
:param name: name of the :class:`.GraphicsStyle` that is wanted
"""
return GraphicsStyle(self.draw_styles[name])
def get_draw_style_names(self):
"Return the list of draw style names in the StyleSheet"
return list(self.draw_styles.keys())
def add_table_style(self, name, style):
"""
Add a table style to the style sheet.
:param name: The name of the :class:`.TableStyle`
:param style: :class:`.TableStyle` instance to be added.
"""
self.table_styles[name] = TableStyle(style)
def get_table_style(self, name):
"""
Return the :class:`.TableStyle` associated with the name
:param name: name of the :class:`.TableStyle` that is wanted
"""
return TableStyle(self.table_styles[name])
def get_table_style_names(self):
"Return the list of table style names in the StyleSheet"
return list(self.table_styles.keys())
def add_cell_style(self, name, style):
"""
Add a cell style to the style sheet.
:param name: The name of the :class:`.TableCellStyle`
:param style: :class:`.TableCellStyle` instance to be added.
"""
self.cell_styles[name] = TableCellStyle(style)
def get_cell_style(self, name):
"""
Return the :class:`.TableCellStyle` associated with the name
:param name: name of the :class:`.TableCellStyle` that is wanted
"""
return TableCellStyle(self.cell_styles[name])
def get_cell_style_names(self):
"Return the list of cell style names in the StyleSheet"
return list(self.cell_styles.keys())
#-------------------------------------------------------------------------
#
# SheetParser
#
#-------------------------------------------------------------------------
class SheetParser(handler.ContentHandler):
"""
SAX parsing class for the StyleSheetList XML file.
"""
def __init__(self, sheetlist):
"""
Create a SheetParser class that populates the passed StyleSheetList
class.
sheetlist - :class:`StyleSheetList` instance to be loaded from the file.
"""
handler.ContentHandler.__init__(self)
self.sheetlist = sheetlist
self.f = None
self.p = None
self.s = None
        self.t = None
        self.c = None
        self.column_widths = []
self.sheet_name = None
self.style_name = None
def startElement(self, tag, attrs):
"""
Overridden class that handles the start of a XML element
"""
if tag == "sheet":
self.s = StyleSheet(self.sheetlist.map["default"])
self.sheet_name = attrs['name']
elif tag == "font":
self.f = FontStyle()
self.f.set_type_face(int(attrs['face']))
self.f.set_size(int(attrs['size']))
self.f.set_italic(int(attrs['italic']))
self.f.set_bold(int(attrs['bold']))
self.f.set_underline(int(attrs['underline']))
self.f.set_color(cnv2color(attrs['color']))
elif tag == "para":
self.p = ParagraphStyle()
if 'description' in attrs:
self.p.set_description(attrs['description'])
self.p.set_right_margin(glocale.float(attrs['rmargin']))
self.p.set_left_margin(glocale.float(attrs['lmargin']))
self.p.set_first_indent(glocale.float(attrs['first']))
try:
# This is needed to read older style files
# lacking tmargin and bmargin
self.p.set_top_margin(glocale.float(attrs['tmargin']))
self.p.set_bottom_margin(glocale.float(attrs['bmargin']))
except KeyError:
pass
self.p.set_padding(glocale.float(attrs['pad']))
self.p.set_alignment(int(attrs['align']))
self.p.set_right_border(int(attrs['rborder']))
self.p.set_header_level(int(attrs['level']))
self.p.set_left_border(int(attrs['lborder']))
self.p.set_top_border(int(attrs['tborder']))
self.p.set_bottom_border(int(attrs['bborder']))
self.p.set_background_color(cnv2color(attrs['bgcolor']))
self.p.set_font(self.f)
elif tag == "style":
self.style_name = attrs['name']
elif tag == "table":
self.t = TableStyle()
self.t.set_width(int(attrs['width']))
self.t.set_columns(int(attrs['columns']))
self.column_widths = []
elif tag == "column":
self.column_widths.append(int(attrs['width']))
elif tag == "cell":
self.c = TableCellStyle()
self.c.set_left_border(int(attrs['lborder']))
self.c.set_right_border(int(attrs['rborder']))
self.c.set_top_border(int(attrs['tborder']))
self.c.set_bottom_border(int(attrs['bborder']))
self.c.set_padding(glocale.float(attrs['pad']))
def endElement(self, tag):
"Overridden class that handles the end of a XML element"
if tag == "sheet":
self.sheetlist.set_style_sheet(self.sheet_name, self.s)
elif tag == "para":
self.s.add_paragraph_style(self.style_name, self.p)
elif tag == "table":
self.t.set_column_widths(self.column_widths)
self.s.add_table_style(self.style_name, self.t)
elif tag == "cell":
self.s.add_cell_style(self.style_name, self.c)
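# A minimal usage sketch (not part of the original module), relying only on
# classes defined or imported above; 'styles.xml' and the style values are
# placeholders.
#
#     font = FontStyle()
#     font.set_size(12)
#     para = ParagraphStyle()
#     para.set_font(font)
#     default = StyleSheet()
#     default.add_paragraph_style('Normal', para)
#     sheets = StyleSheetList('styles.xml', default)
#     sheets.set_style_sheet('Custom', StyleSheet(default))
#     sheets.save()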
|
pmghalvorsen/gramps_branch
|
gramps/gen/plug/docgen/stylesheet.py
|
Python
|
gpl-2.0
| 18,041
|
[
"Brian"
] |
63a13f68f876a7dc3bf2b12bb2497410bb74186f85a5e11f84cc3048119b79f6
|