#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test YAML functions.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import time
import shutil
import textwrap
import unittest
import tempfile
import itertools
from nose.tools import assert_raises
from nose.plugins.attrib import attr
from mdtraj.formats.mol2 import mol2_to_dataframes
from yank.yamlbuild import *
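
# Note: the wildcard import above supplies most of the remaining names used in
# this module (os, copy, collections, numpy as np, yaml, openmm, unit, omt,
# utils, pipeline, PDBFile, SetupDatabase, YamlBuilder, YankLoader,
# YamlParseError, AlchemicalState, ...) from yank.yamlbuild's own namespace.
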
# ==============================================================================
# Subroutines for testing
# ==============================================================================

standard_protocol = """
        absolute-binding:
            complex:
                alchemical_path:
                    lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                    lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
            solvent:
                alchemical_path:
                    lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                    lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]"""
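# NOTE: standard_protocol is written with an 8-space base indentation so that
# it can be spliced verbatim after a "protocols:" line of the dedented YAML
# scripts below (optionally re-indented with indent()) and still parse as
# valid YAML.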


def indent(str):
    """Put 4 extra spaces in front of every line."""
    return '\n    '.join(str.split('\n'))


def examples_paths():
    """Return the absolute path to the Yank examples relevant to tests."""
    paths = {}
    examples_dir = utils.get_data_filename(os.path.join('..', 'examples'))
    p_xylene_dir = os.path.join(examples_dir, 'p-xylene-implicit', 'input')
    p_xylene_gro_dir = os.path.join(examples_dir, 'p-xylene-gromacs-example', 'setup')
    ben_tol_dir = os.path.join(examples_dir, 'benzene-toluene-explicit', 'setup')
    abl_imatinib_dir = os.path.join(examples_dir, 'abl-imatinib-explicit', 'input')
    paths['lysozyme'] = os.path.join(p_xylene_dir, '181L-pdbfixer.pdb')
    paths['p-xylene'] = os.path.join(p_xylene_dir, 'p-xylene.mol2')
    paths['benzene'] = os.path.join(ben_tol_dir, 'benzene.tripos.mol2')
    paths['toluene'] = os.path.join(ben_tol_dir, 'toluene.tripos.mol2')
    paths['abl'] = os.path.join(abl_imatinib_dir, '2HYY-pdbfixer.pdb')
    paths['imatinib'] = os.path.join(abl_imatinib_dir, 'STI02.mol2')
    paths['bentol-complex'] = [os.path.join(ben_tol_dir, 'complex.prmtop'),
                               os.path.join(ben_tol_dir, 'complex.inpcrd')]
    paths['bentol-solvent'] = [os.path.join(ben_tol_dir, 'solvent.prmtop'),
                               os.path.join(ben_tol_dir, 'solvent.inpcrd')]
    paths['pxylene-complex'] = [os.path.join(p_xylene_gro_dir, 'complex.top'),
                                os.path.join(p_xylene_gro_dir, 'complex.gro')]
    paths['pxylene-solvent'] = [os.path.join(p_xylene_gro_dir, 'solvent.top'),
                                os.path.join(p_xylene_gro_dir, 'solvent.gro')]
    paths['pxylene-gro-include'] = os.path.join(p_xylene_gro_dir, 'top')
    return paths


def yank_load(script):
    """Shortcut to load a string YAML script with YankLoader."""
    return yaml.load(textwrap.dedent(script), Loader=YankLoader)
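
# yank_load() is used throughout this module: it dedents the indented YAML
# literals and resolves YANK-specific tags (e.g. !Combinatorial, !Ordered)
# that plain yaml.load() would reject. A minimal sketch of its use:
#
#     protocol = yank_load(standard_protocol)
#     path = protocol['absolute-binding']['complex']['alchemical_path']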


def get_template_script(output_dir='.'):
    """Return a YAML template script as a dict."""
    paths = examples_paths()
    template_script = """
    ---
    options:
        output_dir: {output_dir}
        number_of_iterations: 1
        temperature: 300*kelvin
        pressure: 1*atmosphere
        softcore_beta: 0.0
    molecules:
        benzene:
            filepath: {benzene_path}
            antechamber: {{charge_method: bcc}}
        benzene-epik0:
            filepath: {benzene_path}
            epik: 0
            antechamber: {{charge_method: bcc}}
        benzene-epikcustom:
            filepath: {benzene_path}
            epik:
                select: 0
                ph: 7.0
                tautomerize: yes
            antechamber: {{charge_method: bcc}}
        p-xylene:
            filepath: {pxylene_path}
            antechamber: {{charge_method: bcc}}
        p-xylene-name:
            name: p-xylene
            openeye: {{quacpac: am1-bcc}}
            antechamber: {{charge_method: null}}
        toluene:
            filepath: {toluene_path}
            antechamber: {{charge_method: bcc}}
        toluene-smiles:
            smiles: Cc1ccccc1
            antechamber: {{charge_method: bcc}}
        toluene-name:
            name: toluene
            antechamber: {{charge_method: bcc}}
        Abl:
            filepath: {abl_path}
        T4Lysozyme:
            filepath: {lysozyme_path}
    solvents:
        vacuum:
            nonbonded_method: NoCutoff
        GBSA-OBC2:
            nonbonded_method: NoCutoff
            implicit_solvent: OBC2
        PME:
            nonbonded_method: PME
            nonbonded_cutoff: 1*nanometer
            clearance: 10*angstroms
            positive_ion: Na+
            negative_ion: Cl-
    systems:
        explicit-system:
            receptor: benzene
            ligand: toluene
            solvent: PME
            leap:
                parameters: [leaprc.ff14SB, leaprc.gaff, frcmod.ionsjc_tip3p]
        implicit-system:
            receptor: T4Lysozyme
            ligand: p-xylene
            solvent: GBSA-OBC2
            leap:
                parameters: [leaprc.ff14SB, leaprc.gaff]
    protocols:
        absolute-binding:
            complex:
                alchemical_path:
                    lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                    lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
            solvent:
                alchemical_path:
                    lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                    lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]
    experiments:
        system: explicit-system
        protocol: absolute-binding
    """.format(output_dir=output_dir, benzene_path=paths['benzene'],
               pxylene_path=paths['p-xylene'], toluene_path=paths['toluene'],
               abl_path=paths['abl'], lysozyme_path=paths['lysozyme'])
    return yank_load(template_script)
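
# Typical usage of the template in the tests below (sketch): load it, tweak a
# few fields, and feed the resulting dict directly to YamlBuilder, e.g.
#
#     script = get_template_script(tmp_dir)
#     script['systems']['explicit-system']['pack'] = True
#     yaml_builder = YamlBuilder(script)
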
# ==============================================================================
# YamlBuild utility functions
# ==============================================================================
def test_compute_min_dist():
    """Test computation of minimum distance between two molecules"""
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], np.float)
    mol2_pos = np.array([[3, 3, 3], [3, 4, 5]], np.float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], np.float)
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) == np.sqrt(3)


def test_compute_dist_bound():
    """Test compute_dist_bound() function."""
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]])
    mol2_pos = np.array([[2, 2, 2], [2, 4, 5]])  # determine min dist
    mol3_pos = np.array([[3, 3, 3], [3, 4, 5]])  # determine max dist
    min_dist, max_dist = compute_dist_bound(mol1_pos, mol2_pos, mol3_pos)
    assert min_dist == np.linalg.norm(mol1_pos[1] - mol2_pos[0])
    assert max_dist == np.linalg.norm(mol1_pos[1] - mol3_pos[1])


def test_remove_overlap():
    """Test function remove_overlap()."""
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], np.float)
    mol2_pos = np.array([[1, 1, 1], [3, 4, 5]], np.float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], np.float)
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) < 0.1
    mol1_pos = remove_overlap(mol1_pos, mol2_pos, mol3_pos, min_distance=0.1, sigma=2.0)
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) >= 0.1


def test_pull_close():
    """Test function pull_close()."""
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], np.float)
    mol2_pos = np.array([[-1, -1, -1], [1, 1, 1]], np.float)
    mol3_pos = np.array([[10, 10, 10], [13, 14, 15]], np.float)
    translation2 = pull_close(mol1_pos, mol2_pos, 1.5, 5)
    translation3 = pull_close(mol1_pos, mol3_pos, 1.5, 5)
    assert isinstance(translation2, np.ndarray)
    assert 1.5 <= compute_min_dist(mol1_pos, mol2_pos + translation2) <= 5
    assert 1.5 <= compute_min_dist(mol1_pos, mol3_pos + translation3) <= 5


def test_pack_transformation():
    """Test function pack_transformation()."""
    BOX_SIZE = 5
    CLASH_DIST = 1

    mol1 = np.array([[-1, -1, -1], [1, 1, 1]], np.float)
    mols = [np.copy(mol1),  # distance = 0
            mol1 + 2 * BOX_SIZE]  # distance > box
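    # Appending a column of ones turns the Nx3 positions into homogeneous
    # coordinates, so a single 4x4 affine matrix can apply the rotation and
    # translation returned by pack_transformation() in one product.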
    mols_affine = [np.append(mol, np.ones((2, 1)), axis=1) for mol in mols]

    transformations = [pack_transformation(mol1, mol2, CLASH_DIST, BOX_SIZE) for mol2 in mols]
    for mol, transf in zip(mols_affine, transformations):
        assert isinstance(transf, np.ndarray)
        mol2 = mol.dot(transf.T)[:, :3]  # transform and "de-affine"
        min_dist, max_dist = compute_dist_bound(mol1, mol2)
        assert CLASH_DIST <= min_dist and max_dist <= BOX_SIZE

# ==============================================================================
# YAML parsing and validation
# ==============================================================================
def test_yaml_parsing():
    """Check that YAML file is parsed correctly."""

    # Parser handles no options
    yaml_content = """
    ---
    test: 2
    """
    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    assert len(yaml_builder.options) == len(yaml_builder.DEFAULT_OPTIONS)
    assert len(yaml_builder.yank_options) == 0

    # Correct parsing
    yaml_content = """
    ---
    metadata:
        title: Test YANK YAML YAY!

    options:
        verbose: true
        resume_setup: true
        resume_simulation: true
        output_dir: /path/to/output/
        setup_dir: /path/to/output/setup/
        experiments_dir: /path/to/output/experiments/
        temperature: 300*kelvin
        pressure: 1*atmosphere
        constraints: AllBonds
        hydrogen_mass: 2*amus
        restraint_type: harmonic
        randomize_ligand: yes
        randomize_ligand_sigma_multiplier: 2.0
        randomize_ligand_close_cutoff: 1.5 * angstrom
        mc_displacement_sigma: 10.0 * angstroms
        collision_rate: 5.0 / picosecond
        constraint_tolerance: 1.0e-6
        timestep: 2.0 * femtosecond
        nsteps_per_iteration: 2500
        number_of_iterations: 1000.999
        equilibration_timestep: 1.0 * femtosecond
        number_of_equilibration_iterations: 100
        minimize: False
        minimize_tolerance: 1.0 * kilojoules_per_mole / nanometers
        minimize_max_iterations: 0
        replica_mixing_scheme: swap-all
        online_analysis: no
        online_analysis_min_iterations: 20
        show_energies: True
        show_mixing_statistics: yes
        annihilate_sterics: no
        annihilate_electrostatics: true
    """
    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    assert len(yaml_builder.options) == 33
    assert len(yaml_builder.yank_options) == 23

    # Check correct types
    assert yaml_builder.options['constraints'] == openmm.app.AllBonds
    assert yaml_builder.yank_options['replica_mixing_scheme'] == 'swap-all'
    assert yaml_builder.yank_options['timestep'] == 2.0 * unit.femtoseconds
    assert yaml_builder.yank_options['constraint_tolerance'] == 1.0e-6
    assert yaml_builder.yank_options['nsteps_per_iteration'] == 2500
    assert type(yaml_builder.yank_options['nsteps_per_iteration']) is int
    assert yaml_builder.yank_options['number_of_iterations'] == 1000
    assert type(yaml_builder.yank_options['number_of_iterations']) is int
    assert yaml_builder.yank_options['minimize'] is False
    assert yaml_builder.yank_options['show_mixing_statistics'] is True
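

# The test_validation_* functions below use nose's generator-test protocol:
# each yielded tuple (callable, arg1, arg2, ...) is collected by nose and run
# as an individual test case.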
def test_validation_wrong_options():
    """YAML validation raises exception with wrong options."""
    options = [
        {'unknown_options': 3},
        {'minimize': 100}
    ]
    for option in options:
        yield assert_raises, YamlParseError, YamlBuilder._validate_options, option


def test_validation_correct_molecules():
    """Correct molecules YAML validation."""
    paths = examples_paths()
    molecules = [
        {'name': 'toluene', 'leap': {'parameters': 'leaprc.gaff'}},
        {'name': 'toluene', 'leap': {'parameters': ['leaprc.gaff', 'toluene.frcmod']}},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
         'antechamber': {'charge_method': None}},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}, 'epik': 0},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'},
         'epik': {'ph': 7.6, 'ph_tolerance': 0.7, 'tautomerize': False, 'select': 0}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
         'antechamber': {'charge_method': None}, 'epik': 1},

        {'filepath': paths['abl']},
        {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}},
        {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}, 'select': 1},
        {'filepath': paths['abl'], 'select': 'all'},
        {'filepath': paths['toluene'], 'leap': {'parameters': 'leaprc.gaff'}},
        {'filepath': paths['benzene'], 'epik': 1}
    ]
    for molecule in molecules:
        yield YamlBuilder._validate_molecules, {'mol': molecule}


def test_validation_wrong_molecules():
    """YAML validation raises exception with wrong molecules."""
    paths = examples_paths()
    paths['wrongformat'] = utils.get_data_filename(os.path.join('..', 'examples', 'README.md'))
    molecules = [
        {'antechamber': {'charge_method': 'bcc'}},
        {'filepath': paths['wrongformat']},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}, 'unknown': 4},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'invalid'},
         'antechamber': {'charge_method': None}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
         'antechamber': {'charge_method': 'bcc'}},
        {'filepath': 'nonexistentfile.pdb', 'leap': {'parameters': 'leaprc.ff14SB'}},
        {'filepath': paths['toluene'], 'smiles': 'Cc1ccccc1'},
        {'filepath': paths['toluene'], 'strip_protons': True},
        {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff14SB'}, 'epik': 0},
        {'name': 'toluene', 'epik': {'tautomerize': 6}},
        {'name': 'toluene', 'epik': {'extract_range': 1}},
        {'name': 'toluene', 'smiles': 'Cc1ccccc1'},
        {'name': 3},
        {'smiles': 'Cc1ccccc1', 'select': 1},
        {'name': 'Cc1ccccc1', 'select': 1},
        {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff14SB'}, 'select': 'notanoption'},
    ]
    for molecule in molecules:
        yield assert_raises, YamlParseError, YamlBuilder._validate_molecules, {'mol': molecule}


def test_validation_correct_solvents():
    """Correct solvents YAML validation."""
    solvents = [
        {'nonbonded_method': 'NoCutoff', 'nonbonded_cutoff': '3*nanometers'},
        {'nonbonded_method': 'PME', 'clearance': '3*angstroms'},
        {'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBC2'},
        {'nonbonded_method': 'CutoffPeriodic', 'nonbonded_cutoff': '9*angstroms',
         'clearance': '9*angstroms', 'positive_ion': 'Na+', 'negative_ion': 'Cl-'},
        {'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*(nano*mole)/liter'},
        {'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'ewald_error_tolerance': 0.001},
    ]
    for solvent in solvents:
        yield YamlBuilder._validate_solvents, {'solv': solvent}


def test_validation_wrong_solvents():
    """YAML validation raises exception with wrong solvents."""
    solvents = [
        {'nonbonded_cutoff: 3*nanometers'},
        {'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'implicit_solvent': 'OBC2'},
        {'nonbonded_method': 'NoCutoff', 'blabla': '3*nanometers'},
        {'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBX2'},
        {'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*angstrom'}
    ]
    for solvent in solvents:
        yield assert_raises, YamlParseError, YamlBuilder._validate_solvents, {'solv': solvent}


def test_validation_correct_systems():
    """Correct systems YAML validation."""
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
    """.format(examples_paths()['lysozyme'])
    basic_script = yaml.load(textwrap.dedent(basic_script))

    systems = [
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv'},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv', 'pack': True},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv',
         'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}},

        {'phase1_path': examples_paths()['bentol-complex'],
         'phase2_path': examples_paths()['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv'},

        {'phase1_path': examples_paths()['pxylene-complex'],
         'phase2_path': examples_paths()['pxylene-solvent'],
         'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',
         'gromacs_include_dir': examples_paths()['pxylene-gro-include']},
        {'phase1_path': examples_paths()['pxylene-complex'],
         'phase2_path': examples_paths()['pxylene-solvent'],
         'ligand_dsl': 'resname p-xylene', 'solvent': 'solv'},

        {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
        {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv',
         'leap': {'parameters': 'leaprc.gaff'}}
    ]
    for system in systems:
        modified_script = basic_script.copy()
        modified_script['systems'] = {'sys': system}
        yield yaml_builder.parse, modified_script


def test_validation_wrong_systems():
    """YAML validation raises exception with wrong systems specification."""
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
    """.format(examples_paths()['lysozyme'])
    basic_script = yaml.load(textwrap.dedent(basic_script))

    systems = [
        {'receptor': 'rec', 'ligand': 'lig'},
        {'receptor': 'rec', 'ligand': 1, 'solvent': 'solv'},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': ['solv', 'solv']},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'unknown'},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv',
         'parameters': 'leaprc.ff14SB'},

        {'phase1_path': examples_paths()['bentol-complex'][0],
         'phase2_path': examples_paths()['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv'},
        {'phase1_path': ['nonexistingpath.prmtop', 'nonexistingpath.inpcrd'],
         'phase2_path': examples_paths()['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv'},
        {'phase1_path': examples_paths()['bentol-complex'],
         'phase2_path': examples_paths()['bentol-solvent'],
         'ligand_dsl': 3.4, 'solvent': 'solv'},
        {'phase1_path': examples_paths()['bentol-complex'],
         'phase2_path': examples_paths()['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'unknown'},
        {'phase1_path': examples_paths()['bentol-complex'],
         'phase2_path': examples_paths()['pxylene-solvent'],
         'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',
         'gromacs_include_dir': examples_paths()['pxylene-gro-include']},

        {'receptor': 'rec', 'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
        {'ligand': 'lig', 'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
        {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv', 'leap': 'leaprc.gaff'}
    ]
    for system in systems:
        modified_script = basic_script.copy()
        modified_script['systems'] = {'sys': system}
        yield assert_raises, YamlParseError, yaml_builder.parse, modified_script


def test_order_phases():
    """YankLoader preserves protocol phase order."""
    ordered_phases = ['complex', 'solvent', 'athirdphase']
    yaml_content = """
    ---
    absolute-binding:
        {}:
            alchemical_path:
                lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
        {}:
            alchemical_path:
                lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]
        {}:
            alchemical_path:
                lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]""".format(*ordered_phases)

    # Be sure that normal parsing is not ordered or the test is useless
    parsed = yaml.load(textwrap.dedent(yaml_content))
    assert parsed['absolute-binding'].keys() != ordered_phases

    # Insert !Ordered tag
    yaml_content = yaml_content.replace('binding:', 'binding: !Ordered')
    parsed = yank_load(yaml_content)
    assert parsed['absolute-binding'].keys() == ordered_phases


def test_validation_correct_protocols():
    """Correct protocols YAML validation."""
    basic_protocol = yank_load(standard_protocol)

    # Alchemical paths
    protocols = [
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 0.0]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 0.0],
         'lambda_torsions': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_angles': [1.0, 0.8, 0.6, 0.3, 0.0]}
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol
        yield YamlBuilder._validate_protocols, modified_protocol

    # Phases
    alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])
    protocols = [
        {'complex': alchemical_path, 'solvent': alchemical_path},
        {'my-complex': alchemical_path, 'my-solvent': alchemical_path},
        {'solvent1': alchemical_path, 'solvent2': alchemical_path},
        {'solvent1variant': alchemical_path, 'solvent2variant': alchemical_path},
        collections.OrderedDict([('a', alchemical_path), ('z', alchemical_path)]),
        collections.OrderedDict([('z', alchemical_path), ('a', alchemical_path)])
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding'] = protocol
        yield YamlBuilder._validate_protocols, modified_protocol

        sorted_protocol = YamlBuilder._validate_protocols(modified_protocol)['absolute-binding']
        if isinstance(protocol, collections.OrderedDict):
            assert sorted_protocol.keys() == protocol.keys()
        else:
            assert isinstance(sorted_protocol, collections.OrderedDict)
            first_phase = sorted_protocol.keys()[0]
            assert 'complex' in first_phase or 'solvent1' in first_phase


def test_validation_wrong_protocols():
    """YAML validation raises exception with wrong alchemical protocols."""
    basic_protocol = yank_load(standard_protocol)

    # Alchemical paths
    protocols = [
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 'wrong!']},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 11000.0]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, -0.5]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': 0.0},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 0.0], 3: 2}
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol
        yield assert_raises, YamlParseError, YamlBuilder._validate_protocols, modified_protocol

    # Phases
    alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])
    protocols = [
        {'complex': alchemical_path},
        {2: alchemical_path, 'solvent': alchemical_path},
        {'complex': alchemical_path, 'solvent': alchemical_path, 'thirdphase': alchemical_path},
        {'my-complex-solvent': alchemical_path, 'my-solvent': alchemical_path},
        {'my-complex': alchemical_path, 'my-complex-solvent': alchemical_path},
        {'my-complex': alchemical_path, 'my-complex': alchemical_path},
        {'complex': alchemical_path, 'solvent1': alchemical_path, 'solvent2': alchemical_path},
        {'my-phase1': alchemical_path, 'my-phase2': alchemical_path},
        collections.OrderedDict([('my-phase1', alchemical_path), ('my-phase2', alchemical_path),
                                 ('my-phase3', alchemical_path)])
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding'] = protocol
        yield assert_raises, YamlParseError, YamlBuilder._validate_protocols, modified_protocol


def test_validation_correct_experiments():
    """Correct experiments YAML validation."""
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
    systems:
        sys: {{receptor: rec, ligand: lig, solvent: solv}}
    protocols:{}
    """.format(examples_paths()['lysozyme'], standard_protocol)
    basic_script = yank_load(basic_script)

    experiments = [
        {'system': 'sys', 'protocol': 'absolute-binding'}
    ]
    for experiment in experiments:
        modified_script = basic_script.copy()
        modified_script['experiments'] = experiment
        yield yaml_builder.parse, modified_script


def test_validation_wrong_experiments():
    """YAML validation raises exception with wrong experiments specification."""
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
    systems:
        sys: {{receptor: rec, ligand: lig, solvent: solv}}
    protocols:{}
    """.format(examples_paths()['lysozyme'], standard_protocol)
    basic_script = yank_load(basic_script)

    experiments = [
        {'system': 'unknownsys', 'protocol': 'absolute-binding'},
        {'system': 'sys', 'protocol': 'unknownprotocol'},
        {'system': 'sys'},
        {'protocol': 'absolute-binding'}
    ]
    for experiment in experiments:
        modified_script = basic_script.copy()
        modified_script['experiments'] = experiment
        yield assert_raises, YamlParseError, yaml_builder.parse, modified_script

# ==============================================================================
# Molecules pipeline
# ==============================================================================
def test_yaml_mol2_antechamber():
    """Test antechamber setup of molecule files."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        yaml_builder._db._setup_molecules('benzene')

        output_dir = yaml_builder._db.get_molecule_dir('benzene')
        gaff_path = os.path.join(output_dir, 'benzene.gaff.mol2')
        frcmod_path = os.path.join(output_dir, 'benzene.frcmod')

        # Get last modified time
        last_touched_gaff = os.stat(gaff_path).st_mtime
        last_touched_frcmod = os.stat(frcmod_path).st_mtime

        # Check that output files have been created
        assert os.path.exists(gaff_path)
        assert os.path.exists(frcmod_path)
        assert os.path.getsize(gaff_path) > 0
        assert os.path.getsize(frcmod_path) > 0

        # Check that a second call to _setup_molecules does not recreate the files
        time.sleep(0.5)  # st_mtime doesn't have much precision
        yaml_builder._db._setup_molecules('benzene')
        assert last_touched_gaff == os.stat(gaff_path).st_mtime
        assert last_touched_frcmod == os.stat(frcmod_path).st_mtime


@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_setup_name_smiles_openeye_charges():
    """Setup molecule from name and SMILES with openeye charges and gaff."""
    with omt.utils.temporary_directory() as tmp_dir:
        molecules_ids = ['toluene-smiles', 'p-xylene-name']
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        yaml_builder._db._setup_molecules(*molecules_ids)

        for mol in molecules_ids:
            output_dir = yaml_builder._db.get_molecule_dir(mol)
            output_basepath = os.path.join(output_dir, mol)

            # Check that all the files have been created
            assert os.path.exists(output_basepath + '.mol2')
            assert os.path.exists(output_basepath + '.gaff.mol2')
            assert os.path.exists(output_basepath + '.frcmod')
            assert os.path.getsize(output_basepath + '.mol2') > 0
            assert os.path.getsize(output_basepath + '.gaff.mol2') > 0
            assert os.path.getsize(output_basepath + '.frcmod') > 0

            atoms_frame, _ = mol2_to_dataframes(output_basepath + '.mol2')
            input_charges = atoms_frame['charge']
            atoms_frame, _ = mol2_to_dataframes(output_basepath + '.gaff.mol2')
            output_charges = atoms_frame['charge']

            # With openeye:am1bcc charges, the final charges should be unaltered
            if mol == 'p-xylene-name':
                assert input_charges.equals(output_charges)
            else:  # With antechamber, sqm should alter the charges a little
                assert not input_charges.equals(output_charges)

        # Check that molecules are resumed correctly
        yaml_builder = YamlBuilder(yaml_content)
        yaml_builder._db._setup_molecules(*molecules_ids)


@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_clashing_atoms():
    """Check that clashing atoms are resolved."""
    benzene_path = examples_paths()['benzene']
    toluene_path = examples_paths()['toluene']
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        system_id = 'explicit-system'
        system_description = yaml_content['systems'][system_id]
        system_description['pack'] = True
        system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])

        # Sanity check: at the beginning molecules clash
        toluene_pos = utils.get_oe_mol_positions(utils.read_oe_molecule(toluene_path))
        benzene_pos = utils.get_oe_mol_positions(utils.read_oe_molecule(benzene_path))
        assert compute_min_dist(toluene_pos, benzene_pos) < SetupDatabase.CLASH_THRESHOLD

        yaml_builder = YamlBuilder(yaml_content)

        for system_id in [system_id + '_vacuum', system_id + '_PME']:
            system_dir = os.path.dirname(
                yaml_builder._db.get_system(system_id)[0].position_path)

            # Get positions of molecules in the final system
            prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))
            inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))
            positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
            atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')
            benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)
            toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)

            # Test that clashes are resolved in the system
            min_dist, max_dist = compute_dist_bound(toluene_pos2, benzene_pos2)
            assert min_dist >= SetupDatabase.CLASH_THRESHOLD

            # For solvent we check that molecule is within the box
            if system_id.endswith('_PME'):
                assert max_dist <= yaml_content['solvents']['PME']['clearance']


@unittest.skipIf(not omt.schrodinger.is_schrodinger_suite_installed(),
                 "This test requires Schrodinger's suite")
def test_epik_enumeration():
    """Test epik protonation state enumeration."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        mol_ids = ['benzene-epik0', 'benzene-epikcustom']
        yaml_builder._db._setup_molecules(*mol_ids)

        for mol_id in mol_ids:
            output_dir = yaml_builder._db.get_molecule_dir(mol_id)
            output_basename = os.path.join(output_dir, mol_id + '-epik.')
            assert os.path.exists(output_basename + 'mol2')
            assert os.path.getsize(output_basename + 'mol2') > 0
            assert os.path.exists(output_basename + 'sdf')
            assert os.path.getsize(output_basename + 'sdf') > 0


def test_strip_protons():
    """Test that protons are stripped correctly for tleap."""
    mol_id = 'Abl'
    abl_path = examples_paths()['abl']
    with omt.utils.temporary_directory() as tmp_dir:
        # Safety check: protein must have protons ('ATOM  ' is the 6-character
        # PDB record name, so it needs two trailing spaces)
        has_hydrogen = False
        with open(abl_path, 'r') as f:
            for line in f:
                if line[:6] == 'ATOM  ' and (line[12] == 'H' or line[13] == 'H'):
                    has_hydrogen = True
                    break
        assert has_hydrogen

        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        output_dir = yaml_builder._db.get_molecule_dir(mol_id)
        output_path = os.path.join(output_dir, 'Abl.pdb')

        # We haven't set the strip_protons option, so this shouldn't do anything
        yaml_builder._db._setup_molecules(mol_id)
        assert not os.path.exists(output_path)

        # Now we set the strip_protons option and repeat
        yaml_builder._db.molecules[mol_id]['strip_protons'] = True
        yaml_builder._db._setup_molecules(mol_id)
        assert os.path.exists(output_path)
        assert os.path.getsize(output_path) > 0

        # The new pdb does not have hydrogen atoms
        has_hydrogen = False
        with open(output_path, 'r') as f:
            for line in f:
                if line[:6] == 'ATOM  ' and (line[12] == 'H' or line[13] == 'H'):
                    has_hydrogen = True
                    break
        assert not has_hydrogen

# ==============================================================================
# Combinatorial expansion
# ==============================================================================
class TestMultiMoleculeFiles():

    @classmethod
    def setup_class(cls):
        """Create a 2-frame PDB file in pdb_path. The second frame has the same
        positions as the first one but with the z-coordinate inverted."""
        # Creating a temporary directory and generating paths for output files
        cls.tmp_dir = tempfile.mkdtemp()
        cls.pdb_path = os.path.join(cls.tmp_dir, 'multipdb.pdb')
        cls.smiles_path = os.path.join(cls.tmp_dir, 'multismiles.smiles')
        cls.sdf_path = os.path.join(cls.tmp_dir, 'multisdf.sdf')
        cls.mol2_path = os.path.join(cls.tmp_dir, 'multimol2.mol2')

        # Rotation matrix to invert z-coordinate, i.e. flip molecule w.r.t. x-y plane
        rot = np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])

        # Create 2-frame PDB file. First frame is lysozyme, second is lysozyme with inverted z
        lysozyme_path = examples_paths()['lysozyme']
        lysozyme = PDBFile(lysozyme_path)

        # Rotate positions to invert z for the second frame
        symmetric_pos = lysozyme.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
        symmetric_pos = symmetric_pos.dot(rot) * unit.angstrom

        with open(cls.pdb_path, 'w') as f:
            PDBFile.writeHeader(lysozyme.topology, file=f)
            PDBFile.writeModel(lysozyme.topology, lysozyme.positions, file=f, modelIndex=0)
            PDBFile.writeModel(lysozyme.topology, symmetric_pos, file=f, modelIndex=1)

        # Create 2-molecule SMILES file
        with open(cls.smiles_path, 'w') as f:
            f.write('benzene,c1ccccc1\n')
            f.write('toluene,Cc1ccccc1\n')

        # Create 2-molecule sdf and mol2 with OpenEye
        if utils.is_openeye_installed():
            from openeye import oechem
            oe_benzene = utils.read_oe_molecule(examples_paths()['benzene'])
            oe_benzene_pos = utils.get_oe_mol_positions(oe_benzene).dot(rot)
            oe_benzene.NewConf(oechem.OEFloatArray(oe_benzene_pos.flatten()))

            # Save 2-conformer benzene in sdf and mol2 format
            utils.write_oe_molecule(oe_benzene, cls.sdf_path)
            utils.write_oe_molecule(oe_benzene, cls.mol2_path, mol2_resname='MOL')

    @classmethod
    def teardown_class(cls):
        shutil.rmtree(cls.tmp_dir)

    @unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
    def test_expand_molecules(self):
        """Check that combinatorial molecules are handled correctly."""
        yaml_content = """
        ---
        molecules:
            rec:
                filepath: !Combinatorial [{}, {}]
                leap: {{parameters: leaprc.ff14SB}}
            lig:
                name: !Combinatorial [iupac1, iupac2]
                leap: {{parameters: leaprc.gaff}}
                epik: !Combinatorial [0, 2]
            multi:
                filepath: {}
                leap: {{parameters: leaprc.ff14SB}}
                select: all
            smiles:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: all
            sdf:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: all
            mol2:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: all
        solvents:
            solv1:
                nonbonded_method: NoCutoff
            solv2:
                nonbonded_method: PME
                nonbonded_cutoff: 1*nanometer
                clearance: 10*angstroms
        protocols:{}
        systems:
            sys:
                receptor: !Combinatorial [rec, multi]
                ligand: lig
                solvent: !Combinatorial [solv1, solv2]
        experiments:
            system: sys
            protocol: absolute-binding
        """.format(self.sdf_path, self.mol2_path, self.pdb_path,
                   self.smiles_path, self.sdf_path, self.mol2_path,
                   indent(indent(standard_protocol)))
        yaml_content = textwrap.dedent(yaml_content)

        expected_content = """
        ---
        molecules:
            rec_multisdf:
                filepath: {}
                leap: {{parameters: leaprc.ff14SB}}
            rec_multimol2:
                filepath: {}
                leap: {{parameters: leaprc.ff14SB}}
            lig_0_iupac1:
                name: iupac1
                leap: {{parameters: leaprc.gaff}}
                epik: 0
            lig_2_iupac1:
                name: iupac1
                leap: {{parameters: leaprc.gaff}}
                epik: 2
            lig_0_iupac2:
                name: iupac2
                leap: {{parameters: leaprc.gaff}}
                epik: 0
            lig_2_iupac2:
                name: iupac2
                leap: {{parameters: leaprc.gaff}}
                epik: 2
            multi_0:
                filepath: {}
                leap: {{parameters: leaprc.ff14SB}}
                select: 0
            multi_1:
                filepath: {}
                leap: {{parameters: leaprc.ff14SB}}
                select: 1
            smiles_0:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 0
            smiles_1:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 1
            sdf_0:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 0
            sdf_1:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 1
            mol2_0:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 0
            mol2_1:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 1
        solvents:
            solv1:
                nonbonded_method: NoCutoff
            solv2:
                nonbonded_method: PME
                nonbonded_cutoff: 1*nanometer
                clearance: 10*angstroms
        protocols:{}
        systems:
            sys:
                receptor: !Combinatorial [rec_multisdf, rec_multimol2, multi_1, multi_0]
                ligand: !Combinatorial [lig_0_iupac2, lig_2_iupac1, lig_2_iupac2, lig_0_iupac1]
                solvent: !Combinatorial [solv1, solv2]
        experiments:
            system: sys
            protocol: absolute-binding
        """.format(self.sdf_path, self.mol2_path, self.pdb_path, self.pdb_path,
                   self.smiles_path, self.smiles_path, self.sdf_path, self.sdf_path,
                   self.mol2_path, self.mol2_path, indent(standard_protocol))
        expected_content = textwrap.dedent(expected_content)

        raw = yank_load(yaml_content)
        expanded = YamlBuilder(yaml_content)._expand_molecules(raw)
        assert expanded == yank_load(expected_content)

    def test_select_pdb_conformation(self):
        """Check that frame selection in multi-model PDB files works."""
        with omt.utils.temporary_directory() as tmp_dir:
            yaml_content = """
            ---
            options:
                output_dir: {}
                setup_dir: .
            molecules:
                selected:
                    filepath: {}
                    leap: {{parameters: leaprc.ff14SB}}
                    select: 1
            """.format(tmp_dir, self.pdb_path)
            yaml_content = textwrap.dedent(yaml_content)
            yaml_builder = YamlBuilder(yaml_content)

            # The molecule now is neither set up nor processed
            is_setup, is_processed = yaml_builder._db.is_molecule_setup('selected')
            assert is_setup is False
            assert is_processed is False

            # The setup of the molecule must isolate the frame in a single-frame PDB
            yaml_builder._db._setup_molecules('selected')
            selected_pdb_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                             'selected', 'selected.pdb')
            assert os.path.exists(os.path.join(selected_pdb_path))
            assert os.path.getsize(os.path.join(selected_pdb_path)) > 0

            # The positions must be the ones of the second frame
            selected_pdb = PDBFile(selected_pdb_path)
            selected_pos = selected_pdb.getPositions(asNumpy=True)
            second_pos = PDBFile(self.pdb_path).getPositions(asNumpy=True, frame=1)
            assert selected_pdb.getNumFrames() == 1
            assert (selected_pos == second_pos).all()

            # The description of the molecule is now updated
            assert os.path.normpath(yaml_builder._db.molecules['selected']['filepath']) == selected_pdb_path

            # The molecule is now both set up and processed
            is_setup, is_processed = yaml_builder._db.is_molecule_setup('selected')
            assert is_setup is True
            assert is_processed is True

            # A new instance of YamlBuilder is able to resume with the correct molecule
            yaml_builder = YamlBuilder(yaml_content)
            is_setup, is_processed = yaml_builder._db.is_molecule_setup('selected')
            assert is_setup is True
            assert is_processed is True

    @unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
    def test_setup_smiles(self):
        """Check that setting up molecules from SMILES files works."""
        from openeye.oechem import OEMolToSmiles
        with omt.utils.temporary_directory() as tmp_dir:
            yaml_content = """
            ---
            options:
                output_dir: {}
                setup_dir: .
            molecules:
                take-first:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                select-second:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 1
            """.format(tmp_dir, self.smiles_path, self.smiles_path)
            yaml_content = textwrap.dedent(yaml_content)
            yaml_builder = YamlBuilder(yaml_content)

            for i, mol_id in enumerate(['take-first', 'select-second']):
                # The molecule now is neither set up nor processed
                is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                assert is_setup is False
                assert is_processed is False

                # The single SMILES has been converted to a mol2 file
                yaml_builder._db._setup_molecules(mol_id)
                mol2_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR, mol_id, mol_id + '.mol2')
                assert os.path.exists(os.path.join(mol2_path))
                assert os.path.getsize(os.path.join(mol2_path)) > 0

                # The mol2 represents the right molecule
                csv_smiles_str = (open(self.smiles_path, 'r').readlines()[i]).strip().split(',')[1]
                mol2_smiles_str = OEMolToSmiles(utils.read_oe_molecule(mol2_path))
                assert mol2_smiles_str == csv_smiles_str

                # The molecule is now both set up and processed
                is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                assert is_setup is True
                assert is_processed is True

                # A new instance of YamlBuilder is able to resume with the correct molecule
                yaml_builder = YamlBuilder(yaml_content)
                is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                assert is_setup is True
                assert is_processed is True

    @unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
    def test_select_sdf_mol2(self):
        """Check that selection in sdf and mol2 files works."""
        with omt.utils.temporary_directory() as tmp_dir:
            yaml_content = """
            ---
            options:
                output_dir: {}
                setup_dir: .
            molecules:
                sdf_0:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 0
                sdf_1:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 1
                mol2_0:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 0
                mol2_1:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 1
            """.format(tmp_dir, self.sdf_path, self.sdf_path, self.mol2_path, self.mol2_path)
            yaml_content = textwrap.dedent(yaml_content)
            yaml_builder = YamlBuilder(yaml_content)

            for extension in ['sdf', 'mol2']:
                multi_path = getattr(self, extension + '_path')
                for model_idx in [0, 1]:
                    mol_id = extension + '_' + str(model_idx)

                    # The molecule now is neither set up nor processed
                    is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                    assert is_setup is False
                    assert is_processed is False

                    yaml_builder._db._setup_molecules(mol_id)

                    # The setup of the molecule must isolate the selected model
                    # in a single-molecule file
                    single_mol_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                                   mol_id, mol_id + '.' + extension)
                    assert os.path.exists(os.path.join(single_mol_path))
                    assert os.path.getsize(os.path.join(single_mol_path)) > 0

                    # sdf files must be converted to mol2 to be fed to antechamber
                    if extension == 'sdf':
                        single_mol_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                                       mol_id, mol_id + '.mol2')
                        assert os.path.exists(os.path.join(single_mol_path))
                        assert os.path.getsize(os.path.join(single_mol_path)) > 0

                    # Check antechamber parametrization
                    single_mol_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                                   mol_id, mol_id + '.gaff.mol2')
                    assert os.path.exists(os.path.join(single_mol_path))
                    assert os.path.getsize(os.path.join(single_mol_path)) > 0

                    # The positions must be approximately correct (antechamber moves the molecule)
                    selected_oe_mol = utils.read_oe_molecule(single_mol_path)
                    selected_pos = utils.get_oe_mol_positions(selected_oe_mol)
                    second_oe_mol = utils.read_oe_molecule(multi_path, conformer_idx=model_idx)
                    second_pos = utils.get_oe_mol_positions(second_oe_mol)
                    assert selected_oe_mol.NumConfs() == 1
                    assert np.allclose(selected_pos, second_pos, atol=1e-1)

                    # The molecule is now both set up and processed
                    is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                    assert is_setup is True
                    assert is_processed is True

                    # A new instance of YamlBuilder is able to resume with the correct molecule
                    yaml_builder = YamlBuilder(yaml_content)
                    is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                    assert is_setup is True
                    assert is_processed is True


def test_system_expansion():
    """Combinatorial systems are correctly expanded."""
    # We need 2 combinatorial systems
    template_script = get_template_script()
    template_system = template_script['systems']['implicit-system']
    del template_system['leap']
    template_script['systems'] = {'system1': template_system.copy(),
                                  'system2': template_system.copy()}
    template_script['systems']['system1']['receptor'] = utils.CombinatorialLeaf(['Abl', 'T4Lysozyme'])
    template_script['systems']['system2']['ligand'] = utils.CombinatorialLeaf(['p-xylene', 'toluene'])
    template_script['experiments']['system'] = utils.CombinatorialLeaf(['system1', 'system2'])
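    # Expanded systems are named <system_id>_<leaf_value> with
    # non-alphanumeric characters dropped from the leaf value
    # (e.g. 'p-xylene' -> 'pxylene'), as the expected script below reflects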

    # Expected expanded script
    expected_script = yank_load("""
    systems:
        system1_Abl: {receptor: Abl, ligand: p-xylene, solvent: GBSA-OBC2}
        system1_T4Lysozyme: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}
        system2_pxylene: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}
        system2_toluene: {receptor: T4Lysozyme, ligand: toluene, solvent: GBSA-OBC2}
    experiments:
        system: !Combinatorial ['system1_Abl', 'system1_T4Lysozyme', 'system2_pxylene', 'system2_toluene']
        protocol: absolute-binding
    """)
    expanded_script = template_script.copy()
    expanded_script['systems'] = expected_script['systems']
    expanded_script['experiments'] = expected_script['experiments']

    assert YamlBuilder(template_script)._expand_systems(template_script) == expanded_script


def test_exp_sequence():
    """Test that all experiments in a sequence are parsed."""
    yaml_content = """
    ---
    molecules:
        rec:
            filepath: {}
            leap: {{parameters: leaprc.ff14SB}}
        lig:
            name: lig
            leap: {{parameters: leaprc.gaff}}
    solvents:
        solv1:
            nonbonded_method: NoCutoff
        solv2:
            nonbonded_method: PME
            nonbonded_cutoff: 1*nanometer
            clearance: 10*angstroms
    protocols:{}
    systems:
        system1:
            receptor: rec
            ligand: lig
            solvent: !Combinatorial [solv1, solv2]
        system2:
            receptor: rec
            ligand: lig
            solvent: solv1
    experiment1:
        system: system1
        protocol: absolute-binding
    experiment2:
        system: system2
        protocol: absolute-binding
    experiments: [experiment1, experiment2]
    """.format(examples_paths()['lysozyme'], standard_protocol)
    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    assert len(yaml_builder._experiments) == 2

# ==============================================================================
# Systems pipeline
# ==============================================================================
def test_setup_implicit_system_leap():
    """Create prmtop and inpcrd for implicit solvent protein-ligand system."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)

        output_dir = os.path.dirname(
            yaml_builder._db.get_system('implicit-system')[0].position_path)
        last_modified_path = os.path.join(output_dir, 'complex.prmtop')
        last_modified = os.stat(last_modified_path).st_mtime

        # Test that output files exist and there is no water
        for phase in ['complex', 'solvent']:
            found_resnames = set()
            pdb_path = os.path.join(output_dir, phase + '.pdb')
            prmtop_path = os.path.join(output_dir, phase + '.prmtop')
            inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')

            with open(pdb_path, 'r') as f:
                for line in f:
                    if len(line) > 10:
                        found_resnames.add(line[17:20])

            assert os.path.exists(prmtop_path)
            assert os.path.exists(inpcrd_path)
            assert os.path.getsize(prmtop_path) > 0
            assert os.path.getsize(inpcrd_path) > 0
            assert 'MOL' in found_resnames
            assert 'WAT' not in found_resnames

        # Test that another call does not regenerate the system
        time.sleep(0.5)  # st_mtime doesn't have much precision
        yaml_builder._db.get_system('implicit-system')
        assert last_modified == os.stat(last_modified_path).st_mtime


def test_setup_explicit_system_leap():
    """Create prmtop and inpcrd protein-ligand system in explicit solvent."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)

        output_dir = os.path.dirname(
            yaml_builder._db.get_system('explicit-system')[0].position_path)

        # Test that output file exists and that there is water
        expected_resnames = {'complex': set(['BEN', 'TOL', 'WAT']),
                             'solvent': set(['TOL', 'WAT'])}
        for phase in expected_resnames:
            found_resnames = set()
            pdb_path = os.path.join(output_dir, phase + '.pdb')
            prmtop_path = os.path.join(output_dir, phase + '.prmtop')
            inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')

            with open(pdb_path, 'r') as f:
                for line in f:
                    if len(line) > 10:
                        found_resnames.add(line[17:20])

            assert os.path.exists(prmtop_path)
            assert os.path.exists(inpcrd_path)
            assert os.path.getsize(prmtop_path) > 0
            assert os.path.getsize(inpcrd_path) > 0
            assert found_resnames == expected_resnames[phase]


def test_neutralize_system():
    """Test whether the system charge is neutralized correctly."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        yaml_content['systems']['explicit-system']['receptor'] = 'T4Lysozyme'
        yaml_content['systems']['explicit-system']['ligand'] = 'p-xylene'
        yaml_builder = YamlBuilder(yaml_content)

        output_dir = os.path.dirname(
            yaml_builder._db.get_system('explicit-system')[0].position_path)

        # Test that output file exists and that there are ions
        found_resnames = set()
        with open(os.path.join(output_dir, 'complex.pdb'), 'r') as f:
            for line in f:
                if len(line) > 10:
                    found_resnames.add(line[17:20])
        assert set(['MOL', 'WAT', 'Cl-']) <= found_resnames

        # Check that parameter files exist
        prmtop_path = os.path.join(output_dir, 'complex.prmtop')
        inpcrd_path = os.path.join(output_dir, 'complex.inpcrd')
        assert os.path.exists(prmtop_path)
        assert os.path.exists(inpcrd_path)


@unittest.skipIf(not utils.is_openeye_installed(), "This test requires OpenEye toolkit")
def test_charged_ligand():
    """Check that there are alchemical counterions for charged ligands."""
    imatinib_path = examples_paths()['imatinib']
    with omt.utils.temporary_directory() as tmp_dir:
        receptors = {'Asp': -1, 'Abl': -8}  # receptor name -> net charge
        updates = yank_load("""
        molecules:
            Asp:
                name: "(3S)-3-amino-4-hydroxy-4-oxo-butanoate"
                openeye: {{quacpac: am1-bcc}}
                antechamber: {{charge_method: null}}
            imatinib:
                filepath: {}
                openeye: {{quacpac: am1-bcc}}
                antechamber: {{charge_method: null}}
        explicit-system:
            receptor: !Combinatorial {}
            ligand: imatinib
        """.format(imatinib_path, receptors.keys()))
        yaml_content = get_template_script(tmp_dir)
        yaml_content['molecules'].update(updates['molecules'])
        yaml_content['systems']['explicit-system'].update(updates['explicit-system'])
        yaml_builder = YamlBuilder(yaml_content)

        for receptor in receptors:
            system_files_paths = yaml_builder._db.get_system('explicit-system_' + receptor)
            for i, phase_name in enumerate(['complex', 'solvent']):
                inpcrd_file_path = system_files_paths[i].position_path
                prmtop_file_path = system_files_paths[i].topology_path
                phase = pipeline.prepare_phase(inpcrd_file_path, prmtop_file_path, 'resname MOL',
                                               {'nonbondedMethod': openmm.app.PME})

                # Safety check: receptor must be negatively charged as expected
                if phase_name == 'complex':
                    receptor_net_charge = pipeline.compute_net_charge(phase.reference_system,
                                                                      phase.atom_indices['receptor'])
                    assert receptor_net_charge == receptors[receptor]

                # The 'ligand_counterions' component contains one anion
                assert len(phase.atom_indices['ligand_counterions']) == 1
                ion_idx = phase.atom_indices['ligand_counterions'][0]
                ion_atom = next(itertools.islice(phase.reference_topology.atoms(), ion_idx, None))
                assert '-' in ion_atom.residue.name

                # In complex, there should be both ions even if the system is globally
                # neutral (e.g. asp lys system), because of the alchemical ion
                found_resnames = set()
                output_dir = os.path.dirname(system_files_paths[0].position_path)
                with open(os.path.join(output_dir, phase_name + '.pdb'), 'r') as f:
                    for line in f:
                        if len(line) > 10:
                            found_resnames.add(line[17:20])
                if phase_name == 'complex':
                    assert set(['Na+', 'Cl-']) <= found_resnames
                else:
                    assert set(['Cl-']) <= found_resnames


def test_setup_explicit_solvation_system():
    """Create prmtop and inpcrd files for solvation free energy in explicit solvent."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_script = get_template_script(tmp_dir)
        yaml_script['systems'] = {
            'system1':
                {'solute': 'toluene', 'solvent1': 'PME', 'solvent2': 'vacuum',
                 'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}}}
        del yaml_script['experiments']

        yaml_builder = YamlBuilder(yaml_script)
        output_dir = os.path.dirname(
            yaml_builder._db.get_system('system1')[0].position_path)

        # Test that output file exists and that it has correct components
        expected_resnames = {'solvent1': set(['TOL', 'WAT']), 'solvent2': set(['TOL'])}
        for phase in expected_resnames:
            found_resnames = set()
            pdb_path = os.path.join(output_dir, phase + '.pdb')
            prmtop_path = os.path.join(output_dir, phase + '.prmtop')
            inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')

            with open(pdb_path, 'r') as f:
                for line in f:
                    if len(line) > 10:
                        found_resnames.add(line[17:20])

            assert os.path.exists(prmtop_path)
            assert os.path.exists(inpcrd_path)
            assert os.path.getsize(prmtop_path) > 0
            assert os.path.getsize(inpcrd_path) > 0
            assert found_resnames == expected_resnames[phase]


def test_setup_multiple_parameters_system():
    """Set up system with molecule that needs many parameter files."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_script = get_template_script(tmp_dir)

        # Force antechamber parametrization of benzene to output frcmod file
        yaml_builder = YamlBuilder(yaml_script)
        yaml_builder._db._setup_molecules('benzene')
        benzene_dir = yaml_builder._db.get_molecule_dir('benzene')
        frcmod_path = os.path.join(benzene_dir, 'benzene.frcmod')
        benzene_path = os.path.join(benzene_dir, 'benzene.gaff.mol2')

        # Redefine benzene to use leaprc.gaff and benzene.frcmod
        # and set up system for hydration free energy calculation
        yaml_script['molecules'] = {
            'benzene-frcmod': {'filepath': benzene_path,
                               'leap': {'parameters': ['leaprc.gaff', frcmod_path]}}}
        yaml_script['systems'] = {
            'system':
                {'solute': 'benzene-frcmod', 'solvent1': 'PME', 'solvent2': 'vacuum',
                 'leap': {'parameters': 'leaprc.ff14SB'}}
        }
        del yaml_script['experiments']

        yaml_builder = YamlBuilder(yaml_script)
        system_files_path = yaml_builder._db.get_system('system')

        # Check that output files exist
        for phase in system_files_path:
            assert os.path.exists(phase.topology_path)
            assert os.path.exists(phase.position_path)
            assert os.path.getsize(phase.topology_path) > 0
            assert os.path.getsize(phase.position_path) > 0

# ==============================================================================
# Experiment execution
# ==============================================================================
def test_yaml_creation():
    """Test the content of generated single experiment YAML files."""
    ligand_path = examples_paths()['p-xylene']
    toluene_path = examples_paths()['toluene']
    with omt.utils.temporary_directory() as tmp_dir:
        molecules = """
            T4lysozyme:
                filepath: {}
                leap: {{parameters: leaprc.ff14SB}}""".format(examples_paths()['lysozyme'])
        solvent = """
            vacuum:
                nonbonded_method: NoCutoff"""
        protocol = indent(standard_protocol)
        system = """
            system:
                ligand: p-xylene
                receptor: T4lysozyme
                solvent: vacuum"""
        experiment = """
            protocol: absolute-binding
            system: system"""

        yaml_content = """
        ---
        options:
            output_dir: {}
        molecules:{}
            p-xylene:
                filepath: {}
                antechamber: {{charge_method: bcc}}
                leap: {{parameters: leaprc.gaff}}
            benzene:
                filepath: {}
                antechamber: {{charge_method: bcc}}
                leap: {{parameters: leaprc.gaff}}
        solvents:{}
            GBSA-OBC2:
                nonbonded_method: NoCutoff
                implicit_solvent: OBC2
        systems:{}
        protocols:{}
        experiments:{}
        """.format(os.path.relpath(tmp_dir), molecules,
                   os.path.relpath(ligand_path), toluene_path,
                   solvent, system, protocol, experiment)

        # We need to check whether the relative paths to the output directory and
        # for p-xylene are handled correctly while absolute paths (T4lysozyme) are
        # left untouched
        expected_yaml_content = textwrap.dedent("""
        ---
        options:
            experiments_dir: .
            output_dir: .
        molecules:{}
            p-xylene:
                filepath: {}
                antechamber: {{charge_method: bcc}}
                leap: {{parameters: leaprc.gaff}}
        solvents:{}
        systems:{}
        protocols:{}
        experiments:{}
        """.format(molecules, os.path.relpath(ligand_path, tmp_dir),
                   solvent, system, protocol, experiment))
        expected_yaml_content = expected_yaml_content[1:]  # remove first '\n'

        yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))

        # during setup we can modify molecule's fields, so we need
        # to check that it doesn't affect the YAML file exported
        experiment_dict = yaml.load(experiment)
        yaml_builder._db.get_system(experiment_dict['system'])

        generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')
        yaml_builder._generate_yaml(experiment_dict, generated_yaml_path)
        with open(generated_yaml_path, 'r') as f:
            assert yaml.load(f) == yank_load(expected_yaml_content)


def test_get_alchemical_path():
    """Check that conversion to list of AlchemicalStates is correct."""
    yaml_content = """
    ---
    protocols:{}
    """.format(standard_protocol)
    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    alchemical_paths = yaml_builder._get_alchemical_paths('absolute-binding')

    assert len(alchemical_paths) == 2
    assert 'complex' in alchemical_paths
    assert 'solvent' in alchemical_paths

    complex_path = alchemical_paths['complex']
    assert isinstance(complex_path[0], AlchemicalState)
    assert complex_path[3]['lambda_electrostatics'] == 0.6
    assert complex_path[4]['lambda_sterics'] == 0.4
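    # lambda_restraints is not specified in standard_protocol, so it is
    # expected to default to 0.0 in the generated AlchemicalState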
    assert complex_path[5]['lambda_restraints'] == 0.0
    assert len(complex_path) == 7


def test_run_experiment_from_amber_files():
    """Test experiment run from prmtop/inpcrd files."""
    complex_path = examples_paths()['bentol-complex']
    solvent_path = examples_paths()['bentol-solvent']
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_script = get_template_script(tmp_dir)
        del yaml_script['molecules']  # we shouldn't need any molecule
        yaml_script['systems'] = {'explicit-system':
                                  {'phase1_path': complex_path, 'phase2_path': solvent_path,
                                   'ligand_dsl': 'resname TOL', 'solvent': 'PME'}}

        yaml_builder = YamlBuilder(yaml_script)
        yaml_builder._check_resume()  # check_resume should not raise exceptions
        yaml_builder.build_experiments()

        # The experiments folders are correctly named and positioned
        output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
        assert os.path.isdir(output_dir)
        assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
        assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
        assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
        assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))

        # Analysis script is correct
        analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
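        # (each entry pairs a phase name with the sign its free energy
        # contribution carries when the phases are combined during analysis)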
        with open(analysis_script_path, 'r') as f:
            assert yaml.load(f) == [['complex', 1], ['solvent', -1]]


@attr('slow')  # Skip on Travis-CI
def test_run_experiment_from_gromacs_files():
    """Test experiment run from top/gro files."""
    complex_path = examples_paths()['pxylene-complex']
    solvent_path = examples_paths()['pxylene-solvent']
    include_path = examples_paths()['pxylene-gro-include']
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_script = get_template_script(tmp_dir)
        del yaml_script['molecules']  # we shouldn't need any molecule
        yaml_script['systems'] = {'explicit-system':
                                  {'phase1_path': complex_path, 'phase2_path': solvent_path,
                                   'ligand_dsl': 'resname "p-xylene"', 'solvent': 'PME',
                                   'gromacs_include_dir': include_path}}
        yaml_script['experiments']['system'] = 'explicit-system'

        yaml_builder = YamlBuilder(yaml_script)
        yaml_builder._check_resume()  # check_resume should not raise exceptions
        yaml_builder.build_experiments()

        # The experiments folders are correctly named and positioned
        output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
        assert os.path.isdir(output_dir)
        assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
        assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
        assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
        assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))

        # Analysis script is correct
        analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
        with open(analysis_script_path, 'r') as f:
            assert yaml.load(f) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment():
"""Test experiment run and resuming."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_content = """
---
options:
resume_setup: no
resume_simulation: no
number_of_iterations: 1
output_dir: overwritten
annihilate_sterics: yes
molecules:
T4lysozyme:
filepath: {}
leap: {{parameters: leaprc.ff14SB}}
select: 0
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:
vacuum:
nonbonded_method: NoCutoff
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
protocols:{}
systems:
system:
receptor: T4lysozyme
ligand: p-xylene
solvent: !Combinatorial [vacuum, GBSA-OBC2]
experiments:
system: system
options:
output_dir: {}
setup_dir: ''
experiments_dir: ''
protocol: absolute-binding
""".format(examples_paths()['lysozyme'], examples_paths()['p-xylene'],
indent(standard_protocol), tmp_dir)
yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
# Now check_setup_resume should not raise exceptions
yaml_builder._check_resume()
        # We set up a molecule; with resume_setup: no we can't run the experiment
err_msg = ''
yaml_builder._db._setup_molecules('p-xylene')
try:
yaml_builder.build_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'molecule' in err_msg
# Same thing with a system
err_msg = ''
system_dir = os.path.dirname(
yaml_builder._db.get_system('system_vacuum')[0].position_path)
try:
yaml_builder.build_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'system' in err_msg
# Now we set resume_setup to True and things work
yaml_builder.options['resume_setup'] = True
ligand_dir = yaml_builder._db.get_molecule_dir('p-xylene')
frcmod_file = os.path.join(ligand_dir, 'p-xylene.frcmod')
prmtop_file = os.path.join(system_dir, 'complex.prmtop')
molecule_last_touched = os.stat(frcmod_file).st_mtime
system_last_touched = os.stat(prmtop_file).st_mtime
yaml_builder.build_experiments()
# Neither the system nor the molecule has been processed again
assert molecule_last_touched == os.stat(frcmod_file).st_mtime
assert system_last_touched == os.stat(prmtop_file).st_mtime
# The experiments folders are correctly named and positioned
for exp_name in ['systemvacuum', 'systemGBSAOBC2']:
# The output directory must be the one in the experiment section
output_dir = os.path.join(tmp_dir, exp_name)
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, exp_name + '.yaml'))
assert os.path.isfile(os.path.join(output_dir, exp_name + '.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['complex', 1], ['solvent', -1]]
# Now we can't run the experiment again with resume_simulation: no
try:
yaml_builder.build_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'experiment' in err_msg
# We set resume_simulation: yes and now things work
yaml_builder.options['resume_simulation'] = True
yaml_builder.build_experiments()
def test_run_solvation_experiment():
"""Test solvation free energy experiment run."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['systems'] = {
'system1':
{'solute': 'toluene', 'solvent1': 'PME', 'solvent2': 'vacuum',
'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}}}
protocol = yaml_script['protocols']['absolute-binding']['solvent']
yaml_script['protocols'] = {
'hydration-protocol': {
'solvent1' : protocol,
'solvent2' : protocol
}
}
yaml_script['experiments'] = {
'system' : 'system1',
'protocol' : 'hydration-protocol'
}
yaml_builder = YamlBuilder(yaml_script)
yaml_builder._check_resume() # check_resume should not raise exceptions
yaml_builder.build_experiments()
# The experiments folders are correctly named and positioned
output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'solvent1.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent2.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['solvent1', 1], ['solvent2', -1]]
# Check that solvated phase has a barostat.
from netCDF4 import Dataset
from simtk import openmm
ncfile = Dataset(os.path.join(output_dir, 'solvent1.nc'), 'r')
ncgrp_stateinfo = ncfile.groups['thermodynamic_states']
system = openmm.System()
system.__setstate__(str(ncgrp_stateinfo.variables['reference_system'][0]))
has_barostat = False
for force in system.getForces():
if force.__class__.__name__ == 'MonteCarloBarostat':
has_barostat = True
if not has_barostat:
raise Exception('Explicit solvent phase of hydration free energy calculation does not have a barostat.')
if __name__ == '__main__':
test_run_solvation_experiment()
|
jchodera/yank
|
Yank/tests/test_yaml.py
|
Python
|
lgpl-3.0
| 78,332
|
[
"Gromacs",
"MDTraj",
"OpenMM"
] |
8b562283ffaa298bd7773da7d4e8f25fac441aad71f9b171e0e21eea505cc2ad
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
class CellSystem(ut.TestCase):
S = espressomd.System()
def test_cell_system(self):
self.S.cell_system.set_n_square(use_verlet_lists=False)
s = self.S.cell_system.get_state()
self.assertEqual([s['use_verlet_lists'], s['type']], [0, "nsquare"])
self.S.cell_system.set_layered(n_layers=5)
s = self.S.cell_system.get_state()
self.assertEqual([s['type'], s['n_layers']], ["layered", 5])
self.S.cell_system.set_domain_decomposition(use_verlet_lists=True)
s = self.S.cell_system.get_state()
self.assertEqual(
[s['use_verlet_lists'], s['type']], [1, "domain_decomposition"])
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
lahnerml/espresso
|
testsuite/python/cellsystem.py
|
Python
|
gpl-3.0
| 1,626
|
[
"ESPResSo"
] |
95200e90a5e254f7fd4328a595ff0256062cdd7bb0b423976092eea78a24e8e5
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import logging
import re
from devil.android import device_errors
logger = logging.getLogger(__name__)
_atexit_messages = set()
# Defines how to switch between the default performance configuration
# ('default_mode') and the mode for use when benchmarking ('high_perf_mode').
# For devices not in the list, the default is to set the scaling governor to
# 'performance' and reset it back to 'ondemand' when benchmarking is finished.
#
# The 'default_mode_governor' is mandatory to define, while
# 'high_perf_mode_governor' is ignored: the 'performance' governor is
# currently used for all benchmarking on all devices.
#
# TODO(crbug.com/383566): Add definitions for all devices used in the perf
# waterfall.
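#
# Each device entry below may define:
# - 'default_mode_governor' (required): governor restored by SetDefaultPerfMode.
# - 'high_perf_mode' / 'default_mode': optional dicts with keys
#   'bring_cpu_cores_online' (bool), 'cpu_max_freq' (an int, or a dict mapping
#   'X..Y' core ranges to ints) and 'gpu_max_freq' (int).
# - 'big_cores': names of the big-cluster cores, e.g. ['cpu4', 'cpu5'].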
_PERFORMANCE_MODE_DEFINITIONS = {
# Fire TV Edition - 4K
'AFTKMST12': {
'default_mode_governor': 'interactive',
},
'Pixel 5': {
'big_cores': ['cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'Pixel 4a': {
'big_cores': ['cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'Pixel 4': {
'big_cores': ['cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
# Pixel 3
'blueline': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
# The SoC is Arm big.LITTLE. The cores 0..3 are LITTLE,
# the 4..7 are big.
'cpu_max_freq': {
'0..3': 1228800,
'4..7': 1536000
},
'gpu_max_freq': 520000000,
},
'default_mode': {
'cpu_max_freq': {
'0..3': 1766400,
'4..7': 2649600
},
'gpu_max_freq': 710000000,
},
'big_cores': ['cpu4', 'cpu5', 'cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'Pixel 2': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
# These are set to roughly 7/8 of the max frequency. The purpose of
# this is to ensure that thermal throttling doesn't kick in midway
# through a test and cause flaky results. It should also improve the
# longevity of the devices by keeping them cooler.
'cpu_max_freq': {
'0..3': 1670400,
'4..7': 2208000,
},
'gpu_max_freq': 670000000,
},
'default_mode': {
# These are the maximum frequencies available for these CPUs and
# GPUs.
'cpu_max_freq': {
'0..3': 1900800,
'4..7': 2457600,
},
'gpu_max_freq': 710000000,
},
'big_cores': ['cpu4', 'cpu5', 'cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'GT-I9300': {
'default_mode_governor': 'pegasusq',
},
'Galaxy Nexus': {
'default_mode_governor': 'interactive',
},
# Pixel
'msm8996': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
'cpu_max_freq': 1209600,
'gpu_max_freq': 315000000,
},
'default_mode': {
# The SoC is Arm big.LITTLE. The cores 0..1 are LITTLE,
# the 2..3 are big.
'cpu_max_freq': {
'0..1': 1593600,
'2..3': 2150400
},
'gpu_max_freq': 624000000,
},
'big_cores': ['cpu2', 'cpu3'],
'default_mode_governor': 'sched',
},
'Nexus 7': {
'default_mode_governor': 'interactive',
},
'Nexus 10': {
'default_mode_governor': 'interactive',
},
'Nexus 4': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
},
'default_mode_governor': 'ondemand',
},
'Nexus 5': {
# The list of possible GPU frequency values can be found in:
# /sys/class/kgsl/kgsl-3d0/gpu_available_frequencies.
# For CPU cores the possible frequency values are at:
# /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
'high_perf_mode': {
'bring_cpu_cores_online': True,
'cpu_max_freq': 1190400,
'gpu_max_freq': 200000000,
},
'default_mode': {
'cpu_max_freq': 2265600,
'gpu_max_freq': 450000000,
},
'default_mode_governor': 'ondemand',
},
'Nexus 5X': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
'cpu_max_freq': 1248000,
'gpu_max_freq': 300000000,
},
'default_mode': {
'governor': 'ondemand',
# The SoC is ARM big.LITTLE. The cores 4..5 are big,
# the 0..3 are LITTLE.
'cpu_max_freq': {
'0..3': 1440000,
'4..5': 1824000
},
'gpu_max_freq': 600000000,
},
'big_cores': ['cpu4', 'cpu5'],
'default_mode_governor': 'ondemand',
},
}
def _GetPerfModeDefinitions(product_model):
if product_model.startswith('AOSP on '):
product_model = product_model.replace('AOSP on ', '')
return _PERFORMANCE_MODE_DEFINITIONS.get(product_model)
def _NoisyWarning(message):
message += ' Results may be NOISY!!'
logger.warning(message)
# Add an additional warning at exit, such that it's clear that any results
# may be different/noisy (due to the lack of intended performance mode).
if message not in _atexit_messages:
_atexit_messages.add(message)
atexit.register(logger.warning, message)
class PerfControl(object):
"""Provides methods for setting the performance mode of a device."""
_AVAILABLE_GOVERNORS_REL_PATH = 'cpufreq/scaling_available_governors'
_CPU_FILE_PATTERN = re.compile(r'^cpu\d+$')
_CPU_PATH = '/sys/devices/system/cpu'
_KERNEL_MAX = '/sys/devices/system/cpu/kernel_max'
def __init__(self, device):
self._device = device
self._cpu_files = []
for file_name in self._device.ListDirectory(self._CPU_PATH, as_root=True):
if self._CPU_FILE_PATTERN.match(file_name):
self._cpu_files.append(file_name)
assert self._cpu_files, 'Failed to detect CPUs.'
self._cpu_file_list = ' '.join(self._cpu_files)
logger.info('CPUs found: %s', self._cpu_file_list)
self._have_mpdecision = self._device.FileExists('/system/bin/mpdecision')
raw = self._ReadEachCpuFile(self._AVAILABLE_GOVERNORS_REL_PATH)
self._available_governors = [
(cpu, raw_governors.strip().split() if not exit_code else None)
for cpu, raw_governors, exit_code in raw
]
def _SetMaxFrequenciesFromMode(self, mode):
"""Set maximum frequencies for GPU and CPU cores.
Args:
mode: A dictionary mapping optional keys 'cpu_max_freq' and 'gpu_max_freq'
to integer values of frequency supported by the device.
"""
cpu_max_freq = mode.get('cpu_max_freq')
if cpu_max_freq:
if not isinstance(cpu_max_freq, dict):
self._SetScalingMaxFreqForCpus(cpu_max_freq, self._cpu_file_list)
else:
for key, max_frequency in cpu_max_freq.items():
# Convert 'X' to 'cpuX' and 'X..Y' to 'cpuX cpu<X+1> .. cpuY'.
if '..' in key:
range_min, range_max = key.split('..')
range_min, range_max = int(range_min), int(range_max)
else:
range_min = range_max = int(key)
cpu_files = [
'cpu%d' % number for number in range(range_min, range_max + 1)
]
# Set the |max_frequency| on requested subset of the cores.
self._SetScalingMaxFreqForCpus(max_frequency, ' '.join(cpu_files))
gpu_max_freq = mode.get('gpu_max_freq')
if gpu_max_freq:
self._SetMaxGpuClock(gpu_max_freq)
def SetHighPerfMode(self):
"""Sets the highest stable performance mode for the device."""
try:
self._device.EnableRoot()
except device_errors.CommandFailedError:
_NoisyWarning('Need root for performance mode.')
return
mode_definitions = _GetPerfModeDefinitions(self._device.product_model)
if not mode_definitions:
self.SetScalingGovernor('performance')
return
high_perf_mode = mode_definitions.get('high_perf_mode')
if not high_perf_mode:
self.SetScalingGovernor('performance')
return
if high_perf_mode.get('bring_cpu_cores_online', False):
self._ForceAllCpusOnline(True)
if not self._AllCpusAreOnline():
_NoisyWarning('Failed to force CPUs online.')
# Scaling governor must be set _after_ bringing all CPU cores online,
# otherwise it would not affect the cores that are currently offline.
self.SetScalingGovernor('performance')
self._SetMaxFrequenciesFromMode(high_perf_mode)
def SetLittleOnlyMode(self):
"""Turns off big CPU cores on the device."""
try:
self._device.EnableRoot()
except device_errors.CommandFailedError:
_NoisyWarning('Need root to turn off cores.')
return
mode_definitions = _GetPerfModeDefinitions(self._device.product_model)
if not mode_definitions:
_NoisyWarning('Unknown device: %s. Can\'t turn off cores.'
% self._device.product_model)
return
big_cores = mode_definitions.get('big_cores', [])
if not big_cores:
      _NoisyWarning('No big_cores in mode definition for device: %s.' %
                    self._device.product_model)
return
self._ForceCpusOffline(cpu_list=' '.join(big_cores))
def SetDefaultPerfMode(self):
"""Sets the performance mode for the device to its default mode."""
if not self._device.HasRoot():
return
mode_definitions = _GetPerfModeDefinitions(self._device.product_model)
if not mode_definitions:
self.SetScalingGovernor('ondemand')
else:
default_mode_governor = mode_definitions.get('default_mode_governor')
assert default_mode_governor, ('Default mode governor must be provided '
'for all perf mode definitions.')
self.SetScalingGovernor(default_mode_governor)
default_mode = mode_definitions.get('default_mode')
if default_mode:
self._SetMaxFrequenciesFromMode(default_mode)
self._ForceAllCpusOnline(False)
def SetPerfProfilingMode(self):
"""Enables all cores for reliable perf profiling."""
self._ForceAllCpusOnline(True)
self.SetScalingGovernor('performance')
if not self._AllCpusAreOnline():
if not self._device.HasRoot():
raise RuntimeError('Need root to force CPUs online.')
raise RuntimeError('Failed to force CPUs online.')
def GetCpuInfo(self):
online = (output.rstrip() == '1' and status == 0
for (_, output, status) in self._ForEachCpu('cat "$CPU/online"'))
governor = (
output.rstrip() if status == 0 else None
for (_, output,
status) in self._ForEachCpu('cat "$CPU/cpufreq/scaling_governor"'))
return list(zip(self._cpu_files, online, governor))
def _ForEachCpu(self, cmd, cpu_list=None):
"""Runs a command on the device for each of the CPUs.
Args:
      cmd: A string with a shell command; it may use shell expansion: "$CPU" to
refer to the current CPU in the string form (e.g. "cpu0", "cpu1",
and so on).
      cpu_list: A space-separated string of CPU core names, as in the example
        above.
Returns:
A list of tuples in the form (cpu_string, command_output, exit_code), one
tuple per each command invocation. As usual, all lines of the output
command are joined into one line with spaces.
"""
if cpu_list is None:
cpu_list = self._cpu_file_list
script = '; '.join([
'for CPU in %s' % cpu_list,
'do %s' % cmd, 'echo -n "%~%$?%~%"', 'done'
])
output = self._device.RunShellCommand(
script, cwd=self._CPU_PATH, check_return=True, as_root=True, shell=True)
output = '\n'.join(output).split('%~%')
return zip(self._cpu_files, output[0::2], (int(c) for c in output[1::2]))
def _ConditionallyWriteCpuFiles(self, path, value, cpu_files, condition):
template = (
'{condition} && test -e "$CPU/{path}" && echo {value} > "$CPU/{path}"')
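    # For example, path='cpufreq/scaling_max_freq', value=1536000 and
    # condition='true' render as:
    #   true && test -e "$CPU/cpufreq/scaling_max_freq"
    #        && echo 1536000 > "$CPU/cpufreq/scaling_max_freq"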
results = self._ForEachCpu(
template.format(path=path, value=value, condition=condition), cpu_files)
cpus = ' '.join(cpu for (cpu, _, status) in results if status == 0)
if cpus:
logger.info('Successfully set %s to %r on: %s', path, value, cpus)
else:
logger.warning('Failed to set %s to %r on any cpus', path, value)
def _WriteCpuFiles(self, path, value, cpu_files):
self._ConditionallyWriteCpuFiles(path, value, cpu_files, condition='true')
def _ReadEachCpuFile(self, path):
return self._ForEachCpu('cat "$CPU/{path}"'.format(path=path))
def SetScalingGovernor(self, value):
"""Sets the scaling governor to the given value on all possible CPUs.
This does not attempt to set a governor to a value not reported as available
on the corresponding CPU.
Args:
value: [string] The new governor value.
"""
condition = 'test -e "{path}" && grep -q {value} {path}'.format(
path=('${CPU}/%s' % self._AVAILABLE_GOVERNORS_REL_PATH), value=value)
self._ConditionallyWriteCpuFiles('cpufreq/scaling_governor', value,
self._cpu_file_list, condition)
def GetScalingGovernor(self):
"""Gets the currently set governor for each CPU.
Returns:
An iterable of 2-tuples, each containing the cpu and the current
governor.
"""
raw = self._ReadEachCpuFile('cpufreq/scaling_governor')
return [(cpu, raw_governor.strip() if not exit_code else None)
for cpu, raw_governor, exit_code in raw]
def ListAvailableGovernors(self):
"""Returns the list of available governors for each CPU.
Returns:
An iterable of 2-tuples, each containing the cpu and a list of available
governors for that cpu.
"""
return self._available_governors
def _SetScalingMaxFreqForCpus(self, value, cpu_files):
self._WriteCpuFiles('cpufreq/scaling_max_freq', '%d' % value, cpu_files)
def _SetMaxGpuClock(self, value):
self._device.WriteFile(
'/sys/class/kgsl/kgsl-3d0/max_gpuclk', str(value), as_root=True)
def _AllCpusAreOnline(self):
results = self._ForEachCpu('cat "$CPU/online"')
# The file 'cpu0/online' is missing on some devices (example: Nexus 9). This
# is likely because on these devices it is impossible to bring the cpu0
# offline. Assuming the same for all devices until proven otherwise.
return all(output.rstrip() == '1' and status == 0
for (cpu, output, status) in results if cpu != 'cpu0')
def _ForceAllCpusOnline(self, force_online):
"""Enable all CPUs on a device.
Some vendors (or only Qualcomm?) hot-plug their CPUs, which can add noise
to measurements:
- In perf, samples are only taken for the CPUs that are online when the
measurement is started.
- The scaling governor can't be set for an offline CPU and frequency scaling
on newly enabled CPUs adds noise to both perf and tracing measurements.
It appears Qualcomm is the only vendor that hot-plugs CPUs, and on Qualcomm
this is done by "mpdecision".
"""
if self._have_mpdecision:
cmd = ['stop', 'mpdecision'] if force_online else ['start', 'mpdecision']
self._device.RunShellCommand(cmd, check_return=True, as_root=True)
if not self._have_mpdecision and not self._AllCpusAreOnline():
logger.warning('Unexpected cpu hot plugging detected.')
if force_online:
self._ForEachCpu('echo 1 > "$CPU/online"')
def _ForceCpusOffline(self, cpu_list):
"""Disable selected CPUs on a device."""
if self._have_mpdecision:
cmd = ['stop', 'mpdecision']
self._device.RunShellCommand(cmd, check_return=True, as_root=True)
self._ForEachCpu('echo 0 > "$CPU/online"', cpu_list=cpu_list)
|
catapult-project/catapult
|
devil/devil/android/perf/perf_control.py
|
Python
|
bsd-3-clause
| 16,185
|
[
"Galaxy"
] |
eca164a6758916f7b988df698d45499160a53a46244782f83e86068e898efd3e
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import logging
from pylib import android_commands
from pylib.device import device_utils
class PerfControl(object):
"""Provides methods for setting the performance mode of a device."""
_CPU_PATH = '/sys/devices/system/cpu'
_KERNEL_MAX = '/sys/devices/system/cpu/kernel_max'
def __init__(self, device):
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, android_commands.AndroidCommands):
device = device_utils.DeviceUtils(device)
self._device = device
# this will raise an AdbCommandFailedError if no CPU files are found
self._cpu_files = self._device.RunShellCommand(
'ls -d cpu[0-9]*', cwd=self._CPU_PATH, check_return=True, as_root=True)
assert self._cpu_files, 'Failed to detect CPUs.'
self._cpu_file_list = ' '.join(self._cpu_files)
logging.info('CPUs found: %s', self._cpu_file_list)
self._have_mpdecision = self._device.FileExists('/system/bin/mpdecision')
def SetHighPerfMode(self):
"""Sets the highest stable performance mode for the device."""
if not self._device.HasRoot():
message = 'Need root for performance mode. Results may be NOISY!!'
logging.warning(message)
# Add an additional warning at exit, such that it's clear that any results
# may be different/noisy (due to the lack of intended performance mode).
atexit.register(logging.warning, message)
return
product_model = self._device.product_model
# TODO(epenner): Enable on all devices (http://crbug.com/383566)
if 'Nexus 4' == product_model:
self._ForceAllCpusOnline(True)
if not self._AllCpusAreOnline():
logging.warning('Failed to force CPUs online. Results may be NOISY!')
self._SetScalingGovernorInternal('performance')
elif 'Nexus 5' == product_model:
self._ForceAllCpusOnline(True)
if not self._AllCpusAreOnline():
logging.warning('Failed to force CPUs online. Results may be NOISY!')
self._SetScalingGovernorInternal('performance')
self._SetScalingMaxFreq(1190400)
self._SetMaxGpuClock(200000000)
else:
self._SetScalingGovernorInternal('performance')
def SetPerfProfilingMode(self):
"""Enables all cores for reliable perf profiling."""
self._ForceAllCpusOnline(True)
self._SetScalingGovernorInternal('performance')
if not self._AllCpusAreOnline():
if not self._device.HasRoot():
raise RuntimeError('Need root to force CPUs online.')
raise RuntimeError('Failed to force CPUs online.')
def SetDefaultPerfMode(self):
"""Sets the performance mode for the device to its default mode."""
if not self._device.HasRoot():
return
product_model = self._device.product_model
if 'Nexus 5' == product_model:
if self._AllCpusAreOnline():
self._SetScalingMaxFreq(2265600)
self._SetMaxGpuClock(450000000)
governor_mode = {
'GT-I9300': 'pegasusq',
'Galaxy Nexus': 'interactive',
'Nexus 4': 'ondemand',
'Nexus 5': 'ondemand',
'Nexus 7': 'interactive',
'Nexus 10': 'interactive'
}.get(product_model, 'ondemand')
self._SetScalingGovernorInternal(governor_mode)
self._ForceAllCpusOnline(False)
def GetCpuInfo(self):
online = (output.rstrip() == '1' and status == 0
for (_, output, status) in self._ForEachCpu('cat "$CPU/online"'))
governor = (output.rstrip() if status == 0 else None
for (_, output, status)
in self._ForEachCpu('cat "$CPU/cpufreq/scaling_governor"'))
return zip(self._cpu_files, online, governor)
def _ForEachCpu(self, cmd):
script = '; '.join([
'for CPU in %s' % self._cpu_file_list,
'do %s' % cmd,
'echo -n "%~%$?%~%"',
'done'
])
output = self._device.RunShellCommand(
script, cwd=self._CPU_PATH, check_return=True, as_root=True)
output = '\n'.join(output).split('%~%')
return zip(self._cpu_files, output[0::2], (int(c) for c in output[1::2]))
def _WriteEachCpuFile(self, path, value):
results = self._ForEachCpu(
'test -e "$CPU/{path}" && echo {value} > "$CPU/{path}"'.format(
path=path, value=value))
cpus = ' '.join(cpu for (cpu, _, status) in results if status == 0)
if cpus:
logging.info('Successfully set %s to %r on: %s', path, value, cpus)
else:
      logging.warning('Failed to set %s to %r on any cpus', path, value)
def _SetScalingGovernorInternal(self, value):
self._WriteEachCpuFile('cpufreq/scaling_governor', value)
def _SetScalingMaxFreq(self, value):
self._WriteEachCpuFile('cpufreq/scaling_max_freq', '%d' % value)
def _SetMaxGpuClock(self, value):
self._device.WriteFile('/sys/class/kgsl/kgsl-3d0/max_gpuclk',
str(value),
as_root=True)
def _AllCpusAreOnline(self):
results = self._ForEachCpu('cat "$CPU/online"')
# TODO(epenner): Investigate why file may be missing
# (http://crbug.com/397118)
return all(output.rstrip() == '1' and status == 0
for (cpu, output, status) in results
if cpu != 'cpu0')
def _ForceAllCpusOnline(self, force_online):
"""Enable all CPUs on a device.
Some vendors (or only Qualcomm?) hot-plug their CPUs, which can add noise
to measurements:
- In perf, samples are only taken for the CPUs that are online when the
measurement is started.
- The scaling governor can't be set for an offline CPU and frequency scaling
on newly enabled CPUs adds noise to both perf and tracing measurements.
It appears Qualcomm is the only vendor that hot-plugs CPUs, and on Qualcomm
this is done by "mpdecision".
"""
if self._have_mpdecision:
script = 'stop mpdecision' if force_online else 'start mpdecision'
self._device.RunShellCommand(script, check_return=True, as_root=True)
if not self._have_mpdecision and not self._AllCpusAreOnline():
logging.warning('Unexpected cpu hot plugging detected.')
if force_online:
self._ForEachCpu('echo 1 > "$CPU/online"')
|
collinjackson/mojo
|
build/android/pylib/perf/perf_control.py
|
Python
|
bsd-3-clause
| 6,284
|
[
"Galaxy"
] |
36a0c97befb1c66adcbff399e657f55aae6bfbb786d58d2e32e39129bd95ff8f
|
import numpy as np
import cv2
import sys
def uso():
    print 'Error: could not open the given image'
    print 'Usage: python2 ' + sys.argv[0] + ' src dst'
sys.exit()
def salt_and_pepper(src, a, b, Pa=0, Pb=255):
dst = np.copy(src)
for i in range(dst.shape[0]):
for j in range(dst.shape[1]):
if dst[i][j] == a:
dst[i][j] = Pa
elif dst[i][j] == b:
dst[i][j] = Pb
return dst
def gaussian(src, a, b):
dst = np.copy(src)
noise = np.random.normal(a, b, dst.shape)
for i in range(dst.shape[0]):
for j in range(dst.shape[1]):
dst[i][j] = dst[i][j] + noise[i][j]
return dst
def aniso_diff(img,niter=10,kappa=50,gamma=0.1,step=(1.,1.)):
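    # Perona-Malik anisotropic diffusion (edge-preserving smoothing): each
    # iteration takes south/east finite differences, attenuates them with the
    # conduction coefficient exp(-(grad/kappa)^2) scaled by the step sizes,
    # and adds gamma times the resulting divergence back onto the image.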
img = img.astype('float32')
imgout = img.copy()
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
for ii in xrange(niter):
deltaS[:-1,: ] = np.diff(imgout,axis=0)
deltaE[: ,:-1] = np.diff(imgout,axis=1)
gS = np.exp(-(deltaS/kappa)**2.)/step[0]
gE = np.exp(-(deltaE/kappa)**2.)/step[1]
S = gS*deltaS
E = gE*deltaE
NS[:] = S
EW[:] = E
NS[1:,:] -= S[:-1,:]
EW[:,1:] -= E[:,:-1]
imgout += gamma*(NS+EW)
return imgout
src = cv2.imread(sys.argv[1])
if src is None:
uso()
src = cv2.cvtColor(src, cv2.cv.CV_BGR2GRAY)
dst_sp = salt_and_pepper(src, 135, 17)
dst_ga = gaussian(src, 0.9, 1)
aniso = aniso_diff(src,gamma=0.25)
aniso_sp = aniso_diff(dst_sp,gamma=0.25)
aniso_ga = aniso_diff(dst_ga,gamma=0.25)
cv2.imwrite('src.jpg', src)
cv2.imwrite('dst_sp.jpg', dst_sp)
cv2.imwrite('dst_ga.jpg', dst_ga)
cv2.imwrite('aniso.jpg', aniso)
cv2.imwrite('aniso_sp.jpg', aniso_sp)
cv2.imwrite('aniso_ga.jpg', aniso_ga)
|
rerthal/mc920
|
proj07/proj07.py
|
Python
|
gpl-3.0
| 1,884
|
[
"Gaussian"
] |
89047028771a3e879f4aea85fcb8e219983c3dafe06e590176d0d2404db3f2bc
|
"""
Compatibility package with the high-level API of PyEVTK.
This package is meant as a drop-in replacement for high-level functions of
PyEVTK. However, it does not reproduce exactly the behavior of PyEVTK, most
notably:
- uvw.Writer keyword arguments (such as compression=True) can be passed to
the functions
- Data fields with any number of components are accepted. In PyEVTK, data
fields have to be either scalar or 3D by specifying a tuple of 3 Numpy
arrays. This is inconvenient and inefficient. UVW's native DataArray object
does not impose such requirements.
- The function imageToVTK() does not impose all dimensions to be of equal size
- Requirements on data are not enforced with assertions
- Data is written in base64 format instead of raw binary
Original version of PyEVTK by Paulo A. Herrera can be found at:
- https://github.com/paulo-herrera/PyEVTK
- https://github.com/pyscience-projects/pyevtk (corresponding to PyPI version)
Below is the copyright notice for PyEVTK:
MIT License
Copyright (c) 2010-2021 Paulo A. Herrera
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__copyright__ = "Copyright © 2018-2021 Lucas Frérot"
__license__ = "SPDX-License-Identifier: MIT"
from .. import vtk_files as vtk
from ..data_array import DataArray
import typing as ts
import numpy as np
from collections.abc import Sequence
DataType = ts.Mapping[
str, ts.Union[np.ndarray,
ts.Tuple[np.ndarray, np.ndarray, np.ndarray]]
]
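# Example of a DataType mapping (hypothetical arrays): {'p': p} for a scalar
# field, or the PyEVTK-style 3-tuple {'v': (vx, vy, vz)}, which _assemble()
# below packs into a single array with a trailing component axis.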
def _assemble(data):
if isinstance(data, Sequence):
assembled_data = np.zeros(list(data[0].shape) + [3],
dtype=data[0].dtype)
for i in range(3):
assembled_data[..., i] = data[i]
return assembled_data
return data
def _add_data(cellData, pointData, fieldData, fh, dims=range(3)):
valid_data = zip([cellData, pointData, fieldData],
['addCellData', 'addPointData', 'addFieldData'])
valid_data = filter(lambda d: d[0] is not None, valid_data)
for data, m in valid_data:
for k, v in data.items():
v = _assemble(v)
getattr(fh, m)(DataArray(v, dims, k))
def imageToVTK(
path: str,
origin: ts.Tuple[float, float, float] = (0.0, 0.0, 0.0),
spacing: ts.Tuple[float, float, float] = (1.0, 1.0, 1.0),
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""
Write data to an ImageData file.
:param path: path to file without extension
:param origin: 3-tuple giving the domain origin
:param spacing: 3-tuple giving the point spacing
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
Notes:
- domain shape is inferred from either cellData or pointData
- unlike PyEVTK arrays do not have to have the same dimension in each
direction
"""
if cellData is None and pointData is None:
raise ValueError("Cannot infer image size if cell "
"and point data are absent")
def _extract_shape(value, offset, name):
value = next(iter(value.values()))
if isinstance(value, np.ndarray) and value.ndim == 3:
return [s + offset for s in value.shape]
if isinstance(value, Sequence) and value[0].ndim == 3:
return [s + offset for s in value[0].shape]
raise RuntimeError(f"Invalid dimension of {name}")
# Deducing image size
if cellData:
shape = _extract_shape(cellData, 1, 'cellData')
elif pointData:
shape = _extract_shape(pointData, 0, 'pointData')
ranges = [(o, o + n * s) for o, n, s in zip(origin, shape, spacing)]
filename = path + '.vti'
with vtk.ImageData(filename, ranges, shape, **kwargs) as fh:
_add_data(cellData, pointData, fieldData, fh)
return filename
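# Minimal usage sketch (hypothetical data; name and spacing are arbitrary):
#   rho = np.random.rand(10, 12, 8)             # cell data of shape (nx, ny, nz)
#   imageToVTK('density', spacing=(0.5, 0.5, 0.5), cellData={'rho': rho})
# The point grid (11 x 13 x 9) is inferred from the cell data shape, and the
# full path 'density.vti' is returned.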
def rectilinearToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""Write data to a RectilinearGrid.
:param path: path to file without extension
:param x: 1d array of x coordinates
:param y: 1d array of y coordinates
:param z: 1d array of z coordinates
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
"""
filename = path + '.vtr'
with vtk.RectilinearGrid(filename, (x, y, z), **kwargs) as fh:
_add_data(cellData, pointData, fieldData, fh)
return filename
def structuredToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""Write data to a RectilinearGrid.
:param path: path to file without extension
:param x: 3d array of x coordinates
:param y: 3d array of y coordinates
:param z: 3d array of z coordinates
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
"""
filename = path + '.vts'
points = np.vstack([x.ravel(), y.ravel(), z.ravel()]).T
    shape = list(x.shape)  # meshgrid-style 3d coordinate arrays share the (nx, ny, nz) shape
with vtk.StructuredGrid(filename, points, shape, **kwargs) as fh:
_add_data(cellData, pointData, fieldData, fh)
return filename
def gridToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""
Write data to either a RectilinearGrid or StructuredGrid file.
:param path: path to file without extension
:param x: {1,3}-d array of x coordinates
:param y: {1,3}-d array of y coordinates
:param z: {1,3}-d array of z coordinates
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
Notes:
    - file type is inferred from the dimension of the coordinate arrays:
- 1D -> RectilinearGrid
- 3D -> StructuredGrid
3D grid coordinates can be obtained with numpy.meshgrid
"""
dims = [x.ndim, y.ndim, z.ndim]
if dims == [1] * 3:
return rectilinearToVTK(path, x, y, z,
cellData, pointData, fieldData, **kwargs)
if dims == [3] * 3:
return structuredToVTK(path, x, y, z,
cellData, pointData, fieldData, **kwargs)
raise ValueError(f"Wrong dimensions for x, y, z arrays: {dims}")
def pointsToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
data: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""
Write point set to UnstructuredGrid file.
:param path: path to file without extension
:param x: 1d array of x coordinates
:param y: 1d array of y coordinates
:param z: 1d array of z coordinates
:param data: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
"""
    filename = path + '.vtu'
coords = np.vstack([x.ravel(), y.ravel(), z.ravel()]).T
connectivity = {1: np.arange(coords.shape[0], dtype=np.int)}
with vtk.UnstructuredGrid(filename, coords, connectivity, **kwargs) as fh:
_add_data(None, data, fieldData, fh, [0])
return filename
def linesToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""
Write segment set to an UnstructuredGrid file.
:param path: path to file without extension
:param x: {1,3}-d array of x coordinates
:param y: {1,3}-d array of y coordinates
:param z: {1,3}-d array of z coordinates
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
Notes:
- number of points should be even
- in the final file, no point will appear twice in connectivity
      (i.e. the lines are all disconnected)
The above requirements are from pyevtk and are not restrictions of the file
format. Use uvw.UnstructuredGrid for more flexibility.
"""
points = np.vstack([x.ravel(), y.ravel(), z.ravel()]).T
    if points.shape[0] % 2 != 0:
raise ValueError('Number of points should be even')
connectivity = {
        3: np.arange(points.shape[0], dtype=np.int).reshape(-1, 2)
}
filename = path + '.vtu'
with vtk.UnstructuredGrid(filename, points, connectivity, **kwargs) as fh:
_add_data(cellData, pointData, fieldData, fh)
return filename
def polyLinesToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
pointsPerLine: np.ndarray,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""
Write segmented lines set to an UnstructuredGrid file.
:param path: path to file without extension
:param x: {1,3}-d array of x coordinates
:param y: {1,3}-d array of y coordinates
:param z: {1,3}-d array of z coordinates
:param pointsPerLine: number of points per segmented line
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
"""
points = np.vstack([x.ravel(), y.ravel(), z.ravel()]).T
    # Shift the cumulative sum by one line to get each polyline's start index.
    start_ids = np.add.accumulate(pointsPerLine, dtype=np.int) - pointsPerLine
connectivity = {4: np.array([
np.arange(N, dtype=np.int) + s
for N, s in zip(pointsPerLine, start_ids)
])}
filename = path + '.vtu'
with vtk.UnstructuredGrid(filename, points, connectivity, **kwargs) as fh:
_add_data(cellData, pointData, fieldData, fh)
return filename
def unstructuredGridToVTK(
path: str,
x: np.ndarray,
y: np.ndarray,
z: np.ndarray,
connectivity: np.ndarray,
offsets: np.ndarray,
cell_types: np.ndarray,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
) -> str:
"""Write UnstructuredGrid file.
:param path: path to file without extension
:param x: 1d array of x coordinates
:param y: 1d array of y coordinates
:param z: 1d array of z coordinates
:param connectivity: 1d array of nodes for each element
:param offsets: 1d array giving the position in connectivity of the first
element of each cell
:param cell_types: 1d array giving the type of each cell
:param cellData: dict of data defined on cells
:param pointData: dict of data defined on points
:param fieldData: dict of data with arbitrary support
:param **kwargs: passed on to uvw.Writer (e.g. compression=True)
:returns: full path of created file
    Note: this implementation does not use uvw.UnstructuredGrid because this
    function already receives connectivity data in the raw VTK layout that
    uvw.UnstructuredGrid would otherwise produce itself.
"""
filename = path + '.vtu'
points = np.vstack([x.ravel(), y.ravel(), z.ravel()]).T
ncells = cell_types.size
npoints = points.shape[0]
with vtk.VTKFile(filename, 'UnstructuredGrid', **kwargs) as fh:
        fh.piece.setAttributes({
'NumberOfPoints': str(npoints),
'NumberOfCells': str(ncells),
})
fh.piece.register('Points').registerDataArray(
DataArray(points, [0], 'points'),
)
cells_component = fh.register('Cells')
connectivity_data = {
"connectivity": connectivity,
"offsets": offsets,
"types": cell_types,
}
for label, array in connectivity_data.items():
cells_component.registerDataArray(
DataArray(array, [0], label),
)
        _add_data(cellData, pointData, fieldData, fh)
    return filename
def cylinderToVTK(
path: str,
x0: float,
y0: float,
z0: float,
z1: float,
radius: float,
nlayers: int,
npilars: int = 16,
cellData: DataType = None,
pointData: DataType = None,
fieldData: DataType = None,
**kwargs
):
"""Not yet implemented."""
raise NotImplementedError()
|
prs513rosewood/uvw
|
uvw/dropin/hl.py
|
Python
|
mit
| 14,613
|
[
"VTK"
] |
98d922fb6c80f110e58c06cb4880876245e0c30e0bb6e83144f2966bd7cc009c
|
# pre_NAMD.py
# Creates the files used for NAMD based on the .pdb file downloaded from the PDB bank
#
# Usage:
# python pre_NAMD.py $PDBID
#
# $PDBID=the 4 characters identification code of the .pdb file
#
# Input:
# $PDBID.pdb: .pdb file downloaded from PDB bank
#
# Output:
# $PDBID_p.pdb: .pdb file with water molecules removed
# $PDBID_p_h.pdb: .pdb file with water removed and hydrogen atoms added
# $PDBID_p_h.psf: .psf file of $PDBID_p_h.pdb
# $PDBID_p_h.log: Log file of adding hydrogen atoms
# $PDBID_wb.pdb: .pdb file of the water box model
# $PDBID_wb.psf: .psf file of $PDBID_wb.pdb
# $PDBID_wb.log: Log file of the water box model generation
# $PDBID_wb_i.pdb: .pdb file of the ionized water box model (For NAMD)
# $PDBID_wb_i.psf: .psf file of PDBID_wb_i.pdb (For NAMD)
# $PDBID.log: Log file of the whole process (output of VMD)
# $PDBID_center.txt: File contains the grid and center information of
# the ionized water box model
#
# Author: Xiaofei Zhang
# Date: June 20 2016
from __future__ import print_function
import sys, os
def print_error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# main
if len(sys.argv) != 2:
print_error("Usage: python pre_NAMD.py $PDBID")
sys.exit(-1)
mypath = os.path.realpath(__file__)
tclpath = os.path.split(mypath)[0] + os.path.sep + 'tcl' + os.path.sep
pdbid = sys.argv[1]
logfile = pdbid+'.log'
# Set the correct path to the VMD binary
vmd = "/Volumes/VMD-1.9.2/VMD 1.9.2.app/Contents/vmd/vmd_MACOSXX86"
print("Input: "+pdbid+".pdb")
# Create .psf
print("Create PSF file...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'pdb_to_psf.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
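# For pdbid='1ABC' (a hypothetical ID) the command line renders roughly as:
#   "<vmd>" -dispdev text -eofexit < <tclpath>pdb_to_psf.tcl -args 1ABC>> 1ABC.log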
os.system(cmdline)
|
Xiaofei-Zhang/NAMD_Docking_pipeline
|
pre_NAMD/pdb_to_psf.py
|
Python
|
mit
| 1,736
|
[
"NAMD",
"VMD"
] |
c977c9765f1dbd46182ea3da0f85312d7f02529433c080b10472d2ac4229cabb
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import tests_common
@utx.skipIfMissingFeatures(["MASS", "ROTATIONAL_INERTIA"])
class RotationalInertia(ut.TestCase):
longMessage = True
# Handle for espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.cell_system.skin = 0
# Particle's angular momentum: initial and ongoing
L_0_lab = np.zeros((3))
L_lab = np.zeros((3))
# Angular momentum
def L_body(self, part):
return self.system.part[part].omega_body[:] * \
self.system.part[part].rinertia[:]
# Set the angular momentum
def set_L_0(self, part):
L_0_body = self.L_body(part)
self.L_0_lab = tests_common.convert_vec_body_to_space(
self.system, part, L_0_body)
def set_L(self, part):
L_body = self.L_body(part)
self.L_lab = tests_common.convert_vec_body_to_space(
self.system, part, L_body)
def test_stability(self):
self.system.part.clear()
self.system.part.add(
pos=np.array([0.0, 0.0, 0.0]), id=0, rotation=(1, 1, 1))
# Inertial motion around the stable and unstable axes
tol = 4E-3
# Anisotropic inertial moment. Stable axes correspond to J[1] and J[2].
        # The unstable axis corresponds to J[0]. These values satisfy
        # J[1] < J[0] < J[2].
J = np.array([5, 0.5, 18.5])
self.system.part[0].rinertia = J[:]
# Validation of J[1] stability
# ----------------------------
self.system.time_step = 0.0006
# Stable omega component should be larger than other components.
stable_omega = 57.65
self.system.part[0].omega_body = np.array([0.15, stable_omega, -0.043])
self.set_L_0(0)
for i in range(100):
self.set_L(0)
for k in range(3):
self.assertAlmostEqual(
self.L_lab[k], self.L_0_lab[k], delta=tol,
msg='Inertial motion around stable axis J1: Deviation in '
'angular momentum is too large. Step {0}, coordinate '
'{1}, expected {2}, got {3}'.format(
i, k, self.L_0_lab[k], self.L_lab[k]))
self.assertAlmostEqual(
self.system.part[0].omega_body[1], stable_omega, delta=tol,
msg='Inertial motion around stable axis J1: Deviation in omega '
'is too large. Step {0}, coordinate 1, expected {1}, got {2}'
.format(i, stable_omega, self.system.part[0].omega_body[1]))
self.system.integrator.run(10)
# Validation of J[2] stability
# ----------------------------
self.system.time_step = 0.01
# Stable omega component should be larger than other components.
stable_omega = 3.2
self.system.part[0].omega_body = np.array(
[0.011, -0.043, stable_omega])
self.set_L_0(0)
for i in range(100):
self.set_L(0)
for k in range(3):
self.assertAlmostEqual(
self.L_lab[k], self.L_0_lab[k], delta=tol,
msg='Inertial motion around stable axis J2: Deviation in '
'angular momentum is too large. Step {0}, coordinate '
'{1}, expected {2}, got {3}'.format(
i, k, self.L_0_lab[k], self.L_lab[k]))
self.assertAlmostEqual(
self.system.part[0].omega_body[2], stable_omega, delta=tol,
msg='Inertial motion around stable axis J2: Deviation in omega '
'is too large. Step {0}, coordinate 2, expected {1}, got {2}'
.format(i, stable_omega, self.system.part[0].omega_body[2]))
self.system.integrator.run(10)
# Validation of J[0]
# ------------------
self.system.time_step = 0.001
# Unstable omega component should be larger than other components.
unstable_omega = 5.76
self.system.part[0].omega_body = np.array(
[unstable_omega, -0.043, 0.15])
self.set_L_0(0)
for i in range(100):
self.set_L(0)
for k in range(3):
self.assertAlmostEqual(
self.L_lab[k], self.L_0_lab[k], delta=tol,
msg='Inertial motion around stable axis J0: Deviation in '
'angular momentum is too large. Step {0}, coordinate '
'{1}, expected {2}, got {3}'.format(
i, k, self.L_0_lab[k], self.L_lab[k]))
self.system.integrator.run(10)
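    # Body-frame rotational kinetic energy E = 1/2 * sum_i I_i * omega_i^2 and
    # angular momentum magnitude |L| = |I * omega|, used as conservation checks.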
def energy(self, p):
return 0.5 * np.dot(p.rinertia, p.omega_body**2)
def momentum(self, p):
return np.linalg.norm(p.rinertia * p.omega_body)
def test_energy_and_momentum_conservation(self):
system = self.system
system.part.clear()
system.thermostat.turn_off()
p = system.part.add(pos=(0, 0, 0), rinertia=(1.1, 1.3, 1.5),
rotation=(1, 1, 1), omega_body=(2, 1, 4))
E0 = self.energy(p)
m0 = self.momentum(p)
system.time_step = 0.001
for _ in range(1000):
system.integrator.run(100)
self.assertAlmostEqual(self.energy(p), E0, places=3)
self.assertAlmostEqual(self.momentum(p), m0, places=3)
if __name__ == '__main__':
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/rotational_inertia.py
|
Python
|
gpl-3.0
| 6,230
|
[
"ESPResSo"
] |
0aa41c27f6a325752b37e7b512f18e0002ab57bfcf39a3727c6a7a6ab3200d1f
|
"""Import tasks for GAIA."""
import csv
import os
import re
from astrocats.catalog.utils import jd_to_mjd, pbar
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_gaia(catalog):
"""Import from the GAIA alerts page."""
task_str = catalog.get_current_task_str()
fname = os.path.join(catalog.get_current_task_repo(), 'GAIA/alerts.csv')
csvtxt = catalog.load_url('http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv',
fname)
if not csvtxt:
return
tsvin = list(
csv.reader(
csvtxt.splitlines(), delimiter=',', skipinitialspace=True))
reference = 'Gaia Photometric Science Alerts'
refurl = 'http://gsaweb.ast.cam.ac.uk/alerts/alertsindex'
loopcnt = 0
for ri, row in enumerate(pbar(tsvin, task_str)):
if ri == 0 or not row:
continue
name = catalog.add_entry(row[0])
source = catalog.entries[name].add_source(name=reference, url=refurl)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
year = '20' + re.findall(r'\d+', row[0])[0]
catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
source)
catalog.entries[name].add_quantity(
SUPERNOVA.RA, row[2], source, u_value='floatdegrees')
catalog.entries[name].add_quantity(
SUPERNOVA.DEC, row[3], source, u_value='floatdegrees')
if row[7] and row[7] != 'unknown':
type = row[7].replace('SNe', '').replace('SN', '').strip()
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, type,
source)
elif any([
xx in row[9].upper()
for xx in ['SN CANDIATE', 'CANDIDATE SN', 'HOSTLESS SN']
]):
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
'Candidate', source)
if ('aka' in row[9].replace('gakaxy', 'galaxy').lower() and
'AKARI' not in row[9]):
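            # Extract an alias from comments like 'aka PSNJ...' or
            # 'aka ASASSN15ab' (normalized below to 'PSN J...' and
            # 'ASASSN-15ab' respectively).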
commentsplit = (row[9].replace('_', ' ').replace('MLS ', 'MLS')
.replace('CSS ', 'CSS').replace('SN iPTF', 'iPTF')
.replace('SN ', 'SN').replace('AT ', 'AT'))
commentsplit = commentsplit.split()
for csi, cs in enumerate(commentsplit):
if 'aka' in cs.lower() and csi < len(commentsplit) - 1:
alias = commentsplit[csi + 1].strip('(),:.').replace(
'PSNJ', 'PSN J')
if alias[:6] == 'ASASSN' and alias[6] != '-':
alias = 'ASASSN-' + alias[6:]
if alias.lower() != 'master':
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,
alias, source)
break
fname = os.path.join(catalog.get_current_task_repo(),
'GAIA/') + row[0] + '.csv'
csvtxt = catalog.load_url('http://gsaweb.ast.cam.ac.uk/alerts/alert/' +
row[0] + '/lightcurve.csv', fname)
if csvtxt:
tsvin2 = csv.reader(csvtxt.splitlines())
for ri2, row2 in enumerate(tsvin2):
if ri2 <= 1 or not row2:
continue
mjd = str(jd_to_mjd(Decimal(row2[1].strip())))
magnitude = row2[2].strip()
if magnitude == 'null':
continue
e_mag = 0.
telescope = 'GAIA'
band = 'G'
catalog.entries[name].add_photometry(
time=mjd,
u_time='MJD',
telescope=telescope,
band=band,
magnitude=magnitude,
e_magnitude=e_mag,
source=source)
if catalog.args.update:
catalog.journal_entries()
loopcnt = loopcnt + 1
if catalog.args.travis and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0:
break
catalog.journal_entries()
return
|
astrocatalogs/supernovae
|
tasks/gaia.py
|
Python
|
mit
| 4,220
|
[
"Galaxy"
] |
2e5355c12ee576bba2717f48ddcd071e9d2dd654291a58d0757f9254bd3d0b4c
|
# -*- coding: utf-8 -*-
"""
Generate centreline and write it out as .vtk legacy format.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Import path for the CentrelineGenerator script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../util'))
if not importPath in sys.path:
sys.path.insert(1, importPath)
del importPath
import CentrelineGenerator
# A centreline for a mesh with 4000 cores.
CentrelineGenerator.segmentList = [13.0, None, None]
CentrelineGenerator.radiusBase = 1.2732395447351628
CentrelineGenerator.outputFileName = "c512Centreline.vtk"
CentrelineGenerator.sphereRadius = None
def main():
# CentrelineGenerator.GenerateCentreline(CentrelineGenerator.BuildDecreasingRadiiScalars)
CentrelineGenerator.GenerateCentreline()
if __name__ == '__main__':
print "Starting", os.path.basename(__file__)
main()
print "Exiting", os.path.basename(__file__)
|
BlueFern/DBiharMesher
|
meshes/c512/Generate512Centreline.py
|
Python
|
gpl-2.0
| 975
|
[
"VTK"
] |
52781b8815741cc74ecd13a78964de20d1a7e0b2737160d42c8cd4362baa36fe
|
""" Variable Unittest. """
import cwt, os
# datainputs = """[domain=[{"name":"d0","lat":{"start":70,"end":90,"system":"values"},"lon":{"start":5,"end":45,"system":"values"}}],
# variable=[{"uri":"file:///dass/nobackup/tpmaxwel/.edas/cache/collections/NCML/MERRA_TAS1hr.ncml","name":"tas:v1","domain":"d0"}],
# operation=[{"name":"CDSpark.average","input":"v1","domain":"d0","axes":"xy"}]]"""
class DemoWorkflow:
def run( self ):
d0 = cwt.Domain([], name="d0")
op1 = cwt.Operation.from_dict( { 'name': "CDSpark.multiAverage" } )
op1.add_input( cwt.Variable("file:///dass/nobackup/tpmaxwel/.edas/cache/collections/NCML/MERRA_TAS1hr.ncml", "tas" ) )
op3 = cwt.Operation.from_dict( { 'name': 'CDSpark.regrid', 'crs':'gaussian~128' } )
op3.add_input( op1 )
op2 = cwt.Operation.from_dict( { 'name': "CDSpark.multiAverage" } )
for i in range(1,3): op2.add_input( cwt.Variable('collection:/GISS-E2-R_r%di1p1'%(i), "tas" ) )
op4 = cwt.Operation.from_dict( { 'name': 'CDSpark.regrid', 'crs':'gaussian~128' } )
op4.add_input( op2 )
op5 = cwt.Operation.from_dict( { 'name': 'CDSpark.multiAverage' } )
op5.add_input( op3 )
op5.add_input( op4 )
wps = cwt.WPS( 'http://localhost:9001/wps', log=True, log_file=os.path.expanduser("~/esgf_api.log") )
wps.init()
process = cwt.Process( wps, op5 )
process.execute( None, d0, [], True, True, "GET" )
executor = DemoWorkflow()
executor.run()
|
nasa-nccs-cds/EDAS
|
python/test/localPerformanceTests/DASS_CWTPythonClientTest.py
|
Python
|
gpl-2.0
| 1,508
|
[
"Gaussian"
] |
f1c72a0928eb9e6f7ed0cd2c69b0967f2f44f753360efc1f123629b5733c3044
|
"""
A number of argparse helpers, generally using custom argument typing for
additional input value transformations.
"""
import argparse
import re
import sys
from typing import List, Tuple, Union
def add_standard_params(parser: argparse.ArgumentParser) -> None:
"""
Adds -v, -p and -t.
"""
parser.add_argument("-v", "--verbose",
dest="verbose",
help="Print debug info to stdout",
action='store_true')
parser.add_argument("-p", "--post-to-github",
help="Post all issues to GitHub instead of stdout",
action="store_true")
parser.add_argument("-t", "--time-log-dir",
help="If set, time logs are written to that directory",
action="store", default=None)
def add_event_param(
parser: argparse.ArgumentParser,
dest: str = "event",
required: bool = False,
template: str = "{}y visit",
accepted_regex: str = r'^\d+$',
keep_nonmatch: bool = False) -> None:
"""
Handles and transforms arbitrarily entered events.
"""
nargs = '+' if required else '*'
def __event_handler(arg: str) -> Union[str, None]:
if re.match(accepted_regex, arg):
return template.format(arg)
elif keep_nonmatch:
return arg
else:
return None
parser.add_argument("-e", "--event",
help=("Event name matching regex {}, before "
"transformation to {}").format(accepted_regex,
template),
nargs=nargs,
required=required,
type=__event_handler)
def add_subject_param(parser: argparse.ArgumentParser,
dest="subject",
required: bool = False,
choices: List[str] = None) -> None:
"""
Add parameter for subject ID entry.
"""
nargs = '+' if required else '*'
parser.add_argument("-s", "--study-id", "--subject",
help="Subject IDs that the script should affect",
nargs=nargs,
dest=dest,
choices=choices,
# metavar="STUDY_ID"
required=required,
action="store")
def add_form_param(parser: argparse.ArgumentParser,
dest='forms',
required: bool = False,
                   eligible_forms: List[Tuple[str, ...]] = [],
raise_missing: bool = True,
short_switch="-i") -> None:
"""
Add the forms parameter, with checking for form aliases.
eligible_forms: a list of tuples, where the first element in the tuple is
the canonical name of the form, and the remaining are allowable names
for the form
short_switch: the "short" option - `-i` by default (short for instruments),
but some CLIs use -f to mean --forms instead of --force.
"""
# _eligible_forms = []
# for x in eligible_forms:
# if isinstance(x, string_types):
# _eligible_forms.append((x))
# else:
# _eligible_forms.append(x)
def __form_handler(arg):
if len(eligible_forms) == 0:
return arg
forms_found = [x for x in eligible_forms if arg in x]
if len(forms_found) == 1:
return forms_found[0][0]
elif not raise_missing:
return None
else:
if len(forms_found) == 0:
raise ValueError("{} not found in eligible forms {}"
.format(arg, eligible_forms))
elif len(forms_found) > 1:
raise ValueError("Ambiguous {}, found in following forms: {}"
.format(arg, forms_found))
nargs = '+' if required else '*'
parser.add_argument(short_switch, '--forms', '--form', '--instrument',
help="Forms that the script should affect",
dest=dest,
nargs=nargs,
required=required,
type=__form_handler)
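# Example of the alias resolution above (hypothetical aliases): with
# eligible_forms=[('limesurvey_ssaga_youth', 'lssaga_youth')], passing
# "lssaga_youth" resolves to the canonical 'limesurvey_ssaga_youth';
# an unknown or ambiguous name raises ValueError unless raise_missing
# is False.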
def transform_commas_in_args(arg_list: List[str] = sys.argv[1:]) -> List[str]:
"""
    Split comma-separated arguments into separate arguments.
    Limited, in that it splits on *any* comma, which might actually be a
    valid part of a filename etc. Ideally, this would be part of an
    argparse.Action.
"""
_arg_list = []
for x in arg_list:
if "," in x:
_arg_list.extend(x.split(","))
else:
_arg_list.append(x)
return _arg_list
# if __name__ == '__main__':
# args = transform_commas_in_args(sys.argv[1:])
# parser = argparse.ArgumentParser()
# add_event_param(parser, template="visit_{}")
# add_subject_param(parser)
# add_form_param(parser, eligible_forms=[
# ('limesurvey_ssaga_youth', 'lssaga_youth', 'lssaga1_youth')
# ])
# add_standard_params(parser)
# print(parser.parse_args(args))
|
sibis-platform/sibis
|
cli.py
|
Python
|
bsd-3-clause
| 5,215
|
[
"VisIt"
] |
9ebdb37aa2a78480b289f381b90012d1f345e748136bdf884185ba1cdb1b3c47
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import pytest
import time
from django.test.utils import override_settings
from django.utils.translation import activate, get_language
from shuup.testing import factories
from shuup.testing.browser_utils import (
click_element,
initialize_admin_browser_test,
page_has_loaded,
wait_until_appeared,
wait_until_condition,
)
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
@pytest.mark.parametrize("default_language", ["it", "pt-br", "fi"])
@pytest.mark.django_db
@pytest.mark.skipif(os.environ.get("SHUUP_TESTS_CI", "0") == "1", reason="Disable when run in CI.")
def test_xtheme_plugin_form_language_order(admin_user, browser, live_server, settings, default_language):
"""
    Test that the first language option is the Parler default.

    Note that we check that the page has loaded and additionally sleep for 1 second.
    This is especially necessary inside iframes. In this test, when we click to add a new
    plugin row or select a row, the iframe content is replaced through a request, much like
    an internal link when the user clicks an anchor. We have to make sure the NEW content
    is loaded before doing any element checks, because the iframe won't find the correct
    elements if we start checking before the new content has loaded.
    """
with override_settings(PARLER_DEFAULT_LANGUAGE_CODE=default_language):
browser = initialize_admin_browser_test(browser, live_server, settings)
browser.visit(live_server + "/")
# Start edit
wait_until_condition(browser, lambda x: page_has_loaded(x), timeout=20)
wait_until_appeared(browser, ".xt-edit-toggle button[type='submit']")
click_element(browser, ".xt-edit-toggle button[type='submit']")
placeholder_selector = "#xt-ph-front_content-xtheme-person-contact-layout"
placeholder_name = "front_content"
wait_until_condition(browser, lambda x: x.is_element_present_by_css(placeholder_selector))
click_element(browser, placeholder_selector)
with browser.get_iframe("xt-edit-sidebar-iframe") as iframe:
# make sure all scripts are loaded
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
wait_until_condition(iframe, lambda x: x.is_text_present("Edit Placeholder: %s" % placeholder_name))
wait_until_appeared(iframe, "button.layout-add-row-btn")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# click to add a new row
click_element(iframe, "button.layout-add-row-btn")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# select the last row (the added one)
click_element(iframe, "button.layout-add-row-btn")
iframe.find_by_css("div.layout-cell").last.click()
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# select the TextPlugin
wait_until_appeared(iframe, "select[name='general-plugin']")
click_element(iframe, "#select2-id_general-plugin-container")
wait_until_appeared(iframe, "input.select2-search__field")
iframe.find_by_css("input.select2-search__field").first.value = "Text"
wait_until_appeared(browser, ".select2-results__option:not([aria-live='assertive'])")
iframe.execute_script('$($(".select2-results__option")[1]).trigger({type: "mouseup"})')
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
wait_until_appeared(iframe, "ul.editor-tabs")
# check the languages order
languages = [el.text for el in iframe.find_by_css("ul.editor-tabs li a")]
assert languages[0] == default_language
@pytest.mark.parametrize("language", ["it", "pt-br", "fi", "en"])
@pytest.mark.django_db
@pytest.mark.skipif(os.environ.get("SHUUP_TESTS_CI", "0") == "1", reason="Disable when run in CI.")
def test_xtheme_plugin_form_selected_language_pane(admin_user, browser, live_server, settings, language):
"""
Test that the current language is selected by default
"""
browser = initialize_admin_browser_test(browser, live_server, settings, language=language)
browser.visit(live_server + "/")
# Start edit
wait_until_condition(browser, lambda x: page_has_loaded(x), timeout=20)
wait_until_appeared(browser, ".xt-edit-toggle button[type='submit']")
click_element(browser, ".xt-edit-toggle button[type='submit']")
placeholder_selector = "#xt-ph-front_content-xtheme-person-contact-layout"
wait_until_condition(browser, lambda x: x.is_element_present_by_css(placeholder_selector))
click_element(browser, placeholder_selector)
with browser.get_iframe("xt-edit-sidebar-iframe") as iframe:
# make sure all scripts are loaded
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
wait_until_condition(iframe, lambda x: x.is_text_present("front_content"))
wait_until_appeared(iframe, "button.layout-add-row-btn")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# click to add a new row
click_element(iframe, "button.layout-add-row-btn")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# select the last row (the added one)
click_element(iframe, "button.layout-add-row-btn")
iframe.find_by_css("div.layout-cell").last.click()
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# select the TextPlugin
wait_until_appeared(iframe, "select[name='general-plugin']")
click_element(iframe, "#select2-id_general-plugin-container")
wait_until_appeared(iframe, "input.select2-search__field")
iframe.find_by_css("input.select2-search__field").first.value = "Text"
wait_until_appeared(browser, ".select2-results__option:not([aria-live='assertive'])")
iframe.execute_script('$($(".select2-results__option")[1]).trigger({type: "mouseup"})')
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
wait_until_appeared(iframe, "ul.editor-tabs")
# check the active language
assert language == iframe.find_by_css("ul.editor-tabs li.active a").first.text
@pytest.mark.django_db
@pytest.mark.skipif(os.environ.get("SHUUP_TESTS_CI", "0") == "1", reason="Disable when run in CI.")
def test_xtheme_editor_form_picture(admin_user, browser, live_server, settings):
"""
    Test that it is possible to add an image from the media browser
"""
browser = initialize_admin_browser_test(browser, live_server, settings)
browser.visit(live_server + "/")
wait_until_condition(browser, lambda x: page_has_loaded(x), timeout=20)
wait_until_appeared(browser, ".xt-edit-toggle button[type='submit']")
click_element(browser, ".xt-edit-toggle button[type='submit']")
placeholder_selector = "#xt-ph-front_content-xtheme-person-contact-layout"
wait_until_condition(browser, lambda x: x.is_element_present_by_css(placeholder_selector))
click_element(browser, placeholder_selector)
with browser.get_iframe("xt-edit-sidebar-iframe") as iframe:
# make sure all scripts are loaded
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
wait_until_condition(iframe, lambda x: x.is_text_present("front_content"))
wait_until_appeared(iframe, "button.layout-add-row-btn")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# click to add a new row
click_element(iframe, "button.layout-add-row-btn")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# select the last row (the added one)
click_element(iframe, "button.layout-add-row-btn")
iframe.find_by_css("div.layout-cell").last.click()
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
# select the TextPlugin
wait_until_appeared(iframe, "select[name='general-plugin']")
iframe.select("general-plugin", "text")
time.sleep(1)
wait_until_condition(iframe, lambda x: page_has_loaded(x), timeout=20)
wait_until_appeared(iframe, "ul.editor-tabs")
filer_image = factories.get_random_filer_image()
wait_until_appeared(browser, "#id_plugin-text_en-editor-wrap button[aria-label='Picture']")
click_element(browser, "#id_plugin-text_en-editor-wrap button[aria-label='Picture']")
wait_until_condition(browser, lambda b: len(b.windows) == 2, timeout=20)
# change to the media browser window
browser.windows.current = browser.windows[1]
# click to select the picture
wait_until_appeared(browser, "a.file-preview")
browser.find_by_css("a.file-preview").first.click()
# back to the main window
wait_until_condition(browser, lambda b: len(b.windows) == 1)
browser.windows.current = browser.windows[0]
# make sure the image was added to the editor
wait_until_appeared(
browser, "#id_plugin-text_en-editor-wrap .note-editable img[src='%s']" % filer_image.url, timeout=20
)
|
shoopio/shoop
|
shuup_tests/browser/front/test_xtheme_plugin_form.py
|
Python
|
agpl-3.0
| 9,875
|
[
"VisIt"
] |
2a936da4407b944ce36724b68f59ce1bc84103c4f30dc969b698ff1f59d2b725
|
import sys
sys.path.insert(1, "../../")
import h2o
def get_model_test(ip,port):
# Connect to h2o
h2o.init(ip,port)
prostate = h2o.import_frame(path=h2o.locate("smalldata/logreg/prostate.csv"))
r = prostate[0].runif()
train = prostate[r < 0.70]
test = prostate[r >= 0.70]
# Regression
regression_gbm1 = h2o.gbm(y=train[1], x=train[2:9], distribution="gaussian")
predictions1 = regression_gbm1.predict(test)
regression_gbm2 = h2o.get_model(regression_gbm1._id)
assert regression_gbm2._model_json['output']['model_category'] == "Regression"
predictions2 = regression_gbm2.predict(test)
for r in range(predictions1.nrow()):
p1 = predictions1[r,0]
p2 = predictions2[r,0]
assert p1 == p2, "expected regression predictions to be the same for row {}, but got {} and {}".format(r, p1, p2)
# Binomial
train[1] = train[1].asfactor()
bernoulli_gbm1 = h2o.gbm(y=train[1], x=train[2:], distribution="bernoulli")
predictions1 = bernoulli_gbm1.predict(test)
bernoulli_gbm2 = h2o.get_model(bernoulli_gbm1._id)
assert bernoulli_gbm2._model_json['output']['model_category'] == "Binomial"
predictions2 = bernoulli_gbm2.predict(test)
for r in range(predictions1.nrow()):
p1 = predictions1[r,0]
p2 = predictions2[r,0]
assert p1 == p2, "expected binomial predictions to be the same for row {}, but got {} and {}".format(r, p1, p2)
# Clustering
benign_h2o = h2o.import_frame(path=h2o.locate("smalldata/logreg/benign.csv"))
km_h2o = h2o.kmeans(x=benign_h2o, k=3)
benign_km = h2o.get_model(km_h2o._id)
assert benign_km._model_json['output']['model_category'] == "Clustering"
# Multinomial
train[4] = train[4].asfactor()
multinomial_dl1 = h2o.deeplearning(x=train[0:2], y=train[4], loss='CrossEntropy')
predictions1 = multinomial_dl1.predict(test)
multinomial_dl2 = h2o.get_model(multinomial_dl1._id)
assert multinomial_dl2._model_json['output']['model_category'] == "Multinomial"
predictions2 = multinomial_dl2.predict(test)
for r in range(predictions1.nrow()):
p1 = predictions1[r,0]
p2 = predictions2[r,0]
assert p1 == p2, "expected multinomial predictions to be the same for row {0}, but got {1} and {2}" \
"".format(r, p1, p2)
if __name__ == "__main__":
h2o.run_test(sys.argv, get_model_test)
|
ChristosChristofidis/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_get_model.py
|
Python
|
apache-2.0
| 2,421
|
[
"Gaussian"
] |
8e188de430d0e61163cbda5a0933fc0264fb78d3d6de45fc2e10d910f165c132
|
"""This demo solves the Stokes equations, using quadratic elements for
the velocity and first degree elements for the pressure (Taylor-Hood
elements). The sub domains for the different boundary conditions
used in this simulation are computed by the demo program in
src/demo/mesh/subdomains."""
# Copyright (C) 2007 Kristian B. Oelgaard
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg, 2008-2009.
#
# First added: 2007-11-16
# Last changed: 2009-11-26
from dolfin import *
# Load mesh and subdomains
mesh = Mesh("dolfin-2.xml.gz")
sub_domains = MeshFunction("size_t", mesh, "../subdomains.xml.gz")
# Define function spaces
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
W = V * Q
# No-slip boundary condition for velocity
noslip = Constant((0, 0))
bc0 = DirichletBC(W.sub(0), noslip, sub_domains, 0)
# Inflow boundary condition for velocity
inflow = Expression(("-sin(x[1]*pi)", "0.0"))
bc1 = DirichletBC(W.sub(0), inflow, sub_domains, 1)
# Boundary condition for pressure at outflow
zero = Constant(0)
bc2 = DirichletBC(W.sub(1), zero, sub_domains, 2)
# Collect boundary conditions
bcs = [bc0, bc1, bc2]
# Define variational problem
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
f = Constant((0, 0))
a = (inner(grad(u), grad(v)) - div(v)*p + q*div(u))*dx
L = inner(f, v)*dx
# Compute solution
w = Function(W)
solve(a == L, w, bcs)
# Split the mixed solution using deepcopy
# (needed for further computation on coefficient vector)
(u, p) = w.split(True)
print "Norm of velocity coefficient vector: %.15g" % u.vector().norm("l2")
print "Norm of pressure coefficient vector: %.15g" % p.vector().norm("l2")
# Split the mixed solution using a shallow copy
(u, p) = w.split()
# Save solution in VTK format
ufile_pvd = File("velocity.pvd")
ufile_pvd << u
pfile_pvd = File("pressure.pvd")
pfile_pvd << p
# Plot solution
plot(u)
plot(p)
interactive()
|
alogg/dolfin
|
demo/undocumented/stokes-taylor-hood/python/demo_stokes-taylor-hood.py
|
Python
|
gpl-3.0
| 2,540
|
[
"VTK"
] |
5c378175855faa55c264b50a5ab2273350b64b42b237b5869052d564c9db14b4
|
# Script Version: 1.0
# Author: Tsz Kwong Lee
# Project: AMA3D
# Task Step: 4
# This script uses information from PDB to select the best CATH topology representative among all homologous domains.
# CathDomainList File format: CLF format 2.0, to find more info, please visit www.cathdb.info
# R observed - Residual factor R for reflections that satisfy the resolution and observation limits. This quantity includes reflections set aside for cross-validation in a "free" R-factor set.
# R all - Residual factor R for all reflections that satisfy the limits established by high and low resolution cut-offs. It includes reflections which do not satisfy the sigma cutoff criteria as well as those set aside for cross-validation in a "free" R-factor set.
# R work - Residual factor R for reflections that satisfy the resolution and observation limits and that were used as the working reflections in the refinement. This quantity does not include reflections set aside for cross-validation in a "free" R-factor set.
# R free - Residual factor R for reflections that satisfy the resolution and observation limits and that were excluded from refinement to be used for cross-validation in a "free" R-factor set. R free is a less biased metric, and is typically higher than R work. If R free is significantly higher than R work, the structural model may be overfitted.
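# A minimal sketch of the R-value preference implied by the notes above;
# the 1.25 scaling of R all / R observed to approximate R free is an
# assumption mirrored in setup_domain() below.
def pick_r_value(r_free, r_all, r_observed):
    """Return the preferred R value as a float, or None if unavailable."""
    if r_free != 'null':
        return float(r_free)
    if r_all != 'null':
        return float(r_all) * 1.25
    if r_observed != 'null':
        return float(r_observed) * 1.25
    return None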
import re
import sys
import urllib2
import subprocess
import xmltodict
import xml.etree.ElementTree as ET
from Domain import domain
from Domain import segment
import time
import math
import requests
import MySQLdb  # database access for the Topology/Domain tables at the bottom of the script
R_CUT = 0.20
RESO_CUT = 2.0
CHAIN_LEN = 80
file_name = "CathDomainDescriptionFile"
#return a score to help in ranking the domain
def filter_pdb(domain, topo):
""" (Domain, String) -> int
    This function takes a Domain object (prepared by setup_domain) and a String (the name of the topology).
    It returns a score for the domain that is used for future ranking and also writes the result to the
    Domain_Result file.
    """
try:
pdb_id = domain.pdb_id
R = domain.r
reso = domain.reso
mutation = domain.mutation
if passCutOff(domain):
print "The domain passed the cutoff "
score = motif_score(domain)
print "The score of this domain is: %f" %(score)
# Write result to a tab delimited file.
with open("Domain_Result", "a") as f:
f.write("%s\t%s\t%.3f\t%.2f\t%d\t%.2f\n" % (topo, pdb_id, R, reso, mutation, score))
return score
else:
print "The domain did not pass the cutoff "
with open("Domain_Result", "a") as f:
f.write("%s\t%s\t%.3f\t%.2f\t%d\tnull\n" % (topo, pdb_id, R, reso, mutation))
return -1
    except Exception, err:
        pdb_id = domain.pdb_id
        print err
        print "Error! Exiting filter_pdb"
        # R, reso and mutation may be unbound if the exception was raised
        # before they were read from the domain, so write nulls instead
        with open("Domain_Result", "a") as f:
            f.write("%s\t%s\tnull\tnull\tnull\tnull\n" % (topo, pdb_id))
        return -1
#Return a new domain class object that includes the info we need for calculating the score
def setup_domain(domain_name):
"""String -> Domain
    This function will take a String (the name of the domain), set up a Domain class object and return it.
    A domain class object will contain the following information: 1) domain name, 2) PDB ID, 3) Chain Character,
    4) number of segments, 5) the starting position and end position of each segment, 6) experiment method,
    7) R value, 8) Resolution, 9) Length of Domain, 10) number of mutations, 11) Contains prosthetic group or not
    The above information is found in the following sources: 1) PDB Custom Report, 2) CATH Domain Description File,
    3) PDB Website
    """
print "setting up protein domain %s" %(domain_name)
try:
counter = 0
#create new domain object
d = domain.newdomain(domain_name)
pdb_id = d.pdb_id
#get data from PDB, store as an xml
print "getting information of experimental technique, R value and resolution"
pdb_info = requests.get('http://www.rcsb.org/pdb/rest/customReport.xml?pdbids=' + pdb_id +
'&customReportColumns=experimentalTechnique,rObserved,rAll,rFree,refinementResolution,chainLength')
check_Requests(pdb_info)
dictionary = xmltodict.parse(pdb_info.text)
info_dataset = dictionary['dataset']['record']
#This means the protein has multiple chains
if type(info_dataset) is list:
#here we don't care which chain to look into since method, R value and resolution are the same for all chains
info = info_dataset[0]
#this means the protein has only one chain
else:
info = info_dataset
method = info['dimStructure.experimentalTechnique']
if (info['dimStructure.rFree'] == 'null'):
if (info['dimStructure.rAll'] == 'null'):
if (info['dimStructure.rObserved'] == 'null'):
R = None
else:
R = float(info['dimStructure.rObserved'])*1.25
else:
R = float(info['dimStructure.rAll'])*1.25
else:
R = float(info['dimStructure.rFree'])
if (info['dimStructure.refinementResolution'] == 'null'):
reso = None
else:
reso = float(info['dimStructure.refinementResolution'])
d.setMethod(method)
d.setR(R)
d.setReso(reso)
print "getting information of domain length and segments"
#get data from Cath Domain Description File
file = open(file_name, 'r')
for line in file:
#Search for domain name that starts with the PDB_id
if "DOMAIN" in line and d.name in line:
#Search for the line that contains the domain length
for line in file:
if "DLENGTH" in line:
domainLength = trim(line)
d.setLength(int(domainLength))
break
#Search for the number of segments and append to the domain info
for line in file:
if "NSEGMENTS" in line:
numSegments = int(trim(line))
d.num_segment = numSegments
counter = numSegments
break
segment_list = setupSegmentList(domain_name, counter)
d.segment = segment_list
mutation = hasMutation(d)
print "getting information of mutation and prosthetic group"
d.setMutation(mutation)
d.sethasProstheticGrp(has_prosthetic_grp(d))
print "setup is completed"
return d
except Exception, err:
print "Error! %s" %(err)
#create new domain object
d = domain.newdomain(domain_name)
pdb_id = d.pdb_id
return d
#--------------------- Helper functions for filter_pdb ---------------------------------------
#check if the domain pass all the cutoffs
def passCutOff(domain):
"""Domain -> Boolean
    This function takes a Domain Class Object and evaluates it to check whether the domain passes the
    cutoffs required for it to be included in ranking.
    """
"""
if domain.method != 'X-RAY DIFFRACTION':
print "method does not meet cutoff"
return False
elif domain.r > 0.25:
print "R value does not meet cutoff"
return False
elif domain.reso > 2.2:
print "Resolution does not meet cutoff"
return False
elif domain.length < 80:
print "Domain Length does not meet cutoff"
return False
elif domain.mutation > 5:
print "Number of mutations does not meet cutoff"
return False
else:
return True
#Calculate the score for the Domain used for future ranking
def motif_score(domain):
"""Domain -> int
    This function takes a Domain Class Object, runs a few calculations and returns the score of the
    domain for future ranking.
    """
print "Calculating the score for the domain"
length = domain.length
bFactorAdjust = logisticFunction_BFactor(domain)
mutationAdjust = domain.mutation*5
if domain.hasProstheticGrp == False:
pAdjust = 20
else:
pAdjust = 0
resoAdjust = logisticFunction_Reso(domain)
rAdjust = logisticFunction_Rvalue(domain)
occupancyAdjust = occupancyTest(domain)
score = (length - bFactorAdjust - mutationAdjust - pAdjust - occupancyAdjust) * resoAdjust * rAdjust
return score
#----------------- Helper functions for Setting up the Domain----------------------
def setupSegmentList(domain_name, counter):
"""(String, int) -> list of Segments
This function takes a String (domain name) and an int (counter = number of segments). It returns
a list of Segment Class Objects.
    A segment class object contains the following: 1) segment number, 2) start residue position,
    3) start residue, 4) end residue position, 5) end residue
    The above information is collected from CATH Domain Description File
"""
file = open(file_name, 'r')
segmentlist = []
print "setting up segments"
for line in file:
#Search for domain name
if "DOMAIN" in line and domain_name in line:
#Search for Segment range and append to the Domain info
for line in file:
if "SRANGE" in line and counter > 0:
seg = segment.newsegment()
                    # CLF SRANGE lines look like "SRANGE  START=363  STOP=440";
                    # parse the two positions with a regex instead of relying
                    # on the exact number of separating spaces
                    match = re.search(r'START=(-?\d+)\s+STOP=(-?\d+)', line)
                    startpos = match.group(1)
                    stoppos = match.group(2)
seg.startpos = int(startpos)
seg.endpos = int(stoppos)
seg.num = len(segmentlist) + 1
sequence = getDomainSequence(domain_name)
seg.startres = sequence[0]
seg.endres = sequence[-1]
segmentlist.append(seg)
counter = counter - 1
if counter == 0:
break
return segmentlist
#Helper function to get Domain Sequence based on the domain name
#require the exact domain name (i.e. 3h3uA01)
def getDomainSequence(name):
"""String -> String
    This function takes the domain name and returns the sequence of the domain as a String
"""
file = open(file_name, 'r')
domainLength = 0
domainSequence = ""
for line in file:
if "DOMAIN" in line and name in line:
#Search for the line that contains the domain length
for line in file:
if "DLENGTH" in line:
domainLength = int(trim(line))
break
#Search for the line that contains the domain sequence
for line in file:
if "DSEQS" in line and len(domainSequence) < domainLength:
domainSequence = domainSequence + trim(line)
continue
file.close()
return domainSequence
#return the number of mutations located in the domain
def hasMutation (domain):
"""Domain -> int
This function takes a Domain Class object and returns the number of mutation that the domain contains
"""
pdb_info = requests.get('http://www.rcsb.org/pdb/files/' + domain.pdb_id +'.pdb')
check_Requests(pdb_info)
num = 0
for line in pdb_info.iter_lines():
if ('COMPND' in line and 'MUTATION' in line):
mutationpos = getmutpos(domain.pdb_id)
for segment in domain.segment:
for mut in mutationpos:
if ((int(mut) >= segment.startpos) and (int(mut) <= segment.endpos)):
num = num + 1
return num
#return the sequence number where the mutation occur
def getmutpos (pdb_id):
"""String -> list of int
    This function takes the PDB ID of the domain and returns a list of sequence numbers where
    a mutation occurs
"""
pdb_info = requests.get('http://www.rcsb.org/pdb/explore.do?structureId='+ pdb_id)
check_Requests(pdb_info)
mutation = []
    # iterate over decoded lines; iterating the Response object itself
    # yields raw byte chunks rather than lines, and one shared iterator
    # lets the inner loop resume where the outer loop stopped
    lines = pdb_info.iter_lines()
    for line in lines:
        if '<td class="entityDetails">' in line:
            for line in lines:
if 'Mutation:' in line:
split_line = line.split(":")[-1]
mut_str = split_line.replace(" ","").split(",")
for s in mut_str:
s1 = re.sub("[^0-9]", "", s)
mutation.append(s1)
break
return mutation
def has_prosthetic_grp (domain):
"""Domain -> boolean
This function takes a Domain class object and returns a boolean that indicates
    whether the domain has a prosthetic group
"""
pdb_info = requests.get('http://www.rcsb.org/pdb/files/' + domain.pdb_id +'.pdb')
check_Requests(pdb_info)
reach = 0
for line in pdb_info.iter_lines():
if not('TITLE' in line):
if reach != 0:
return True
else:
reach = reach + 1
if 'APO' in line:
return False
return True
#--------Helper Functions for B-Factor, Reso, R-value correction, and Occupancy ----------------------
def logisticFunction_BFactor(domain):
"""Domain -> int
    This function is responsible for calculating the score of the B-factor using a logistic function
"""
factor_total = 0
pdbID = domain.pdb_id
pdb_info = requests.get('http://www.rcsb.org/pdb/files/'+ pdbID+'.pdb')
check_Requests(pdb_info)
for line in pdb_info.iter_lines():
if (line[0:4] == 'ATOM'):
serial_num = int(line[22:26].strip())
for segment in domain.segment:
if (serial_num >= segment.startpos and serial_num <= segment.endpos):
BFactor = float(line[60:66].strip())
factor_total = factor_total + (1 - logisticFunction(-0.22, BFactor, 45))
return factor_total
def logisticFunction_Reso(domain):
"""Domain -> int
    This function is responsible for calculating the score of resolution using a logistic function
"""
reso = domain.reso
score = logisticFunction(-7.324, reso, 2.1)
return score
def logisticFunction_Rvalue(domain):
"""Domain -> int
    This function is responsible for calculating the score of the R value using a logistic function
"""
r = domain.r
reso = domain.reso
x = (10*r)/reso
score = logisticFunction(-10.986, x, 1.5)
return score
def logisticFunction(k, x, mid):
""" (int, int, int) -> int
This function is performs the calculation of a logistic function
"""
#k is positive because we are looking for the portion that we can't trust
exponent = -k*(x - mid)
result = 1/(1+ math.exp(exponent))
return result
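# Worked check of the logistic parameters used above (illustrative only):
# for the resolution score, k = -7.324 and mid = 2.1, so a resolution of
# exactly 2.1 gives 1/(1 + e^0) = 0.5, while a better resolution of 2.0
# gives exponent -k*(x - mid) = 7.324*(-0.1) = -0.7324 and a score of
# about 1/(1 + e^-0.7324) ~= 0.675; smaller (better) resolutions push
# the score toward 1.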
def occupancyTest (domain):
"""Domain -> int
    This function is responsible for performing the occupancy test. Residues with occupancy less
    than 1 should be discarded. This function returns the number of residues that should be discarded
"""
pdbID = domain.pdb_id
pdb_info = requests.get('http://www.rcsb.org/pdb/files/'+ pdbID+'.pdb')
check_Requests(pdb_info)
occupancy_serial_num = []
for line in pdb_info.iter_lines():
if (line[0:4] == 'ATOM'):
serial_num = int(line[22:26].strip())
for segment in domain.segment:
if (serial_num >= segment.startpos and serial_num <= segment.endpos):
occupancy = float(line[55:60].strip())
if (occupancy != 1.0 and serial_num not in occupancy_serial_num):
occupancy_serial_num.append(serial_num)
return len(occupancy_serial_num)
#------------------------ Other Helper Function----------------------------
#This is a helper function to cut out all whitespaces and \n in a line
def trim(string):
"""String -> String
This function cuts all the whitespaces and \n in a line
"""
concat = string.split(" ")[-1]
    concat = concat.strip()  # strings are immutable; strip() returns a new string
formedString = concat.split("\n")[0]
return formedString
def check_Requests(result):
"""int -> null
This function takes the response code returns by request. If the response code
is not 200, than it raises an exception and exits the program
"""
print result
if not (result == 200):
try:
result.raise_for_status()
except requests.exceptions.RequestException as e:
print e
print 'exiting program'
sys.exit()
# Read system input.
topo_list = sys.argv[1:]
# Connect to Database by reading Account File.
file = open("Account", "r")
parsed = file.readline().split()
DB = MySQLdb.connect(host=parsed[0], user=parsed[1], passwd=parsed[2], db=parsed[3])
cursor = DB.cursor()
# Find the best representative for each topology node.
for topo in topo_list:
print "Working on Topology: " + topo
best_representative, best_score = "", 0
cursor.execute("""SELECT Name FROM Domain WHERE TopoNode = \'%s\' """%(topo))
domain_list = cursor.fetchall()
for domain_name in domain_list:
newdomain = setup_domain(domain_name)
print "Calculating score for %s "%(domain_name)
cur_score = filter_pdb(newdomain, topo)
if cur_score > best_score:
best_score = cur_score
best_representative = domain_name[0]
if best_representative != "":
with open("Domain_Result", "a") as f:
f.write("BEST REPRESENTATIVE\t%s\t%s\n" % (topo, best_representative[:4]))
cursor.execute("""UPDATE Topology SET Representative = \'%s\', Score = \'%.3f\' WHERE Node = \'%s\'"""%(best_representative, best_score, topo))
else:
print "Cannot find a representative"
# Wrap up and close connection.
DB.commit()
DB.close()
|
teheavy/AMA3D
|
Nh3D/4_CathBestTopo_filter_V2.1.py
|
Python
|
gpl-2.0
| 16,076
|
[
"VisIt"
] |
8fa373633c428a5d14b0ec067e0f4617a60334bc3698a37af2e16907fc7e5f43
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for HMMER table output format."""
from itertools import chain
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio.Alphabet import generic_protein
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
__all__ = ['Hmmer3TabParser', 'Hmmer3TabIndexer', 'Hmmer3TabWriter']
__docformat__ = "restructuredtext en"
class Hmmer3TabParser(object):
"""Parser for the HMMER table format."""
def __init__(self, handle):
self.handle = handle
self.line = self.handle.readline()
def __iter__(self):
header_mark = '#'
# read through the header if it exists
while self.line.startswith(header_mark):
self.line = self.handle.readline()
# if we have result rows, parse it
if self.line:
for qresult in self._parse_qresult():
yield qresult
def _parse_row(self):
"""Returns a dictionary of parsed row values."""
cols = [x for x in self.line.strip().split(' ') if x]
# if len(cols) > 19, we have extra description columns
# combine them all into one string in the 19th column
if len(cols) > 19:
cols[18] = ' '.join(cols[18:])
# if it's < 19, we have no description columns, so use an empty string
# instead
elif len(cols) < 19:
cols.append('')
assert len(cols) == 19
# assign parsed column data into qresult, hit, and hsp dicts
qresult = {}
qresult['id'] = cols[2] # query name
qresult['accession'] = cols[3] # query accession
hit = {}
hit['id'] = cols[0] # target name
hit['accession'] = cols[1] # target accession
hit['evalue'] = float(cols[4]) # evalue (full sequence)
hit['bitscore'] = float(cols[5]) # score (full sequence)
hit['bias'] = float(cols[6]) # bias (full sequence)
hit['domain_exp_num'] = float(cols[10]) # exp
hit['region_num'] = int(cols[11]) # reg
hit['cluster_num'] = int(cols[12]) # clu
hit['overlap_num'] = int(cols[13]) # ov
hit['env_num'] = int(cols[14]) # env
hit['domain_obs_num'] = int(cols[15]) # dom
hit['domain_reported_num'] = int(cols[16]) # rep
hit['domain_included_num'] = int(cols[17]) # inc
hit['description'] = cols[18] # description of target
hsp = {}
hsp['evalue'] = float(cols[7]) # evalue (best 1 domain)
hsp['bitscore'] = float(cols[8]) # score (best 1 domain)
hsp['bias'] = float(cols[9]) # bias (best 1 domain)
# strand is always 0, since HMMER now only handles protein
frag = {}
frag['hit_strand'] = frag['query_strand'] = 0
frag['alphabet'] = generic_protein
return {'qresult': qresult, 'hit': hit, 'hsp': hsp, 'frag': frag}
def _parse_qresult(self):
"""Generator function that returns QueryResult objects."""
# state values, determines what to do for each line
state_EOF = 0
state_QRES_NEW = 1
state_QRES_SAME = 3
# initial value dummies
qres_state = None
file_state = None
prev_qid = None
cur, prev = None, None
# container for Hit objects, used to create QueryResult
hit_list = []
while True:
# store previous line's parsed values for all lines after the first
if cur is not None:
prev = cur
prev_qid = cur_qid
# only parse the result row if it's not EOF
# NOTE: we are not parsing the extra '#' lines appended to the end
# of hmmer31b1 tabular results since storing them in qresult
# objects means we can not do a single-pass parsing
if self.line and not self.line.startswith('#'):
cur = self._parse_row()
cur_qid = cur['qresult']['id']
else:
file_state = state_EOF
# mock value for cur_qid, since we have nothing to parse
cur_qid = None
if prev_qid != cur_qid:
qres_state = state_QRES_NEW
else:
qres_state = state_QRES_SAME
if prev is not None:
# since domain tab formats only have 1 Hit per line
# we always create HSPFragment, HSP, and Hit per line
prev_hid = prev['hit']['id']
# create fragment and HSP and set their attributes
frag = HSPFragment(prev_hid, prev_qid)
for attr, value in prev['frag'].items():
setattr(frag, attr, value)
hsp = HSP([frag])
for attr, value in prev['hsp'].items():
setattr(hsp, attr, value)
# create Hit and set its attributes
hit = Hit([hsp])
for attr, value in prev['hit'].items():
setattr(hit, attr, value)
hit_list.append(hit)
# create qresult and yield if we're at a new qresult or at EOF
if qres_state == state_QRES_NEW or file_state == state_EOF:
qresult = QueryResult(hit_list, prev_qid)
for attr, value in prev['qresult'].items():
setattr(qresult, attr, value)
yield qresult
# if we're at EOF, break
if file_state == state_EOF:
break
hit_list = []
self.line = self.handle.readline()
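# A minimal usage sketch: this parser is normally reached through the
# Bio.SearchIO front end rather than instantiated directly, e.g.
# (hypothetical file name):
#
#     from Bio import SearchIO
#     for qresult in SearchIO.parse('output.tbl', 'hmmer3-tab'):
#         print(qresult.id, len(qresult))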
class Hmmer3TabIndexer(SearchIndexer):
"""Indexer class for HMMER table output."""
_parser = Hmmer3TabParser
# denotes column location for query identifier
_query_id_idx = 2
def __iter__(self):
"""Iterates over the file handle; yields key, start offset, and length."""
handle = self._handle
handle.seek(0)
query_id_idx = self._query_id_idx
qresult_key = None
header_mark = _as_bytes('#')
split_mark = _as_bytes(' ')
# set line with initial mock value, to emulate header
line = header_mark
# read through header
while line.startswith(header_mark):
start_offset = handle.tell()
line = handle.readline()
# and index the qresults
while True:
end_offset = handle.tell()
if not line:
break
cols = [x for x in line.strip().split(split_mark) if x]
if qresult_key is None:
qresult_key = cols[query_id_idx]
else:
curr_key = cols[query_id_idx]
if curr_key != qresult_key:
adj_end = end_offset - len(line)
yield _bytes_to_string(qresult_key), start_offset, \
adj_end - start_offset
qresult_key = curr_key
start_offset = adj_end
line = handle.readline()
if not line:
yield _bytes_to_string(qresult_key), start_offset, \
end_offset - start_offset
break
def get_raw(self, offset):
"""Returns the raw string of a QueryResult object from the given offset."""
handle = self._handle
handle.seek(offset)
query_id_idx = self._query_id_idx
qresult_key = None
qresult_raw = _as_bytes('')
split_mark = _as_bytes(' ')
while True:
line = handle.readline()
if not line:
break
cols = [x for x in line.strip().split(split_mark) if x]
if qresult_key is None:
qresult_key = cols[query_id_idx]
else:
curr_key = cols[query_id_idx]
if curr_key != qresult_key:
break
qresult_raw += line
return qresult_raw
class Hmmer3TabWriter(object):
"""Writer for hmmer3-tab output format."""
def __init__(self, handle):
self.handle = handle
def write_file(self, qresults):
"""Writes to the handle.
Returns a tuple of how many QueryResult, Hit, and HSP objects were written.
"""
handle = self.handle
qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
try:
first_qresult = next(qresults)
except StopIteration:
handle.write(self._build_header())
else:
# write header
handle.write(self._build_header(first_qresult))
# and then the qresults
for qresult in chain([first_qresult], qresults):
if qresult:
handle.write(self._build_row(qresult))
qresult_counter += 1
hit_counter += len(qresult)
hsp_counter += sum(len(hit) for hit in qresult)
frag_counter += sum(len(hit.fragments) for hit in qresult)
return qresult_counter, hit_counter, hsp_counter, frag_counter
def _build_header(self, first_qresult=None):
"""Returns the header string of a HMMER table output."""
# calculate whitespace required
# adapted from HMMER's source: src/p7_tophits.c#L1083
if first_qresult is not None:
# qnamew = max(20, len(first_qresult.id))
qnamew = 20 # why doesn't the above work?
tnamew = max(20, len(first_qresult[0].id))
qaccw = max(10, len(first_qresult.accession))
taccw = max(10, len(first_qresult[0].accession))
else:
qnamew, tnamew, qaccw, taccw = 20, 20, 10, 10
header = "#%*s %22s %22s %33s\n" % \
(tnamew + qnamew + taccw + qaccw + 2, "",
"--- full sequence ----", "--- best 1 domain ----",
"--- domain number estimation ----")
header += "#%-*s %-*s %-*s %-*s %9s %6s %5s %9s %6s %5s %5s %3s " \
"%3s %3s %3s %3s %3s %3s %s\n" % (tnamew - 1, " target name",
taccw, "accession", qnamew, "query name", qaccw,
"accession", " E-value", " score", " bias",
" E-value", " score", " bias", "exp",
"reg", "clu", " ov", "env", "dom", "rep",
"inc", "description of target")
header += "#%*s %*s %*s %*s %9s %6s %5s %9s %6s %5s %5s %3s %3s " \
"%3s %3s %3s %3s %3s %s\n" % (tnamew - 1, "-------------------",
taccw, "----------", qnamew, "--------------------", qaccw,
"----------", "---------", "------", "-----", "---------",
"------", "-----", "---", "---", "---", "---", "---", "---",
"---", "---", "---------------------")
return header
def _build_row(self, qresult):
"""Returns a string or one row or more of the QueryResult object."""
rows = ''
# calculate whitespace required
# adapted from HMMER's source: src/p7_tophits.c#L1083
qnamew = max(20, len(qresult.id))
tnamew = max(20, len(qresult[0].id))
qaccw = max(10, len(qresult.accession))
taccw = max(10, len(qresult[0].accession))
for hit in qresult:
rows += "%-*s %-*s %-*s %-*s %9.2g %6.1f %5.1f %9.2g %6.1f %5.1f " \
"%5.1f %3d %3d %3d %3d %3d %3d %3d %s\n" % (tnamew, hit.id, taccw,
hit.accession, qnamew, qresult.id, qaccw, qresult.accession, hit.evalue,
hit.bitscore, hit.bias, hit.hsps[0].evalue, hit.hsps[0].bitscore,
hit.hsps[0].bias, hit.domain_exp_num, hit.region_num, hit.cluster_num,
hit.overlap_num, hit.env_num, hit.domain_obs_num,
hit.domain_reported_num, hit.domain_included_num, hit.description)
return rows
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SearchIO/HmmerIO/hmmer3_tab.py
|
Python
|
apache-2.0
| 12,539
|
[
"Biopython"
] |
27eec85b9740a91ad866e392288c0da51c7fa41ab3b4bd5303e9c2bfb79d3021
|
#! /usr/bin/python
#
# Copyright (C) 2003-2014 ABINIT group
#
# Written by Matthieu Verstraete in python (compatible v1.9).
# This is free software, and you are welcome to redistribute it
# under certain conditions (GNU General Public License,
# see ~abinit/COPYING or http://www.gnu.org/copyleft/gpl.txt).
#
# ABINIT is a project of the Universite Catholique de Louvain,
# Corning Inc. and other collaborators, see ~abinit/doc/developers/contributors.txt.
# Please read ~abinit/doc/users/acknowledgments.html for suggested
# acknowledgments of the ABINIT effort.
#
# For more information, see http://www.abinit.org .
#
# This module makes a file into a list of tokens, conserving
# or eliminating comments of the type "# bla bla \n"
# Probably redundant with other, more powerful python modules, but hey.
#
def tokenize_file(infilename):
"""Take input file name and return list of tokens, eliminating comments"""
# reg expss
import re
import sys
import fileinput
import math
#
# open and parse input
#
try:
fp = open (infilename, 'r')
except IOError:
print "Error opening file"
raise
lines = fp.readlines ()
#
# put all tokens into tokens and remove comments
#
tokens = []
for line in lines:
tmp = re.split ('[ \t\n]*',line)
# print "tmp = ", tmp
for tok in tmp:
if (tok != ''):
if (re.compile('[#!][.]*').match(tok)):
break
tokens.append(tok)
# print "tokens = ", tokens
fp.close()
return tokens
def tokenize_file_keep_comments(infilename):
"""Take input file name and return list of tokens, keep comments"""
# reg expss
import re
import sys
import fileinput
import math
#
# open and parse input
#
try:
fp = open (infilename, 'r')
except IOError:
print "Error opening file"
raise
lines = fp.readlines ()
#
# put all tokens into tokens and remove comments
#
tokens = []
for line in lines:
tmp = re.split ('[ \t\n]*',line)
# print "tmp = ", tmp
for tok in tmp:
if (tok != ''):
tokens.append(tok)
# print "tokens = ", tokens
fp.close()
return tokens
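# A minimal usage sketch (hypothetical file name):
#
#     tokens = tokenize_file('run.abi')
#     tokens_with_comments = tokenize_file_keep_comments('run.abi')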
|
SamKChang/abinit-7.10.5_multipole
|
scripts/pre_processing/tokenize_file.py
|
Python
|
gpl-3.0
| 2,175
|
[
"ABINIT"
] |
f3fa19858cc3035ace25fede2acd6dfeeac5bc4265721b75ed3b5018ad8827d5
|
import operator
from functools import reduce
def get(caves, i, j):
if i < 0 or i >= len(caves):
return None
if j < 0 or j >= len(caves[i]):
return None
return caves[i][j]
modifiers = [
(-1, 0),
(1, 0),
(0, -1),
(0, +1)
]
with open("inputs/day9.txt") as f:
caves = []
for line in f:
caves.append([int(x) for x in line.strip()])
total_risk = 0
for i in range(0, len(caves)):
for j in range(0, len(caves[i])):
#print(i, j, caves[i][j])
val = caves[i][j]
surrounding = []
for (a,b) in modifiers:
x = get(caves, i+a, j+b)
if x is not None:
surrounding.append(x)
if min(surrounding) > val:
risk = 1 + val
total_risk += risk
print("part1:", total_risk)
def visit(caves, i, j):
#print("visiting %d,%d" % (i,j))
count = 1
caves[i][j] = 'V'
for (a,b) in modifiers:
x = get(caves, i+a, j+b)
if x not in [None, 9, 'V']:
#print(x)
count += visit(caves, i+a, j+b)
return count
def printCaves(caves):
print('#'*len(caves[0]))
for i in range(0, len(caves)):
print(''.join([str(x) for x in caves[i]]))
print('#'*len(caves[0]))
with open("inputs/day9.txt") as f:
caves = []
for line in f:
caves.append([int(x) for x in line.strip()])
#printCaves(caves)
basins = []
for i in range(0, len(caves)):
for j in range(0, len(caves[i])):
val = caves[i][j]
if val in [9, 'V']:
continue
size = visit(caves, i, j)
#print("Basin of size:", size)
basins.append(size)
#printCaves(caves)
basins.sort()
#print(basins)
prod = reduce(operator.mul, basins[-3:], 1)
print("part2:", prod)
|
astonshane/AdventOfCode
|
2021/day9.py
|
Python
|
mit
| 1,915
|
[
"VisIt"
] |
03740afade353fa6f4ddea5308be17ef6d792986d5aaaaea11c1e9934365898f
|
"""
This module contains classes related to data visualization and rendering.
**Rendering:**
* :py:class:`RenderSettings`
* :py:class:`Viewport`
* :py:class:`ViewportConfiguration`
**Render engines:**
* :py:class:`OpenGLRenderer`
* :py:class:`TachyonRenderer`
* :py:class:`POVRayRenderer`
**Data visualization:**
* :py:class:`Display` (base class of all display classes below)
* :py:class:`BondsDisplay`
* :py:class:`DislocationDisplay`
* :py:class:`ParticleDisplay`
* :py:class:`SimulationCellDisplay`
* :py:class:`SurfaceMeshDisplay`
* :py:class:`VectorDisplay`
**Viewport overlays:**
* :py:class:`CoordinateTripodOverlay`
* :py:class:`PythonViewportOverlay`
* :py:class:`TextLabelOverlay`
"""
import sip
import PyQt5.QtGui
# Load the native modules.
from PyScriptRendering import *
from PyScriptViewport import *
import ovito
def _get_RenderSettings_custom_range(self):
"""
Specifies the range of animation frames to render if :py:attr:`.range` is ``CUSTOM_INTERVAL``.
:Default: ``(0,100)``
"""
return (self.customRangeStart, self.customRangeEnd)
def _set_RenderSettings_custom_range(self, range):
if len(range) != 2: raise TypeError("Tuple or list of length two expected.")
self.customRangeStart = range[0]
self.customRangeEnd = range[1]
RenderSettings.custom_range = property(_get_RenderSettings_custom_range, _set_RenderSettings_custom_range)
def _get_RenderSettings_size(self):
"""
The size of the image or movie to be generated in pixels.
:Default: ``(640,480)``
"""
return (self.outputImageWidth, self.outputImageHeight)
def _set_RenderSettings_size(self, size):
if len(size) != 2: raise TypeError("Tuple or list of length two expected.")
self.outputImageWidth = size[0]
self.outputImageHeight = size[1]
RenderSettings.size = property(_get_RenderSettings_size, _set_RenderSettings_size)
def _get_RenderSettings_filename(self):
"""
A string specifying the file path under which the rendered image or movie should be saved.
:Default: ``None``
"""
if self.saveToFile and self.imageFilename: return self.imageFilename
else: return None
def _set_RenderSettings_filename(self, filename):
if filename:
self.imageFilename = filename
self.saveToFile = True
else:
self.saveToFile = False
RenderSettings.filename = property(_get_RenderSettings_filename, _set_RenderSettings_filename)
# Implement FrameBuffer.image property (requires conversion to SIP).
def _get_FrameBuffer_image(self):
return PyQt5.QtGui.QImage(sip.wrapinstance(self._image, PyQt5.QtGui.QImage))
FrameBuffer.image = property(_get_FrameBuffer_image)
def _Viewport_render(self, settings = None):
""" Renders an image or movie of the viewport's view.
:param settings: A settings object, which specifies the resolution, background color, output filename etc. of the image to be rendered.
If ``None``, the global settings are used (given by :py:attr:`DataSet.render_settings <ovito.DataSet.render_settings>`).
:type settings: :py:class:`RenderSettings`
:returns: A `QImage <http://pyqt.sourceforge.net/Docs/PyQt5/api/qimage.html>`_ object on success, which contains the rendered picture;
``None`` if the rendering operation has been canceled by the user.
        The rendered image or movie will automatically be saved to disk when the :py:attr:`RenderSettings.filename` attribute contains a non-empty string.
Alternatively, you can save the returned `QImage <http://pyqt.sourceforge.net/Docs/PyQt5/api/qimage.html>`_ explicitly using its ``save()`` method.
This gives you the opportunity to paint additional graphics on top of the image before saving it. For example:
.. literalinclude:: ../example_snippets/render_to_image.py
"""
if settings is None:
settings = self.dataset.render_settings
elif isinstance(settings, dict):
settings = RenderSettings(settings)
if ovito.gui_mode:
# Use the frame buffer of the GUI window for rendering.
fb_window = self.dataset.container.window.frame_buffer_window
fb = fb_window.create_frame_buffer(settings.outputImageWidth, settings.outputImageHeight)
fb_window.show_and_activate()
else:
# Create a temporary off-screen frame buffer.
fb = FrameBuffer(settings.size[0], settings.size[1])
if not self.dataset.render_scene(settings, self, fb, ovito.get_progress_display()):
return None
return fb.image
Viewport.render = _Viewport_render
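# A minimal usage sketch (assumes an interactive session where 'vp' is a
# Viewport, e.g. ovito.dataset.viewports.active_vp; the constructor
# keywords mirror the properties defined above):
#
#     settings = RenderSettings(size=(320, 240), filename="image.png")
#     image = vp.render(settings)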
def _ViewportConfiguration__len__(self):
return len(self.viewports)
ViewportConfiguration.__len__ = _ViewportConfiguration__len__
def _ViewportConfiguration__iter__(self):
return self.viewports.__iter__()
ViewportConfiguration.__iter__ = _ViewportConfiguration__iter__
def _ViewportConfiguration__getitem__(self, index):
return self.viewports[index]
ViewportConfiguration.__getitem__ = _ViewportConfiguration__getitem__
|
srinath-chakravarthy/ovito
|
src/plugins/pyscript/python/ovito/vis/__init__.py
|
Python
|
gpl-3.0
| 5,082
|
[
"OVITO"
] |
64626d00d3a34ed7a1d8d8a2c9bde3f9c02727e2a6252b8f0b7d6ad652f77946
|
"""
This module reads and writes a series of data cubes to a NetCDF file.
=== Set up some parameters ===
Define the shape of the data cube.
>>> shape = (4, 5, 3)
>>> res = (1., 100., 200.)
Set the values of the data cube.
>>> var = numpy.random.random (shape)
Write cubes at these times.
>>> times = range (5)
This is the name of the variable that we will read/write.
>>> var_name = 'grain_size'
=== Write a cube ===
>>> import nccs_files
>>> f = nccs_files.nccs_file ()
Open the file for writing.
>>> f.open_new_file ('test_file.nc', shape=shape, res=res,
... dtype='float64',
... var_name=var_name,
... long_name="Sediment grain size",
... units_name="phe",
... comment="Cube of sediment grain size")
True
The name of the file may not be the same as what was specified in the call
to open_new_file. If the file already exists, a number will be appended to
the file name (before the extension).
>>> file_name = f.file_name
>>> print file_name # doctest: +ELLIPSIS
test_file....nc
Write the variable
>>> f.add_cube (var, var_name)
Close the file.
>>> f.close_file ()
=== Read a cube ===
>>> import nccs_files
>>> f = nccs_files.nccs_file ()
>>> f.open_file (file_name)
True
Read variable from file and compare it to what we wrote.
>>> var_from_file = f.get_cube (var_name, 0)
>>> (var_from_file == var).all ()
True
>>> f.close_file ()
=== Write a series of cubes ===
Open the file for writing.
>>> f.open_new_file ('test_file.nc', shape=shape, res=res,
... dtype='float64',
... var_name=var_name,
... long_name="Sediment grain size",
... units_name="phe",
... comment="Cube of sediment grain size")
True
>>> file_name = f.file_name
>>> print file_name # doctest: +ELLIPSIS
test_file....nc
Write the variable
>>> for time in times:
... f.add_cube (var+time, var_name)
Close the file.
>>> f.close_file ()
=== Read a series of cubes ===
>>> import nccs_files
>>> f = nccs_files.nccs_file ()
>>> f.open_file (file_name)
True
Read variable from file and compare it to what we wrote.
>>> values_match = True
>>> for time in times:
... var_from_file = f.get_cube (var_name, time)
... values_match &= (var_from_file == var+time).all ()
>>> values_match
True
>>> f.close_file ()
"""
import os
import sys
import time
import numpy
import bov_files
import file_utils
import rti_files
#---------------------------------------------------------------------
# This class is for I/O of time-indexed 3D arrays to netCDF files.
#---------------------------------------------------------------------
#
# unit_test()
# save_nccs_cube() # (not ready yet)
#
# class nccs_file():
#
# import_nio()
# open_file()
# get_nio_type_map()
# open_new_file()
# update_time_index()
#----------------------------
# add_cube()
# get_cube()
#----------------------------
# close_file()
# close()
#
#-------------------------------------------------------------------
def save_nccs_cube(nccs_file_name=None, rtg_file_name=None):
nccs = nccs_file ()
OK = nccs.open_file(nccs_file_name)
if not OK:
return
var_name = 'H'
time_index = 200
cube = nccs.get_cube(var_name, time_index)
nccs.close()
cube = numpy.array(cube)
print 'min(cube), max(cube) =', cube.min(), cube.max()
    rtg_unit = open(rtg_file_name, 'wb')
    cube.tofile(rtg_unit)
    rtg_unit.close()
# save_nccs_cube()
#-------------------------------------------------------------------
class nccs_file():
#----------------------------------------------------------
# Note: ncgs = NetCDF Grid Stack (used by CSDMS)
#----------------------------------------------------------
def import_nio(self):
try:
import Nio # (a module in the PyNIO package)
# print 'Imported Nio version: ' + Nio.__version__
return Nio
except:
## python_version = sys.version[:3]
## print ' '
## print 'SORRY, Cannot write netCDF files because'
## print 'the "Nio" package cannot be imported.'
## print ' '
## if (python_version != '2.6'):
## print 'Note that "PyNIO" is only installed for'
## print 'Python version 2.6 on "beach".'
## print 'The current Python version is:', python_version
## print ' '
return False
# import_nio()
#----------------------------------------------------------
def open_file(self, file_name):
#--------------------------------------------------
# Try to import the Nio module from PyNIO package
#--------------------------------------------------
Nio = self.import_nio ()
        if not Nio:
            return False
#-------------------------
# Open file to read only
#-------------------------
try:
nccs_unit = Nio.open_file(file_name, mode="r")
self.nccs_unit = nccs_unit
return True
except:
return False
# open_file()
#----------------------------------------------------------
def get_nio_type_map(self):
#----------------------------------------
# Possible settings for "nio_type_code"
#-------------------------------------------
# nio_type_code = "d" # (double, Float64)
# nio_type_code = "f" # (float, Float32)
# nio_type_code = "l" # (long, Int64)
# nio_type_code = "i" # (int, Int32)
# nio_type_code = "h" # (short, Int16)
# nio_type_code = "b" # (byte, Int8)
# nio_type_code = "S1" # (char)
#-------------------------------------------
nio_type_map = {'float64':'d', 'float32':'f',
'int64':'l', 'int32':'i',
'int16':'s', 'int8':'b',
'S|100':'S1'} # (check last entry)
return nio_type_map
# get_nio_type_map()
#----------------------------------------------------------
def open_new_file(self, file_name, info=None,
var_name='X',
long_name=None,
units_name='None',
dtype='float64',
### dtype='float64'
time_units='minutes',
comment='',
shape=(1,1,1),
res=(1.,1.,1.),
MAKE_RTI=True, MAKE_BOV=False):
#--------------------------------------------------
# Try to import the Nio module from PyNIO package
#--------------------------------------------------
Nio = self.import_nio ()
if not Nio:
return False
#----------------------------
# Does file already exist ?
#----------------------------
file_name = file_utils.check_overwrite( file_name )
self.file_name = file_name
#---------------------------------------
# Check and store the grid information
#---------------------------------------
self.format = 'nccs'
self.file_name = file_name
self.time_index = 0
self.var_name = var_name
self.shape = shape
self.res = res
if (long_name is None):
long_name = var_name
self.long_name = long_name
self.units_name = units_name
self.dtype = dtype
#-----------------------------------
# Get Nio type code for this dtype
#------------------------------------
nio_type_map = self.get_nio_type_map()
nio_type_code = nio_type_map[ dtype.lower() ]
self.nio_type_code = nio_type_code
#-------------------------------------
# Open a new netCDF file for writing
#-------------------------------------
# Sample output from time.asctime():
# "Thu Oct 8 17:10:18 2009"
#-------------------------------------
opt = Nio.options()
opt.PreFill = False # (for efficiency)
opt.HeaderReserveSpace = 4000 # (4000 bytes, for efficiency)
history = "Created using PyNIO " + Nio.__version__ + " on "
history = history + time.asctime() + ". "
history = history + comment
# print 'MADE IT PAST history BLOCK'
try:
nccs_unit = Nio.open_file (file_name, mode="w",
options=opt, history=history)
OK = True
except:
OK = False
return OK
#----------------------------------------------
# Create grid dimensions nx and ny, plus time
#----------------------------------------------
# Without using "int()" here, we get this:
# TypeError: size must be None or integer
#----------------------------------------------
nccs_unit.create_dimension("nz", self.shape[0])
nccs_unit.create_dimension("ny", self.shape[1])
nccs_unit.create_dimension("nx", self.shape[2])
nccs_unit.create_dimension("time", None) # (unlimited dimension)
# print 'MADE IT PAST create_dimension CALLS.'
#-------------------------
# Create a time variable
#------------------------------------------
# ('d' = float64; must match in add_cube())
#------------------------------------------
tvar = nccs_unit.create_variable ('time', 'd', ("time",))
nccs_unit.variables['time'].units = time_units
#--------------------------------
# Create a variable in the file
#----------------------------------
# Returns "var" as a PyNIO object
#----------------------------------
var = nccs_unit.create_variable (var_name, nio_type_code,
("time", "nz", "ny", "nx"))
#----------------------------------
# Specify a "nodata" fill value ?
#----------------------------------
var._FillValue = -9999.0   ## Does this jibe with PreFill above ??
#------------------------------------
# Create attributes of the variable
#------------------------------------
nccs_unit.variables[var_name].long_name = long_name
nccs_unit.variables[var_name].units = units_name
nccs_unit.variables[var_name].dz = self.res[0]
nccs_unit.variables[var_name].dy = self.res[1]
nccs_unit.variables[var_name].dx = self.res[2]
nccs_unit.variables[var_name].y_south_edge = 0.
nccs_unit.variables[var_name].y_north_edge = self.res[1]*self.shape[1]
nccs_unit.variables[var_name].x_west_edge = 0.
nccs_unit.variables[var_name].x_east_edge = self.res[2]*self.shape[2]
nccs_unit.variables[var_name].z_bottom_edge = 0.
nccs_unit.variables[var_name].z_top_edge = self.res[0]*self.shape[0]
self.nccs_unit = nccs_unit
return OK
# open_new_file()
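#   Usage sketch (assumes PyNIO is available; the instance name and
#   file name below are illustrative, not part of this module's API):
#
#       ncf = nccs_file()
#       if ncf.open_new_file('Q.nc', var_name='Q', units_name='m3/s',
#                            shape=(2, 4, 5), res=(1., 1., 1.)):
#           cube = numpy.zeros((2, 4, 5), dtype='float64')
#           ncf.add_cube(cube, 'Q', time=0.0)
#           ncf.close_file()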
#----------------------------------------------------------
def add_cube(self, grid, var_name, time=None,
time_index=-1):
#---------------------------------
# Assign a value to the variable
#-------------------------------------------
# This syntax works for scalars and grids
#-------------------------------------------
# nc_unit.variables[var_name].assign_value( grid )
#-------------------------------------
# Can use time_index to overwrite an
# existing grid vs. simple append.
#-------------------------------------
if time_index == -1:
time_index = self.time_index
if time is None:
time = numpy.float64(time_index)
#---------------------------------------
# Write a time to existing netCDF file
#---------------------------------------
times = self.nccs_unit.variables['time']
times[time_index] = time
#---------------------------------------
# Write a grid to existing netCDF file
#---------------------------------------
var = self.nccs_unit.variables[var_name]
if numpy.ndim(grid) == 0:
#-----------------------------------------------
# "grid" is actually a scalar (dynamic typing)
# so convert it to a grid before saving
#-----------------------------------------------
grid2 = grid + numpy.zeros(self.shape,
                           dtype=self.dtype)
var[time_index] = grid2.astype(self.dtype)
else:
var[time_index] = grid.astype(self.dtype)
#---------------------------
# Increment the time index
#---------------------------
self.time_index += 1
# add_cube()
#----------------------------------------------------------
def get_cube(self, var_name, time_index):
var = self.nccs_unit.variables[var_name]
return var[time_index]
# get_cube()
#-------------------------------------------------------------------
def close_file(self):
self.nccs_unit.close ()
# close_file()
#-------------------------------------------------------------------
def close(self):
self.nccs_unit.close ()
# close()
#-------------------------------------------------------------------
if __name__ == "__main__":
import doctest
doctest.testmod()
|
mdpiper/topoflow
|
topoflow/utils/nccs_files_Nio.py
|
Python
|
mit
| 13,884
|
[
"NetCDF"
] |
61ee564e6902c29950b9895da834790d6c88dd13d72b7f9bf39ee413a6efc4a1
|
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# This module contains messages
#
import ige.ospace.Const as Const
from ige import NoSuchObjectException
import client, types, string, res, gdata
from ige import log
from ige.ospace import Rules
#
# Transform routines
#
def techID2Name(techID):
if techID >= 1000:
return _(client.getTechInfo(techID).name.encode())
else:
return client.getPlayer().shipDesigns[techID].name
def objID2Name(objID):
obj = client.get(objID, noUpdate = 0, publicOnly = 1)
return getattr(obj, 'name', res.getUnknownName())
def objIDList2Names(objIDs):
names = []
for objID in objIDs:
obj = client.get(objID, noUpdate = 1, publicOnly = 1)
if hasattr(obj, 'owner') and obj.owner != obj.oid:
try:
owner = _(' (%s)') % client.get(obj.owner, noUpdate = 1, publicOnly = 1).name
except AttributeError:
owner = ''
else:
owner = ''
text = _('%s%s') % (getattr(obj, 'name', res.getUnknownName()), owner)
names.append(text)
return string.join(names, ', ')
def minesReport((damageCaused, killsCaused, minesTriggered)):
lines = []
techs = minesTriggered.keys()
for techID in techs:
techName = client.getTechInfo(techID).name.encode()
text = _('%d of the %s detonated, causing %d damage, killing %d ships in the process.') %\
(minesTriggered[techID],
techName,
damageCaused.get(techID, 0),
killsCaused.get(techID, 0))
lines.append(text)
return '\n'.join(lines)
def delayTurns(startTurn):
return res.formatTime(startTurn + Rules.galaxyStartDelay)
def stratID2Name(resID):
return _(gdata.stratRes[resID])
def float2percent(number):
return int(number * 100)
def plType2Name(plType):
return gdata.planetTypes[plType]
def designID2Name(designID):
return client.getPlayer().shipDesigns[designID].name
def queueID2Name(queueID):
return res.globalQueueName(queueID)
def votes2Txt((votes, voters)):
lines = []
nominated = sorted(votes, key=lambda a: votes[a], reverse = True)
for playerName in nominated:
if playerName is None:
continue
l = []
for name in voters[playerName]:
l.append(name)
text = " %s got %d votes from %s." % (
playerName,
votes[playerName],
", ".join(l),
)
lines.append(text)
if None in votes:
l = []
for name in voters[None]:
l.append(name)
text = " %s abstained [%d votes]." % (
", ".join(l),
votes[None],
)
lines.append(text)
return "\n".join(lines)
def impMsg(msg):
return "\n".join(msg)
def listing(names):
if len(names) == 1:
return names[0]
conj = _(" and ")
text = conj.join([", ".join(names[:-1]), names[-1]])
return text
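# Illustrative (plain-Python sketch): listing(['Ada', 'Ben', 'Eve'])
# returns 'Ada, Ben and Eve' (the conjunction text goes through _()).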
#
# Data
#
# severity codes
CRI = 3
MAJ = 2
MIN = 1
INFO = 0
NONE = INFO
# i18n (delayed translation)
def N_(msg): return msg
msgData = {}
def addMsg(msgID, name, transform = None, severity = NONE):
global msgData
msgData[msgID] = (name, transform, severity)
addMsg(Const.MSG_COMPLETED_RESEARCH, N_('Research completed: %(1)s'), (techID2Name,), CRI)
addMsg(Const.MSG_WASTED_SCIPTS, N_('%(1)d research points not used.'), severity = MIN)
addMsg(Const.MSG_CANNOTBUILD_SHLOST, N_('Cannot build on planet - ship may be lost.'), severity = CRI)
addMsg(Const.MSG_CANNOTBUILD_NOSLOT, N_('Cannot build on planet - no free slot.'), severity = CRI)
# NOT NEEDED addMsg(MSG_DESTROYED_BUILDING, N_('Structure destroyed: %(1)s'), (techID2Name,), MAJ)
addMsg(Const.MSG_WASTED_PRODPTS, N_('Construction problem: no task\n\n%(1)d construction points were not used because there was no task to fulfill.'), (int,), severity = INFO)
addMsg(Const.MSG_LOST_PLANET, N_('Planet lost.'), severity = CRI)
addMsg(Const.MSG_COMPLETED_STRUCTURE, N_('Structure completed: %(1)s'), (techID2Name,), MIN)
addMsg(Const.MSG_DEPLOY_HANDLER, N_('A deployment of %(1)s was completed'), (techID2Name,), MIN)
addMsg(Const.MSG_COMPLETED_SHIP, N_('Ship completed: %(1)s'), (techID2Name,), MIN)
addMsg(Const.MSG_GAINED_PLANET, N_('New planet.'), severity = CRI)
addMsg(Const.MSG_COMBAT_RESULTS, N_('Combat with: %(4)s. HP lost: we %(1)d, they %(2)d.\n\nEnemy lost %(2)d HP, we lost %(1)d HP and %(3)d ships/structures. We attacked/were attacked by %(4)s.'), (int, int, int, objIDList2Names), MAJ)
addMsg(Const.MSG_COMBAT_LOST, N_('Battle lost: we were defeated by %(1)s.'), (objID2Name,), CRI)
addMsg(Const.MSG_DESTROYED_FLEET, N_('Fleet destroyed.'), severity = CRI)
addMsg(Const.MSG_COMBAT_WON, N_('Battle won: we defeated %(1)s.'), (objID2Name,), CRI)
addMsg(Const.MSG_NEW_GOVCENTER, N_('A new government center established.'), severity = CRI)
addMsg(Const.MSG_REVOLT_STARTED, N_('Planet revolt started - production halved for the next turns.'), severity = CRI)
addMsg(Const.MSG_REVOLT_ENDED, N_('Planet revolt ended - production restored.'), severity = CRI)
addMsg(Const.MSG_INVALID_TASK, N_('Construction of %(1)s is not valid - construction suspended.'), (techID2Name,), severity = CRI)
addMsg(Const.MSG_NOSUPPORT_POP, N_('Population decreased.\n\nPopulation of this planet has decreased. Build more facilities producing food.'), severity = CRI)
addMsg(Const.MSG_COMPLETED_PROJECT, N_('Project finished: %(1)s'), (techID2Name,), MIN)
addMsg(Const.MSG_ENABLED_TIME, N_('Time in galaxy started to run...'), severity = CRI)
addMsg(Const.MSG_DISABLED_TIME, N_('Time in galaxy stopped...'), severity = CRI)
addMsg(Const.MSG_MISSING_STRATRES, N_('Strategic resource missing: %(1)s'), (stratID2Name,), MAJ)
addMsg(Const.MSG_DELETED_RESEARCH, N_('Research task deleted: %(1)s'), (techID2Name,), CRI)
addMsg(Const.MSG_EXTRACTED_STRATRES, N_('Strategic resource extracted: %(1)s'), (stratID2Name,), MIN)
addMsg(Const.MSG_EXTRACTED_ANTIMATTER_SYNTH, N_('Strategic resource synthesized: 4 units of %(1)s'), (stratID2Name,), MIN)
addMsg(Const.MSG_DOWNGRADED_PLANET_ECO, N_('Planet downgraded to: %(1)s'), (plType2Name,), CRI)
addMsg(Const.MSG_UPGRADED_PLANET_ECO, N_('Planet upgraded to: %(1)s'), (plType2Name,), CRI)
addMsg(Const.MSG_UPGRADED_SHIP, N_('Ship upgraded from %(1)s to %(2)s'), (unicode,unicode), MIN)
addMsg(Const.MSG_DELETED_DESIGN, N_('Obsolete ship design deleted: %(1)s'), (unicode,), CRI)
addMsg(Const.MSG_CANNOT_UPGRADE_SR, N_('Cannot upgrade ship from %(1)s to %(2)s\n\nCannot upgrade ship from %(1)s to %(2)s because we do not have enough %(3)s.'), (unicode,unicode,stratID2Name), MAJ)
addMsg(Const.MSG_DAMAGE_BY_SG, N_('Malfunctioning Star Gate, lost %(1)d %% HP\n\nOur fleet has arrived at a system with no or a malfunctioning Star Gate or Comm/Scann Center. Every ship lost %(1)d %% hitpoints due to intensive deceleration.'), (int,), MAJ)
addMsg(Const.MSG_GAINED_FAME, N_('Gained %(1)d fame.'), severity = INFO)
addMsg(Const.MSG_LOST_FAME, N_('Lost %(1)d fame.'), severity = CRI)
addMsg(Const.MSG_GAINED_TECH, N_('Gained %(1)s technology at sublevel %(2)d.'), (techID2Name, int), severity = INFO)
addMsg(Const.MSG_ENTERED_WORMHOLE, N_('Your fleet entered a wormhole at %(1)s and exited at %(2)s.'), (unicode,unicode), severity = MIN)
addMsg(Const.MSG_NOT_ENTERED_WORMHOLE, N_('Cannot enter wormhole - ship may be lost.'), severity = MIN)
addMsg(Const.MSG_FOUND_WORMHOLE, N_('You have located a wormhole'), severity = MIN) #todo
addMsg(Const.MSG_FUEL_LOST_ORBITING, N_('Fleet lost.\n\n We lost contact with the %(1)s after they ran out of fuel in the system %(2)s.'), (unicode, objID2Name), severity = MAJ)
addMsg(Const.MSG_FUEL_LOST_FLYING, N_('Fleet lost.\n\n We lost contact with the %(1)s after they ran out of fuel en route to the system %(2)s.'), (unicode, objID2Name), severity = MAJ)
addMsg(Const.MSG_QUEUE_TASK_ALLOTED, N_('Task alloted.\n\nGlobal queue \"%(1)s\" alloted %(2)s.'), (queueID2Name, techID2Name), severity = MAJ)
addMsg(Const.MSG_MINES_OWNER_RESULTS, N_('Our minefield triggered: HP / ships destroyed: %(3)s / %(4)s.\n\nForces of %(1)s triggered our minefield. Report:\n\n%(2)s'), (objIDList2Names,minesReport,int,int), severity = MAJ)
addMsg(Const.MSG_MINES_FLEET_RESULTS, N_('Hostile minefield triggered: HP / ships lost: %(1)s / %(2)s.\n\nOur fleet triggered enemy minefield, losing %(1)s HP resulting in destruction of %(2)s ships.'), (int,int), severity = MAJ)
# GNC
addMsg(Const.MSG_GNC_EMR_FORECAST, N_("EMR Forecast\n\nLevel of the electromagnetic radiation is believed to be about %(1)d %% of the average level for the next %(2)s turns"), (float2percent, res.formatTime), severity = MIN)
addMsg(Const.MSG_GNC_EMR_CURRENT_LVL, N_("EMR Forecast\n\nCurrent level of the electromagnetic radiation is about %(1)d %% of the average level."), (float2percent,), severity = MIN)
addMsg(Const.MSG_GNC_VOTING_COMING, N_("Elections!\n\nIt's %(1)s turns before elections! Don't hesitate and vote for the best commander!"), (res.formatTime,), severity = MAJ)
addMsg(Const.MSG_GNC_VOTING_NOWINNER, N_("Election results! Nobody won...\n\nThe results from the last elections have been published. Nobody was strong enough to be elected as a leader of our galaxy. Can we find such person another day?\n\nThe official election results follow:\n\n%(1)s\n\n"), (votes2Txt,), severity = MAJ)
addMsg(Const.MSG_GNC_VOTING_LEADER, N_("Election results! Leader elected!\n\nThe results from the last elections have been published. %(1)s has proved to be the most supported person and has been elected as our Leader. Maybe %(1)s can become an Imperator one day.\n\nThe official election results follow:\n\n%(2)s\n\n"), (unicode, votes2Txt,), severity = MAJ)
addMsg(Const.MSG_GNC_VOTING_IMPERATOR, N_("Election results! Imperator elected!\n\nThe results from the last elections have been published. %(1)s has proved to be the most supported person and has been elected as our glorified Imperator. Congratulations - you proved to be the best of all of us!\n\nThe official election results follow:\n\n%(2)s\n\n"), (unicode, votes2Txt,), severity = MAJ)
addMsg(Const.MSG_GNC_GALAXY_FINISHED, N_("Galaxy %(2)s knows its winner - Imperator %(1)s\n\nToday the galaxy %(2)s has been united and the peace has been restored. Majority of commanders voted for Imperator %(1)s as their supreme leader. Congratulations, Imperator, you were brave and wise!\n\nMessage from imperator:\n%(3)s"), (unicode, unicode, impMsg), severity = MAJ)
addMsg(Const.MSG_GNC_GALAXY_GENERATOR, N_("Galaxy %(1)s generation is completed. Galaxy specifications:\n\n%(2)s"), (unicode, votes2Txt,), severity = INFO)
addMsg(Const.MSG_GNC_GALAXY_AUTO_FINISHED, N_("Galaxy %(1)s has ended\n\nToday the galaxy %(1)s has been automatically ended.\n\nReason:\n%(2)s"), (unicode, impMsg), severity = MAJ)
addMsg(Const.MSG_GNC_GALAXY_BRAWL_WON, N_("Galaxy %(1)s has ended\n\nAll hail to the conqueror! Galaxy %(1)s is finally on the brink of peace. After years of conquest, all opposition has been decimated and Sovereign %(2)s stands as the sole usurper of the galaxy."), (unicode, unicode), severity = MAJ)
addMsg(Const.MSG_GNC_GALAXY_COOP_WON, N_("Galaxy %(1)s has ended\n\nAll citizens of galaxy %(1)s rejoice. Peace is finally here! After years of fending off attacks of barbaric empires, stalwart defence by %(2)s prevailed, and ensured further generations can live in prosperity."), (unicode, listing), severity = MAJ)
addMsg(Const.MSG_GNC_GALAXY_CREATED, N_("Boom of galactic civilizations is here\n\nDark ages are past, and new empires are reaching galactic age at breakneck speed and with fervor never seen before. True space race is predicted to start on %(1)s of universal time."), (delayTurns,), severity = MAJ)
# i18n
del N_
#
# Interface
#
def getMsgText(msgID, data):
msg, transform, severity = msgData.get(msgID, (None, None, None))
# create default messages
if not msg:
return _('ERROR\nMissing text for msg %d: %s') % (msgID, repr(data))
# there is message text -> create message
# force unicode
msg = _(msg)
if data is None:
return msg
try:
# transform data
newData = {}
if not (type(data) == types.ListType or type(data) == types.TupleType):
data = (data,)
if transform:
index = 1
for tranFunc in transform:
newData[str(index)] = tranFunc(data[index - 1])
index += 1
else:
index = 1
for item in data:
newData[str(index)] = item
index += 1
text = msg % newData
except Exception, e:
# wrong arguments -> default message
log.warning("Error while formating message")
return _('ERROR\nWrong format for msg %d: %s\nException: %s: %s\nFormat: %s') % (msgID, repr(data), str(e.__class__), str(e), msg)
return text
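# Sketch of the placeholder convention used above: the (possibly
# transformed) positional data ends up keyed as '1', '2', ..., so the
# message templates use '%(1)s'-style directives. Plain Python equivalent:
#
#   'Research completed: %(1)s' % {'1': 'Ion drive'}
#   # -> 'Research completed: Ion drive'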
def getMsgSeverity(msgID):
return msgData.get(msgID, (None, None, NONE))[2]
def getFullMessageText(message):
"""Gets full text of automaticaly generated message
If message has no data to generate, it returns empty
string.
"""
text = ""
if message.has_key("data"):
sourceID, msgID, locationID, turn, data = message["data"]
sev = getMsgSeverity(msgID)
currTurn = client.getTurn()
player = client.getPlayer()
# source
if sourceID != Const.OID_NONE and sourceID != player.oid:
obj = client.get(sourceID, noUpdate = 1, publicOnly = 1)
if obj:
if hasattr(obj,'customname') and obj.customname:
source = _('"%s"') % obj.customname
else:
source = getattr(obj, 'name', res.getUnknownName())
else:
source = _('N/A')
else:
source = _('-')
text = '%s%s\n' % (text, _("Source: %s") % source)
# location
if locationID != Const.OID_NONE:
obj = client.get(locationID, noUpdate = 1, publicOnly = 1)
location = getattr(obj, 'name', res.getUnknownName())
else:
location = _('-')
text = '%s%s\n' % (text, _("Location: %s") % location)
text = '%s%s\n' % (text, _("Severity: %s") % _(gdata.msgSeverity[sev]))
text = '%s%s\n' % (text, _("Time: %s [%s]") % (
res.formatTime(turn),
res.formatTime(turn - currTurn),
))
text = '%s%s\n' % (text, "")
text = '%s%s\n' % (text, getMsgText(msgID, data))
return text
|
ospaceteam/outerspace
|
client/osci/messages.py
|
Python
|
gpl-2.0
| 15,355
|
[
"Galaxy"
] |
57dce0236d2b0c804c2d8f10580ed92e43b19e8fc0e20d910778ac09a77de905
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping, OrderedDict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
try:
return which(name, path=os.pathsep.join(path))
except IOError:
return None
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
""" On systems where pg_restore/pg_dump require an explicit password (i.e.
on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if not env.get('PGPASSWORD') and openerp.tools.config['db_password']:
env['PGPASSWORD'] = openerp.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
>>> file_open('hr/report/timesheer.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
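# A minimal sketch (hypothetical module names): each dependency is
# guaranteed to precede its dependents; ordering among independent
# keys follows dict iteration order.
#
#   topological_sort({'app': ['base'], 'web': ['base'], 'base': []})
#   # -> e.g. ['base', 'app', 'web']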
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
    return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'eu_ES': u'Basque / Euskara',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'ka_GE': u'Georgian / ქართული ენა',
'kab_DZ': u'Kabyle / Taqbaylit',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Dutch (BE) / Nederlands (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
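# Illustrative trace of the recursion above: mod10r('123') -> '1236'
# (the trailing '6' is the check digit; non-digit characters are
# copied through but do not advance the running key).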
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
tz_value = False
try:
    f = open("/etc/timezone")
    try:
        tz_value = f.read(128).strip()
    finally:
        f.close()
except Exception:
    pass
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
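# Illustration of the combined format (datetime is imported above):
#   datetime(2014, 7, 1, 13, 5, 0).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#   # -> '2014-07-01 13:05:00'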
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes an UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
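# Sketch: for patterns without %x / %X the locale is not consulted, e.g.
#   posix_to_ldml('%Y-%m-%d', locale=None)   # -> 'yyyy-MM-dd'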
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
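# Usage sketch (assumes pytz is installed; the server timezone is the
# 'UTC' returned by get_server_timezone() above):
#
#   server_to_local_timestamp('2014-07-01 12:00:00',
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             'Europe/Brussels')
#   # -> '2014-07-01 14:00:00' (CEST is UTC+2 in July)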
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
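# e.g. list(split_every(2, range(5))) -> [(0, 1), (2, 3), (4,)]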
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
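# Sketch (hypothetical command line): '-d' and its value are stripped,
# unrelated options survive.
#
#   sys.argv = ['openerp-server', '-d', 'mydb', '--xmlrpc-port=8069']
#   stripped_sys_argv()   # -> ['openerp-server', '--xmlrpc-port=8069']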
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful for default value to methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
class OrderedSet(OrderedDict):
""" A simple collection that remembers the elements insertion order. """
def __init__(self, seq=()):
super(OrderedSet, self).__init__([(x, None) for x in seq])
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
minhtuancn/odoo
|
openerp/tools/misc.py
|
Python
|
agpl-3.0
| 45,945
|
[
"VisIt"
] |
0557a3e4ce8aa7cba44c2807f1deb5b23b4169f634ddd055528b2c5681abcf3e
|
import cv2
import numpy
from PIL import Image
threshold = lambda *a: cv2.threshold(*a)[1]
def lut(frame, gamma_in=100, offset=0):
# gamma_in comes in as [0, 99], maps to [-2, 2]
# this lets the clipping happen more spectacularly
gamma = (gamma_in - 50) / 25.
lut = numpy.arange(256) * gamma
lut += abs(lut.min())
return lut[frame].astype(dtype=numpy.uint8)
def affine(frame, rotation, scale):
    rot_deg = 360 - rotation * 3.6  # [0, 99] -> [360.0, 3.6] degrees; larger values rotate further clockwise
scale = (scale + 1) / 50.
center = (frame.shape[1] / 2., frame.shape[0] / 2.)
rot_mat = cv2.getRotationMatrix2D(center, rot_deg, scale)
frame_ = cv2.warpAffine(frame, rot_mat, (frame.shape[1], frame.shape[0]))
return frame_
def param_lut(in_ll, in_ul, gamma, out_ll, out_ul):
a = numpy.zeros(256)
a[in_ll:in_ul] = (numpy.arange(in_ll, in_ul) ** gamma)
a *= (256 / a.max())
a[a>=in_ul] = 255
a = numpy.clip(a, out_ll, out_ul)
return a.astype(numpy.uint8)
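# Sketch of what param_lut produces (the values below are illustrative): a
# 256-entry lookup table that applies a gamma curve between in_ll and in_ul
# and clips the result into [out_ll, out_ul], applied per pixel by indexing.
#
#     table = param_lut(0, 256, 1.0, 0, 255)   # near-identity ramp
#     remapped = table[frame]                  # frame is a uint8 image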
def color(frame, *args):
""" apply a 3-channel lut transform (also switches b and r channels) """
frame_ = numpy.zeros(frame.shape)
rill, riul, rlin, roll, roup, gill, giul, glin, goll, goup, bill, biul, blin, boll, boup = args
frame_[:, :, 0] = param_lut(rill, riul, numpy.log(rlin) - 2.91, roll, roup)[frame[:, :, 0]]
frame_[:, :, 1] = param_lut(gill, giul, numpy.log(glin) - 2.91, goll, goup)[frame[:, :, 1]]
frame_[:, :, 2] = param_lut(bill, biul, numpy.log(blin) - 2.91, boll, boup)[frame[:, :, 2]]
return frame_.astype(numpy.uint8)
def invert(frame):
lut = numpy.arange(256, dtype=numpy.uint8)[::-1]
return lut[frame]
def hsv(frame, h=100, s=100, v=100):
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV).astype(numpy.float64)
frame_hsv[:, :, 0] *= h / 100
frame_hsv[:, :, 1] *= s / 100
frame_hsv[:, :, 2] *= v / 100
frame_ = cv2.cvtColor(frame_hsv.astype(numpy.uint8), cv2.COLOR_HSV2RGB)
return frame_
def lab(frame, l=100, a=100, b=100):
frame_lab = cv2.cvtColor(frame, cv2.COLOR_RGB2Lab).astype(numpy.float64)
frame_lab[:, :, 0] *= l / 100
frame_lab[:, :, 1] *= a / 100
frame_lab[:, :, 2] *= b / 100
frame_ = cv2.cvtColor(frame_lab.astype(numpy.uint8), cv2.COLOR_Lab2RGB)
return frame_.astype(numpy.uint8)
def roll_(frame, r, g, b):
frame[:, :, 0] = numpy.roll(frame[:, :, 0], r)
frame[:, :, 1] = numpy.roll(frame[:, :, 1], g)
frame[:, :, 2] = numpy.roll(frame[:, :, 2], b)
return frame
def flip_h(frame):
frame_ = numpy.fliplr(frame)
return frame_
def flip_v(frame):
frame_ = numpy.flipud(frame)
return frame_
def quantize(frame, num_colors=256):
# TODO add log-log scaling
m = (256 / num_colors) ** 6
lut = numpy.arange(256, dtype=numpy.uint8)
lut = lut // m
lut *= m
return lut[frame].astype(numpy.uint8)
def perspective(frame, pitch=0, yaw=0):
# fake perspective transforms
pass
def circles(frame):
img = cv2.medianBlur(cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY), 5)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 40,
param1=50, param2=30, minRadius=10, maxRadius=50)
if circles is None:
return frame
circles = numpy.uint16(numpy.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 2)
return frame
def Median(frame, d):
    d = d*2+1  # medianBlur requires an odd aperture size
for band in range(frame.shape[2]):
frame[:,:,band] = cv2.medianBlur(frame[:,:,band], d)
return frame
def gaussian(frame, sigma):
frame_ = cv2.GaussianBlur(frame, (0, 0), sigma)
return frame_
def oneminusgaussian(frame, sigma):
    # approximates a high-pass response with a Laplacian rather than a literal 1 - Gaussian
    frame_ = cv2.Laplacian(frame, cv2.CV_8U, ksize=sigma*2 + 1)
return frame_
filenames = ['C:\\Users\\Ryan\\Documents\\Imaging_Science\\ImageRIT\\Server\\effects\\tiger', 'C:\\Users\\Ryan\\Documents\\Imaging Science\\ImageRIT\\Server\\effects\\emoj']
overlay = {key.split('\\')[-1]: cv2.imread(key + '.png', -1) for key in filenames}
def face(frame, identifier):
arr = overlay[identifier].copy()
red = arr[:,:,2].copy()
blue = arr[:,:,0].copy()
arr[:,:,0] = red
arr[:,:,2] = blue
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
cascade = cv2.CascadeClassifier('C:\\Users\\Ryan\\Documents\\Imaging_Science\\ImageRIT\\Server\\effects\\haarcascade_frontalface_default.xml')
face_collection = cascade.detectMultiScale(gray, 1.5, 5)
back = Image.fromarray(frame)
for (x,y,w,h) in face_collection:
arr = cv2.resize(arr, (int(1.5*w),int(1.5*h)), interpolation = cv2.INTER_CUBIC)
fore = Image.fromarray(arr)
back.paste(fore, (x,y), fore)
return numpy.asarray(back)
def logpolar(frame):
    # despite the name, this applies cv2.linearPolar (a linear, not logarithmic, polar mapping)
    frame_ = cv2.linearPolar(frame, (frame.shape[1]/2, frame.shape[0]/2), 300, cv2.INTER_NEAREST)
return frame_
config = {
"Binarize": {
"type": "bool-non-momentary",
"text": "Binarize",
"func": threshold,
"args": [127, 255, cv2.THRESH_BINARY]
},
"Gamma": {
"type": "int-dial",
"text": "Gamma",
"func": lut,
"args": [50]
},
"affine": {
"type": "",
"text": ["Scale", "Rotate"],
"func": affine,
"args": [0, 1]
},
"color": {
"func": color,
"args": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
},
"invert": {
"func": invert,
"args": []
},
"hsv": {
"func": hsv,
"args": []
},
"lab": {
"func": lab,
"args": []
},
"roll": {
"func": roll_,
"args": []
},
"flip_h": {
"func": flip_h,
"args": []
},
"flip_v": {
"func": flip_v,
"args": []
},
"quantize": {
"func": quantize,
"args": []
},
"perspective": {
"func": perspective,
"args": []
},
"circles": {
"func": circles,
"args": []
},
"Median": {
"func": Median,
"args": []
},
"lowpass": {
"func": gaussian,
"args": []
},
"highpass": {
"func": oneminusgaussian,
"args": []
},
"face": {
"func": face,
"args": []
},
"logpolar":{
"func": logpolar,
"args": []
}
}
# be able to call in process with:
# button = config[id]
# button['func'](frame, *button['args'])
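#
# A minimal self-contained sketch of that dispatch (the zero frame below is a
# placeholder, not real camera input):
#
#     import numpy
#     frame = numpy.zeros((480, 640, 3), dtype=numpy.uint8)
#     button = config["Gamma"]
#     frame = button['func'](frame, *button['args'])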
|
natedileas/ImageRIT
|
Server/effects/config.py
|
Python
|
gpl-3.0
| 6,477
|
[
"Gaussian"
] |
5a69f9f867a8a68b6e7da9660008c19f3d552b982961c2ea7cd4272d1daa7781
|
"""Boundary conditions for receptor based boundary conditions
including unbinding and rebinding.
"""
from collections import defaultdict
import logging
import itertools as it
import time
import numpy as np
from geomm.grouping import group_pair
from geomm.superimpose import superimpose
from geomm.rmsd import calc_rmsd
from geomm.centering import center_around
import mdtraj as mdj
from wepy.walker import WalkerState
from wepy.util.util import box_vectors_to_lengths_angles
from wepy.util.mdtraj import json_to_mdtraj_topology
from wepy.boundary_conditions.boundary import BoundaryConditions
class ReceptorBC(BoundaryConditions):
"""Abstract base class for ligand-receptor based boundary conditions.
Provides shared utilities for warping walkers to any number of
optionally weighted initial structures through a shared
`warp_walkers` method.
Non-abstract implementations of this class need only implement the
`_progress` method which should return a boolean signalling a
warping event and the dictionary-style warping record of the
progress for only a single walker. These records will be collated
into a single progress record across all walkers.
    Additionally, the `_update_bc` method can be overridden to return
'BC' group records. That method should accept the arguments shown
in this ABC and return a list of dictionary-style 'BC' records.
Warping of walkers with multiple initial states will be done
according to a choice of initial states weighted on their weights,
if given.
"""
# records of boundary condition changes (sporadic)
BC_FIELDS = ()
BC_SHAPES = ()
BC_DTYPES = ()
BC_RECORD_FIELDS = ()
# warping fields are directly inherited
# progress towards the boundary conditions (continual)
PROGRESS_FIELDS = ()
PROGRESS_SHAPES = ()
PROGRESS_DTYPES = ()
PROGRESS_RECORD_FIELDS = ()
DISCONTINUITY_TARGET_IDXS = Ellipsis
"""Specifies which 'target_idxs' values are considered discontinuous targets.
Values are either integer indices, Ellipsis (indicating all
possible values are discontinuous), or None indicating no possible
value is discontinuous.
"""
def __init__(self, initial_states=None,
initial_weights=None,
ligand_idxs=None,
receptor_idxs=None,
**kwargs):
"""Base constructor for ReceptorBC.
This should be called immediately in the subclass `__init__`
method.
        If the initial weights for each initial state are not given,
        uniform weights are assigned to them.
Arguments
---------
initial_states : list of objects implementing the State interface
The list of possible states that warped walkers will assume.
initial_weights : list of float, optional
List of normalized probabilities of the initial_states
provided. If not given, uniform probabilities will be
used.
ligand_idxs : arraylike of int
The indices of the atom positions in the state considered
the ligand.
receptor_idxs : arraylike of int
The indices of the atom positions in the state considered
the receptor.
Raises
------
AssertionError
If any of the following kwargs are not given:
initial_states, ligand_idxs, receptor_idxs.
"""
# make sure necessary inputs are given
assert initial_states is not None, "Must give a set of initial states"
assert ligand_idxs is not None, "Must give ligand indices"
assert receptor_idxs is not None, "Must give binding site indices"
self._initial_states = initial_states
self._ligand_idxs = ligand_idxs
self._receptor_idxs = receptor_idxs
        # we want to choose initial states conditional on their
        # initial probability if specified. If not specified, assume
        # uniform probabilities.
if initial_weights is None:
self._initial_weights = [1/len(initial_states) for _ in initial_states]
else:
self._initial_weights = initial_weights
@property
def initial_states(self):
"""The possible initial states warped walkers may assume."""
return self._initial_states
@property
def initial_weights(self):
"""The probabilities of each initial state being chosen during a warping."""
return self._initial_weights
@property
def ligand_idxs(self):
"""The indices of the atom positions in the state considered the ligand."""
return self._ligand_idxs
@property
def receptor_idxs(self):
"""The indices of the atom positions in the state considered the receptor."""
return self._receptor_idxs
def _progress(self, walker):
"""The method that must be implemented in non-abstract subclasses.
Should decide if a walker should be warped or not and what its
progress is regardless.
Parameters
----------
walker : object implementing the Walker interface
Returns
-------
to_warp : bool
Whether the walker should be warped or not.
progress_data : dict of str : value
Dictionary of the progress record group fields
for this walker alone.
"""
raise NotImplementedError
def _warp(self, walker):
"""Perform the warping of a walker.
Chooses an initial state to replace the walker's state with
        according to its given weight.
Returns a walker of the same type and weight.
Parameters
----------
walker : object implementing the Walker interface
Returns
-------
warped_walker : object implementing the Walker interface
The walker with the state after the warping. Weight should
be the same.
warping_data : dict of str : value
The dictionary-style 'WARPING' record for this
event. Excluding the walker index which is done in the main
`warp_walkers` method.
"""
# choose a state randomly from the set of initial states
        target_idx = np.random.choice(range(len(self.initial_states)), 1,
                                      p=np.asarray(self.initial_weights)/np.sum(self.initial_weights))[0]
warped_state = self.initial_states[target_idx]
# set the initial state into a new walker object with the same weight
warped_walker = type(walker)(state=warped_state, weight=walker.weight)
# the data for the warp
warp_data = {'target_idx' : np.array([target_idx]),
'weight' : np.array([walker.weight])}
return warped_walker, warp_data
def _update_bc(self, new_walkers, warp_data, progress_data, cycle):
"""Perform an update to the boundary conditions.
No updates to the bc are ever done in this null
implementation.
Parameters
----------
new_walkers : list of walkers
The walkers after warping.
warp_data : list of dict
progress_data : dict
cycle : int
Returns
-------
bc_data : list of dict
The dictionary-style records for BC update events
"""
# do nothing by default
return []
def warp_walkers(self, walkers, cycle):
"""Test the progress of all the walkers, warp if required, and update
the boundary conditions.
Arguments
---------
walkers : list of objects implementing the Walker interface
cycle : int
The index of the cycle.
Returns
-------
new_walkers : list of objects implementing the Walker interface
The new set of walkers that may have been warped.
warp_data : list of dict of str : value
The dictionary-style records for WARPING update events
bc_data : list of dict of str : value
The dictionary-style records for BC update events
progress_data : dict of str : arraylike
The dictionary-style records for PROGRESS update events
"""
new_walkers = []
# sporadic, zero or many records per call
warp_data = []
bc_data = []
# continual, one record per call
progress_data = defaultdict(list)
# calculate progress data
all_progress_data = [self._progress(w) for w in walkers]
for walker_idx, walker in enumerate(walkers):
# unpack progress data
to_warp, walker_progress_data = all_progress_data[walker_idx]
# add that to the progress data record
for key, value in walker_progress_data.items():
progress_data[key].append(value)
            # if the walker meets the requirements for warping, warp it
if to_warp:
# warp the walker
warped_walker, walker_warp_data = self._warp(walker)
# add the walker idx to the walker warp record
walker_warp_data['walker_idx'] = np.array([walker_idx])
# save warped_walker in the list of new walkers to return
new_walkers.append(warped_walker)
# save the instruction record of the walker
warp_data.append(walker_warp_data)
logging.info('WARP EVENT observed at {}'.format(cycle))
logging.info('Warped Walker Weight = {}'.format(
walker_warp_data['weight']))
# no warping so just return the original walker
else:
new_walkers.append(walker)
        # consolidate the progress data into a single feature vector
        # per field for the cycle
for key, value in progress_data.items():
progress_data[key] = value
# if the boundary conditions need to be updated given the
# cycle and state from warping perform that now and return any
# record data for that
bc_data = self._update_bc(new_walkers, warp_data, progress_data, cycle)
return new_walkers, warp_data, bc_data, progress_data
@classmethod
def warping_discontinuity(cls, warping_record):
"""Tests whether a warping record generated by this class is
discontinuous or not.
Parameters
----------
warping_record : tuple
The WARPING type record.
Returns
-------
is_discontinuous : bool
True if a discontinuous warp False if continuous.
"""
# if it is Ellipsis then all possible values are discontinuous
if cls.DISCONTINUITY_TARGET_IDXS is Ellipsis:
return True
# if it is None then all possible values are continuous
elif cls.DISCONTINUITY_TARGET_IDXS is None:
return False
# otherwise it will have a tuple of indices for the
# target_idxs that are discontinuous targets
elif warping_record[2] in cls.DISCONTINUITY_TARGET_IDXS:
return True
# otherwise it wasn't a discontinuous target
else:
return False
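# A minimal sketch of a concrete subclass, assuming a walker state that
# exposes a scalar 'distance' field (hypothetical; shown only to illustrate
# the `_progress` contract described in the ReceptorBC docstring):
#
#     class ThresholdBC(ReceptorBC):
#
#         PROGRESS_FIELDS = ReceptorBC.PROGRESS_FIELDS + ('distance',)
#         PROGRESS_RECORD_FIELDS = ReceptorBC.PROGRESS_RECORD_FIELDS + ('distance',)
#
#         def _progress(self, walker):
#             distance = float(walker.state['distance'])
#             to_warp = distance > 1.0
#             return to_warp, {'distance': distance}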
class RebindingBC(ReceptorBC):
"""Boundary condition for doing re-binding simulations of ligands to a
receptor.
Implements the ReceptorBC superclass.
This boundary condition will warp walkers to a number of initial
states whenever a walker becomes very close to the native (bound)
state.
Thus the choice of the 'initial_states' argument should be walkers
which are completely unbound (the choice of which are weighted by
    'initial_weights') and the choice of 'native_state' should be of a
ligand bound to the receptor, e.g. X-ray crystallography or docked
structure.
The cutoff for the boundary is an RMSD of the walker to the native
state which is calculated by first aligning and superimposing the
    entire structure according to the atom indices specified in
'binding_site_idxs', and as the name suggests should correspond to
some approximation of the binding site of the ligand that occurs
in the native state. Then the raw RMSD of the native and walker
ligands is calculated. If this RMSD is less than the 'cutoff_rmsd'
argument the walker is warped.
PROGRESS is reported for each walker from this rmsd.
The BC records are never updated.
"""
# Records of boundary condition changes (sporadic)
BC_FIELDS = ReceptorBC.BC_FIELDS + ('native_rmsd_cutoff', )
"""The 'native_rmsd_cutoff' is the cutoff used to determine when
walkers have re-bound to the receptor, which is defined as the
RMSD of the ligand to the native ligand bound state, when the
binding sites are aligned and superimposed.
"""
BC_SHAPES = ReceptorBC.BC_SHAPES + ((1,), )
BC_DTYPES = ReceptorBC.BC_DTYPES + (np.float, )
BC_RECORD_FIELDS = ReceptorBC.BC_RECORD_FIELDS + ('native_rmsd_cutoff', )
# warping (sporadic)
WARPING_FIELDS = ReceptorBC.WARPING_FIELDS + ()
WARPING_SHAPES = ReceptorBC.WARPING_SHAPES + ()
WARPING_DTYPES = ReceptorBC.WARPING_DTYPES + ()
WARPING_RECORD_FIELDS = ReceptorBC.WARPING_RECORD_FIELDS + ()
# progress towards the boundary conditions (continual)
PROGRESS_FIELDS = ReceptorBC.PROGRESS_FIELDS + ('native_rmsd',)
PROGRESS_SHAPES = ReceptorBC.PROGRESS_SHAPES + (Ellipsis,)
PROGRESS_DTYPES = ReceptorBC.PROGRESS_DTYPES + (np.float,)
PROGRESS_RECORD_FIELDS = ReceptorBC.PROGRESS_RECORD_FIELDS + ('native_rmsd', )
"""Records for the state of this record group.
    The 'native_rmsd' is the RMSD of the ligand to the native
ligand bound state, when the binding sites are aligned and
superimposed.
"""
def __init__(self, native_state=None,
cutoff_rmsd=0.2,
initial_states=None,
initial_weights=None,
ligand_idxs=None,
binding_site_idxs=None,
**kwargs):
"""Constructor for RebindingBC.
Arguments
---------
native_state : object implementing the State interface
The reference bound state. Will be automatically centered.
cutoff_rmsd : float
The cutoff RMSD for considering a walker bound.
initial_states : list of objects implementing the State interface
The list of possible states that warped walkers will assume.
initial_weights : list of float, optional
List of normalized probabilities of the initial_states
provided. If not given, uniform probabilities will be
used.
ligand_idxs : arraylike of int
The indices of the atom positions in the state considered
the ligand.
binding_site_idxs : arraylike of int
The indices of the atom positions in the state considered
the binding site.
Raises
------
AssertionError
If any of the following kwargs are not given:
            native_state, initial_states, ligand_idxs, binding_site_idxs.
"""
super().__init__(initial_states=initial_states,
initial_weights=initial_weights,
ligand_idxs=ligand_idxs,
                         receptor_idxs=binding_site_idxs,
                         **kwargs)
# test inputs
assert native_state is not None, "Must give a native state"
assert type(cutoff_rmsd) is float
native_state_d = native_state.dict()
        # save the native state and center it around its binding site
native_state_d['positions'] = center_around(native_state['positions'], binding_site_idxs)
native_state = WalkerState(**native_state_d)
# save attributes
self._native_state = native_state
self._cutoff_rmsd = cutoff_rmsd
@property
def native_state(self):
"""The reference bound state to which walkers are compared."""
return self._native_state
@property
def cutoff_rmsd(self):
"""The cutoff RMSD for considering a walker bound."""
return self._cutoff_rmsd
@property
def binding_site_idxs(self):
"""The indices of the atom positions in the state considered the binding site."""
return self._receptor_idxs
def _progress(self, walker):
"""Calculate if the walker has bound and provide progress record.
Parameters
----------
walker : object implementing the Walker interface
Returns
-------
        is_bound : bool
            Whether the walker has re-bound (and so will be warped) or not
progress_data : dict of str : value
Dictionary of the progress record group fields
for this walker alone.
"""
# first recenter the ligand and the receptor in the walker
box_lengths, box_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])
grouped_walker_pos = group_pair(walker.state['positions'], box_lengths,
self.binding_site_idxs, self.ligand_idxs)
# center the positions around the center of the binding site
centered_walker_pos = center_around(grouped_walker_pos, self.binding_site_idxs)
# superimpose the walker state positions over the native state
# matching the binding site indices only
sup_walker_pos, _, _ = superimpose(self.native_state['positions'], centered_walker_pos,
idxs=self.binding_site_idxs)
# calculate the rmsd of the walker ligand (superimposed
# according to the binding sites) to the native state ligand
native_rmsd = calc_rmsd(self.native_state['positions'], sup_walker_pos,
idxs=self.ligand_idxs)
# test to see if the ligand is re-bound
rebound = False
if native_rmsd <= self.cutoff_rmsd:
rebound = True
progress_data = {'native_rmsd' : native_rmsd}
return rebound, progress_data
class UnbindingBC(ReceptorBC):
"""Boundary condition for ligand unbinding.
Walkers will be warped (discontinuously) if all atoms in the
ligand are at least a certain distance away from the atoms in the
receptor (i.e. the min-min of ligand-receptor distances > cutoff).
Warping will replace the walker state with the initial state given
as a parameter to this class.
Also reports on the progress of that min-min for each walker.
"""
# records of boundary condition changes (sporadic)
BC_FIELDS = ReceptorBC.BC_FIELDS + ('boundary_distance', )
"""
Only occurs at the start of the simulation and just reports on the
min-min cutoff distance.
"""
BC_SHAPES = ReceptorBC.BC_SHAPES + ((1,), )
BC_DTYPES = ReceptorBC.BC_DTYPES + (np.float, )
BC_RECORD_FIELDS = ReceptorBC.BC_RECORD_FIELDS + ('boundary_distance', )
# warping (sporadic)
WARPING_FIELDS = ReceptorBC.WARPING_FIELDS + ()
WARPING_SHAPES = ReceptorBC.WARPING_SHAPES + ()
WARPING_DTYPES = ReceptorBC.WARPING_DTYPES + ()
WARPING_RECORD_FIELDS = ReceptorBC.WARPING_RECORD_FIELDS + ()
# progress record group
PROGRESS_FIELDS = ReceptorBC.PROGRESS_FIELDS + ('min_distances',)
"""
The 'min_distances' field reports on the min-min ligand-receptor
distance for each walker.
"""
PROGRESS_SHAPES = ReceptorBC.PROGRESS_SHAPES + (Ellipsis,)
PROGRESS_DTYPES = ReceptorBC.PROGRESS_DTYPES + (np.float,)
PROGRESS_RECORD_FIELDS = ReceptorBC.PROGRESS_RECORD_FIELDS + ('min_distances', )
def __init__(self, initial_state=None,
cutoff_distance=1.0,
topology=None,
ligand_idxs=None,
receptor_idxs=None,
periodic=True,
**kwargs):
"""Constructor for UnbindingBC class.
        All the keyword arguments are necessary.
The 'initial_state' should be the initial state of your
simulation for proper non-equilibrium simulations.
Arguments
---------
initial_state : object implementing State interface
The state walkers will take on after unbinding.
cutoff_distance : float
            The distance that specifies the boundary condition. When
            the min-min ligand-receptor distance is greater than this
            the walker will be warped.
topology : str
A JSON string of topology.
ligand_idxs : list of int
Indices of the atoms in the topology that correspond to the ligands.
receptor_idxs : list of int
Indices of the atoms in the topology that correspond to the
receptor for the ligand.
Raises
------
AssertionError
If any of the following are not provided: initial_state, topology,
ligand_idxs, receptor_idxs
AssertionError
If the cutoff distance is not a float.
Warnings
--------
The 'initial_state' should be the initial state of your
simulation for proper non-equilibrium simulations.
Notes
-----
The topology argument is necessary due to an implementation
detail that uses mdtraj and may not be required in the future.
"""
# since the super class can handle multiple initial states we
# wrap the single initial state to a list.
super().__init__(initial_states=[initial_state],
ligand_idxs=ligand_idxs,
receptor_idxs=receptor_idxs,
**kwargs)
# test input
assert topology is not None, "Must give a reference topology"
assert type(cutoff_distance) is float
self._cutoff_distance = cutoff_distance
self._topology = topology
# convert the json topology to an mdtraj one
self._mdj_top = json_to_mdtraj_topology(self._topology)
# whether or not to use the periodic box vectors in the
# distance calculation
self._periodic = periodic
@property
def cutoff_distance(self):
"""The distance a ligand must be to be unbound."""
return self._cutoff_distance
@property
def topology(self):
"""JSON string topology of the system."""
return self._topology
def _calc_min_distance(self, walker):
"""Min-min distance for a walker.
Parameters
----------
walker : object implementing the Walker interface
Returns
-------
min_distance : float
"""
cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])
t2 = time.time()
# make a traj out of it so we can calculate distances through
# the periodic boundary conditions
walker_traj = mdj.Trajectory(walker.state['positions'],
topology=self._mdj_top,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
t3 = time.time()
# calculate the distances through periodic boundary conditions
        # and get the minimum distance
min_distance = np.min(mdj.compute_distances(walker_traj,
it.product(self.ligand_idxs,
self.receptor_idxs),
periodic=self._periodic)
)
t4 = time.time()
logging.info("Make a traj: {0}; Calc dists: {1}".format(t3-t2,t4-t3))
return min_distance
def _progress(self, walker):
"""Calculate whether a walker has unbound and also provide a
dictionary for a single walker in the progress records.
Parameters
----------
walker : object implementing the Walker interface
Returns
-------
is_unbound : bool
Whether the walker is unbound (warped) or not
progress_data : dict of str : value
Dictionary of the progress record group fields
for this walker alone.
"""
min_distance = self._calc_min_distance(walker)
# test to see if the ligand is unbound
unbound = False
if min_distance >= self._cutoff_distance:
unbound = True
progress_data = {'min_distances' : min_distance}
return unbound, progress_data
def _update_bc(self, new_walkers, warp_data, progress_data, cycle):
"""Perform an update to the boundary conditions.
This is only used on the first cycle to keep a record of the
cutoff parameter.
Parameters
----------
new_walkers : list of walkers
The walkers after warping.
warp_data : list of dict
progress_data : dict
cycle : int
Returns
-------
bc_data : list of dict
The dictionary-style records for BC update events
"""
# Only report a record on
# the first cycle which gives the distance at which walkers
# are warped
if cycle == 0:
return [{'boundary_distance' : np.array([self._cutoff_distance]),},]
else:
return []
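# Construction sketch (all values hypothetical; the topology must be a JSON
# topology string as the constructor docstring describes):
#
#     bc = UnbindingBC(initial_state=init_state,
#                      cutoff_distance=1.0,
#                      topology=json_top_str,
#                      ligand_idxs=list(range(0, 30)),
#                      receptor_idxs=list(range(30, 3000)))
#     new_walkers, warp_data, bc_data, progress_data = bc.warp_walkers(walkers, cycle=0)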
|
ADicksonLab/wepy
|
src/wepy/boundary_conditions/receptor.py
|
Python
|
mit
| 25,643
|
[
"MDTraj"
] |
fd419c589ef01a3ecb8042be60af22cbac55bafaa7f422db06c088fa9b2a1bd1
|
# -*- encoding: utf-8 -*-
'''
Created on Nov 12, 2014
@author: antonio
'''
import os, shlex, subprocess
class LatexReport(object):
    '''
    Class representing a report
    '''
def __init__(self, title, author):
try:
            os.mkdir("plots/")  # create the folder for saved figures, if it does not already exist
        except OSError:
pass
self.latexReport = '''
\\documentclass[10pt,a4paper]{report}
\\usepackage[utf8]{inputenc}
\\usepackage[spanish]{babel}
\\usepackage{amsmath}
\\usepackage{amsfonts}
\\usepackage{amssymb}
\\usepackage{graphicx}
\\usepackage{multirow}
\\author{%s}
\\title{%s}
\\makeindex
\\begin{document}
\\maketitle
\\pagebreak
\\tableofcontents
\\pagebreak
'''%(author, title)
def addSection(self, sectionTitle):
self.latexReport += '''\\section{%s}'''%(sectionTitle)
def addContent(self, content):
self.latexReport+=content;
def createPDF(self, fname="report"):
self.latexReport += '''\\end{document}'''
with open('%s.tex'%(fname),'w') as f:
            f.write(self.latexReport.encode('utf8'))  # write the .tex file
        proc=subprocess.Popen(shlex.split('pdflatex %s.tex'%(fname)))  # generate the report PDF
proc.communicate()
proc=subprocess.Popen(shlex.split('rm %s.aux %s.idx %s.log %s.tex %s.toc'%(fname,fname,fname,fname,fname)))
proc=subprocess.Popen(shlex.split('evince %s.pdf'%(fname)))
proc.communicate()
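# Usage sketch (illustrative; requires pdflatex and evince on the PATH):
#
#     report = LatexReport(title="Benchmark results", author="A. Molina")
#     report.addSection("Introduction")
#     report.addContent("Some introductory text.")
#     report.createPDF(fname="benchmark_report")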
class LatexGenerator(object):
    @staticmethod
    def generatePlot():
        # stub: plot generation is not yet implemented
        return "hola k ase"
@staticmethod
def generateTable(data, titles):
lTable = "\\begin{center}\\begin{tabular}{"
for i in xrange(len(data[0])):
lTable += "|c"
lTable += "|}\\hline"
nRowTitle = len(data[0])/len(titles)
for i in xrange(len(titles)):
if i == 0:
lTable += "\\multicolumn{%d}{|c|}{%s}"%(nRowTitle, titles[i])
else:
lTable += "& \\multicolumn{%d}{|c|}{%s}"%(nRowTitle, titles[i])
lTable += "\\\\ \\hline "
for i, row in enumerate(data):
for j, cell in enumerate(row):
if j == 0:
lTable += "%s "%(cell)
else:
lTable += "& %s"%(cell)
lTable += "\\\\ \\hline "
lTable += "\\end{tabular}\\end{center}"
return lTable
# Some code to debug
if __name__ == '__main__':
print LatexGenerator.generateTable([[1,2,3,4], [1,2,3,4]], ["titulo1", "titulo2"])
|
aydevosotros/TFG-AntonioMolina
|
TFG/benchmarks/ReportGenerator.py
|
Python
|
gpl-2.0
| 2,740
|
[
"ASE"
] |
5dad94383e76134f89bf6b330c56262f7562755ff2213810498a6eada211e8a1
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Run data tests for cclib."""
import importlib
import logging
import os
import sys
import unittest
import cclib
__filedir__ = os.path.realpath(os.path.dirname(__file__))
# We need this in Python3 for importing things from the same directory
# within the unit test files.
sys.path.insert(1, os.path.join(__filedir__, 'data'))
parser_names = [
"ADF", "DALTON", "FChk", "GAMESS", "GAMESSUK", "Gaussian", "Jaguar",
"Molpro", "Molcas", "MOPAC", "NWChem", "ORCA", "Psi4", "QChem",
"Turbomole",
]
all_parsers = {name: getattr(cclib.parser, name) for name in parser_names}
# Not used currently, but keeping in a list to keep track of which parsers
# are in the legacy bin.
legacy_parser_names = ["Psi3"]
module_names = [
"SP", "SPun", "GeoOpt", "Basis", "Core", # Basic calculations.
"MP", "CC", "CI", "TD", "TDun", # Post-SCF calculations.
"BOMD", "NMR", "Polar", "Scan", "vib" # Other property calculations.
]
all_modules = {tn: importlib.import_module('.data.test' + tn, package='test')
for tn in module_names}
def gettestdata():
"""Return a dict of the test file data."""
testdatadir = os.path.dirname(os.path.realpath(__file__))
with open(testdatadir + '/testdata') as testdatafile:
lines = testdatafile.readlines()
# Remove blank lines and those starting with '#'.
lines = [line for line in lines if (line.strip() and line[0] != '#')]
# Remove comment at end of lines (everything after a '#').
lines = [line.split('#')[0] for line in lines]
# Transform remaining lines into dictionaries.
cols = [line.split() for line in lines]
labels = ('module', 'parser', 'class', 'subdir', 'files')
testdata = [dict(zip(labels, (c[0], c[1], c[2], c[3], c[4:]))) for c in cols]
return testdata
def get_program_dir(parser_name):
"""Return a directory name given a parser name.
In at least one case (GAMESS-UK) the directory is named differently.
"""
if parser_name == "GAMESSUK":
return "GAMESS-UK"
return parser_name
def getdatafile(parser, subdir, files, stream=None, loglevel=logging.ERROR, datatype=None):
"""Returns a parsed logfile.
Inputs:
parser - a logfile parser class (subclass of LogFile)
subdir - subdirectory containing data files (program version)
files - data filename(s)
stream - where to log to (sys.stdout by default)
loglevel - what level to log at
datatype - ccData or child class
Outputs:
data - the resulting data object
logfile - the parser object used for parsing
"""
# Convert any string into the parser object we will be using.
if isinstance(parser, str):
parser = all_parsers[parser]
datadir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data"))
programdir = os.path.join(get_program_dir(parser.__name__), subdir)
inputs = [os.path.join(datadir, programdir, fn) for fn in files]
# We should be able to pass a list of length one here, but for some reason
# this does not work with some parsers and we get errors.
if len(inputs) == 1:
inputs = inputs[0]
stream = stream or sys.stdout
logfile = parser(inputs, logstream=stream, loglevel=loglevel,
datatype=datatype or cclib.parser.data.ccData)
data = logfile.parse()
return data, logfile
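# Usage sketch: the subdir and file names below mirror an entry used later in
# visualtests, so treat them as one example of the test-data layout.
#
#     data, logfile = getdatafile("Gaussian", "basicGaussian16", ["dvb_gopt.out"])
#     print(data.natom)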
def ccdata_getattribute_with_coverage(self, attr):
"""A bookkeeping version of __getattribute__ for ccData objects."""
if attr != '_attrlist' and attr in self._attrlist:
if not hasattr(self, 'coverage'):
self.coverage = {}
self.coverage[attr] = self.coverage.get(attr, 0) + 1
return object.__getattribute__(self, attr)
class DataSuite:
"""Suite containing data (logfile) tests in cclib.
This is supposed to represent a single run of the entire data test suite in cclib or
a subset of it. The main functions are to load data, run test cases in the data/
subdirectory, and do some basic bookkeeping.
"""
def __init__(self, parsers, modules, terse=False, silent=False, loglevel=logging.ERROR, stream=sys.stdout):
self.parsers = parsers
self.modules = modules
self.terse = terse or silent
self.silent = silent
self.loglevel = loglevel
self.stream = stream
# Load the test data and filter with parsers and modules.
self.testdata = gettestdata()
self.testdata = [td for td in self.testdata if td['parser'] in self.parsers]
self.testdata = [td for td in self.testdata if td['module'] in self.modules]
# We want to gather the unit tests and results in several lists/dicts,
# in order to easily generate summaries at the end.
self.errors = []
self.failures = []
self.alltests = []
self.perpackage = {p: [0, 0, 0, 0] for p in self.parsers}
def testall(self):
"""Run all unittests in all modules.
Run unit tests for all or a subset of parsers and modules. Arguments:
stream - stream used for all output
"""
stream_test = self.stream
if self.terse:
devnull = open(os.devnull, 'w')
stream_test = devnull
for td in self.testdata:
module = self.modules[td['module']]
parser = self.parsers[td['parser']]
test = getattr(module, td['class'])
description = ''
if not self.silent:
print("", file=stream_test)
description = "%s/%s: %s" % (td['subdir'], ",".join(td['files']), test.__doc__)
print("*** %s ***" % description, file=self.stream)
test.data, test.logfile = getdatafile(
parser, td['subdir'], td['files'], stream=self.stream, loglevel=self.loglevel,
datatype=test.datatype if hasattr(test, 'datatype') else None
)
# By overriding __getattribute__ temporarily with a custom method, we collect
# coverage information for data attributes while the tests are run. This slightly
# hacky approach is very convenient since it is self-contained and we don't
# need to worry about it when writing the actual test cases.
test.data.__class__.__getattribute__ = ccdata_getattribute_with_coverage
# Here we actually run the tests for this line in testdata.
myunittest = unittest.makeSuite(test)
results = unittest.TextTestRunner(stream=stream_test, verbosity=2).run(myunittest)
# We don't want to collect coverage stats beyond this point, so set __getattribute__
# back to its original value. Note that we are setting the class method.
test.data.__class__.__getattribute__ = object.__getattribute__
self.perpackage[td['parser']][0] += results.testsRun
self.perpackage[td['parser']][1] += len(results.errors)
self.perpackage[td['parser']][2] += len(results.failures)
self.perpackage[td['parser']][3] += len(getattr(results, 'skipped', []))
self.alltests.append(test)
self.errors.extend([description + "\n" + "".join(map(str, e)) for e in results.errors])
self.failures.extend([description + "\n" + "".join(map(str, f)) for f in results.failures])
if self.terse:
devnull.close()
return self.errors or self.failures
def summary(self):
"""Prints a summary of the suite after it has been run."""
if self.errors:
print("\n********* SUMMARY OF ERRORS *********\n", file=self.stream)
print("\n".join(self.errors), file=self.stream)
if self.failures:
print("\n********* SUMMARY OF FAILURES *********\n", file=self.stream)
print("\n".join(self.failures), file=self.stream)
print("\n********* SUMMARY PER PACKAGE ****************", file=self.stream)
names = sorted(self.perpackage.keys())
total = [0, 0, 0, 0]
print(" "*14, "\t".join(["Total", "Passed", "Failed", "Errors", "Skipped"]), file=self.stream)
fmt = "%3d\t%3d\t%3d\t%3d\t%3d"
for name in names:
l = self.perpackage[name]
args = (l[0], l[0]-l[1]-l[2]-l[3], l[2], l[1], l[3])
print(name.ljust(15), fmt % args, file=self.stream)
for i in range(4):
total[i] += l[i]
print("\n********* SUMMARY OF EVERYTHING **************", file=self.stream)
print("TOTAL: %d\tPASSED: %d\tFAILED: %d\tERRORS: %d\tSKIPPED: %d" \
%(total[0], total[0]-(total[1]+total[2]+total[3]), total[2], total[1], total[3]), file=self.stream)
def visualtests(self, stream=sys.stdout):
"""These are not formal tests -- but they should be eyeballed."""
parsers_to_test = {
'ADF2013.01' : getdatafile('ADF', "basicADF2013.01", ["dvb_gopt.adfout"])[0],
'DALTON2015' : getdatafile('DALTON', "basicDALTON-2015", ["dvb_gopt_ks.out"])[0],
'Firefly8.0' : getdatafile('GAMESS', "basicFirefly8.0", ["dvb_gopt_a.out"])[0],
'Gaussian16' : getdatafile('Gaussian', "basicGaussian16", ["dvb_gopt.out"])[0],
'GAMESS-US2018' : getdatafile('GAMESS', "basicGAMESS-US2018", ["dvb_gopt_a.out"])[0],
'Jaguar8.0' : getdatafile('Jaguar', "basicJaguar8.3", ["dvb_gopt_ks.out"])[0],
'Molpro2012' : getdatafile('Molpro', "basicMolpro2012", ["dvb_gopt.out", "dvb_gopt.log"])[0],
# Note that it doesn't make sense to put MOPAC here, as it
# is a semiempirical-only program.
'NWChem6.5' : getdatafile('NWChem', "basicNWChem6.5", ["dvb_gopt_ks.out"])[0],
'ORCA4.2' : getdatafile('ORCA', "basicORCA4.2", ["dvb_gopt.out"])[0],
'Psi4-1.3.1' : getdatafile('Psi4', "basicPsi4-1.3.1", ["dvb_gopt_rks.out"])[0],
'QChem5.4' : getdatafile('QChem', "basicQChem5.4", ["dvb_gopt.out"])[0],
}
parser_names = sorted(parsers_to_test.keys())
output = [parsers_to_test[pn] for pn in parser_names]
print("\n*** Visual tests ***", file=self.stream)
print("MO energies of optimised dvb", file=self.stream)
print(" ", "".join(["%-12s" % pn for pn in parser_names]), file=self.stream)
print("HOMO", " ".join(["%+9.4f" % out.moenergies[0][out.homos[0]] for out in output]), file=self.stream)
print("LUMO", " ".join(["%+9.4f" % out.moenergies[0][out.homos[0]+1] for out in output]), file=self.stream)
print("H-L ", " ".join(["%9.4f" % (out.moenergies[0][out.homos[0]+1]-out.moenergies[0][out.homos[0]],) for out in output]), file=self.stream)
def test_all(parsers, modules, terse, silent, loglevel, summary, visual_tests):
parsers = parsers or all_parsers
modules = modules or all_modules
data_suite = DataSuite(parsers, modules, terse=terse, silent=silent, loglevel=loglevel)
errors_or_failures = data_suite.testall()
if summary and not silent:
data_suite.summary()
if visual_tests and not silent:
data_suite.visualtests()
if errors_or_failures:
sys.exit(1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--terse", action="store_true")
parser.add_argument("--silent", action="store_true")
parser.add_argument(
"parser_or_module",
nargs="*",
help="Limit the test to the packages/parsers passed as arguments. "
"No arguments implies all parsers."
)
args = parser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.ERROR
# No matching parsers/modules implies all of them.
parsers = {p: all_parsers[p] for p in parser_names
if p in args.parser_or_module} or None
modules = {m: all_modules[m] for m in module_names
if m in args.parser_or_module} or None
test_all(parsers, modules, terse=args.terse, silent=args.silent, loglevel=loglevel, summary=True, visual_tests=True)
|
cclib/cclib
|
test/test_data.py
|
Python
|
bsd-3-clause
| 12,415
|
[
"ADF",
"Dalton",
"GAMESS",
"Gaussian",
"Jaguar",
"MOLCAS",
"MOPAC",
"Molpro",
"NWChem",
"ORCA",
"Psi4",
"TURBOMOLE",
"cclib"
] |
d8bf2e55d30df48882957bfd3ff31baee0e16ae4e50d21ddc107a8cbccf03254
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from unittest.case import expectedFailure
from unittest.case import skip
from commoncode import functional
from licensedcode import index
from licensedcode.match import get_texts
from commoncode.text import python_safe_name
"""
License test utilities.
"""
def make_license_test_function(
expected_licenses, test_file, test_data_file, test_name,
detect_negative=True, min_score=0,
expected_failure=False,
# if not False, a reason string must be provided
skip_test=False,
# if True detailed traces including matched texts will be returned
trace_text=False):
"""
Build and return a test function closing on tests arguments.
"""
if not isinstance(expected_licenses, list):
expected_licenses = [expected_licenses]
def closure_test_function(*args, **kwargs):
idx = index.get_index()
matches = idx.match(location=test_file, min_score=min_score,
# if negative, do not detect negative rules when testing negative rules
detect_negative=detect_negative)
if not matches:
matches = []
# TODO: we should expect matches properly, not with a grab bag of flat license keys
# flattened list of all detected license keys across all matches.
detected_licenses = functional.flatten(map(unicode, match.rule.licenses) for match in matches)
try:
if not detect_negative:
                # we skipped negative detection for a negative rule;
                # we just want to ensure that the rule was matched properly
assert matches and not expected_licenses and not detected_licenses
else:
assert expected_licenses == detected_licenses
except:
# On failure, we compare against more result data to get additional
# failure details, including the test_file and full match details
match_failure_trace = []
if trace_text:
for match in matches:
qtext, itext = get_texts(match, location=test_file, idx=idx)
rule_text_file = match.rule.text_file
rule_data_file = match.rule.data_file
match_failure_trace.extend(['', '',
'======= MATCH ====', match,
'======= Matched Query Text for:',
'file://{test_file}'.format(**locals())
])
if test_data_file:
match_failure_trace.append('file://{test_data_file}'.format(**locals()))
match_failure_trace.append(qtext.splitlines())
match_failure_trace.extend(['',
                        '======= Matched Rule Text for:',
                        'file://{rule_text_file}'.format(**locals()),
'file://{rule_data_file}'.format(**locals()),
itext.splitlines(),
])
# this assert will always fail and provide a detailed failure trace
assert expected_licenses == detected_licenses + [test_name, 'test file: file://' + test_file] + match_failure_trace
closure_test_function.__name__ = test_name
closure_test_function.funcname = test_name
if skip_test:
skipper = skip(repr(skip_test))
closure_test_function = skipper(closure_test_function)
if expected_failure:
closure_test_function = expectedFailure(closure_test_function)
return closure_test_function
def print_matched_texts(match, location=None, query_string=None, idx=None):
"""
Convenience function to print matched texts for tracing and debugging tests.
"""
qtext, itext = get_texts(match, location=location, query_string=query_string, idx=idx)
print()
print('Matched qtext:')
print(qtext)
print()
print('Matched itext:')
print(itext)
def check_license(location=None, query_string=None, expected=(), test_data_dir=None):
if query_string:
idx = index.get_index()
matches = idx.match(location=location, query_string=query_string)
results = functional.flatten(map(unicode, match.rule.licenses) for match in matches)
assert expected == results
else:
test_name = python_safe_name('test_' + location.replace(test_data_dir, ''))
tester = make_license_test_function(
expected_licenses=expected, test_file=location,
test_data_file=None, test_name=test_name,
trace_text=True)
tester()
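# Usage sketch (paths are illustrative): compare the detected license keys of
# a file against an expected list, with detailed match traces on failure.
#
#     check_license(
#         location='tests/licensedcode/data/licenses/apache-2.0.txt',
#         expected=[u'apache-2.0'],
#         test_data_dir='tests/licensedcode/data',
#     )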
|
yasharmaster/scancode-toolkit
|
tests/licensedcode/license_test_utils.py
|
Python
|
apache-2.0
| 6,019
|
[
"VisIt"
] |
6450e43d9e58feb8a0c773e62f1a7bc806ceaca2870ea077962b783be4086ce1
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
from unittest import TestCase
import numpy as np
import tensorflow as tf
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.learn.tf2 import Estimator
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400
def linear_dataset(a=2, size=1000):
x = np.random.rand(size)
y = x / 2
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
return x, y
def create_train_datasets(config, batch_size):
x_train, y_train = linear_dataset(size=NUM_TRAIN_SAMPLES)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(NUM_TRAIN_SAMPLES).batch(
batch_size)
return train_dataset
def create_test_dataset(config, batch_size):
x_test, y_test = linear_dataset(size=NUM_TEST_SAMPLES)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(batch_size)
return test_dataset
def simple_model(config):
model = tf.keras.models.Sequential([tf.keras.layers.Dense(10, input_shape=(1,)),
tf.keras.layers.Dense(1)])
return model
def compile_args(config):
if "lr" in config:
lr = config["lr"]
else:
lr = 1e-3
args = {
"optimizer": tf.keras.optimizers.SGD(lr),
"loss": "mean_squared_error",
"metrics": ["mean_squared_error"]
}
return args
def model_creator(config):
model = simple_model(config)
model.compile(**compile_args(config))
return model
class TestTF2Estimator(TestCase):
def setUp(self):
init_orca_context(runtime="ray", address="localhost:6379")
def tearDown(self):
stop_orca_context()
def test_train(self):
estimator = Estimator.from_keras(model_creator=model_creator,
verbose=True,
config=None,
backend="tf2",
workers_per_node=2)
start_stats = estimator.evaluate(create_test_dataset, batch_size=32)
print(start_stats)
train_stats = estimator.fit(create_train_datasets, epochs=1, batch_size=32)
print("This is Train Results:", train_stats)
end_stats = estimator.evaluate(create_test_dataset, batch_size=32)
print("This is Val Results:", end_stats)
assert estimator.get_model()
dloss = end_stats["validation_loss"] - start_stats["validation_loss"]
dmse = (end_stats["validation_mean_squared_error"] -
start_stats["validation_mean_squared_error"])
print(f"dLoss: {dloss}, dMSE: {dmse}")
assert dloss < 0 and dmse < 0, "training sanity check failed. loss increased!"
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/BigDL
|
python/orca/test/bigdl/orca/learn/ray/tf/test_ray_tf2estimator.py
|
Python
|
apache-2.0
| 3,429
|
[
"ORCA"
] |
f9cb0b8b08354ff714fbbb563d9e44206d2ae4c40bc904b7b377686be9bb67cb
|
##############################################################################
# Medical Image Registration ToolKit (MIRTK)
#
# Copyright 2017 Imperial College London
# Copyright 2017 Andreas Schuh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import os
import sys
import math
import csv
import json
import re
import shutil
import subprocess
import time
from datetime import datetime
import mirtk
from mirtk.utils import makedirs
from mirtk.batch.slurm import submit as sbatch, wait as swait
from mirtk.batch.condor import submit as cbatch, wait as cwait
##############################################################################
# Spatio-temporal atlas
class SpatioTemporalAtlas(object):
"""Spatio-temporal image and deformation atlas
Configuration entries such as file and directory paths, final atlas time
points (means) and corresponding temporal kernel width/standard deviation
(sigma) are read from a JSON configuration file.
The `construct` function performs the group-wise atlas construction.
Template images for each atlas time point specified in the configuration
are written to the configured `outdir`. At intermediate steps, an average
image is created for each time point associated with an image in the
dataset from which the atlas is created. These can be found in the `img`
subdirectory of each iteration underneath the configured `tmpdir`.
After atlas construction, average images and longitudinal deformations
for every continuous time (within the atlas precision/max. resolution)
can be computed as long as the required deformations are stored.
"""
def _path(self, paths, name, default):
"""Get absolute path from config dictionary."""
path = paths.get(name, default)
return os.path.normpath(os.path.join(self.topdir, path)) if path else None
def __init__(self, config, root=None, step=-1, verbose=1, threads=-1, exit_on_error=False):
"""Load spatio-temporal atlas configuration."""
self.step = step
if isinstance(config, str):
config = os.path.abspath(config)
with open(config, "rt") as f:
self.config = json.load(f)
self.root = os.path.dirname(config)
else:
self.config = config
if root:
self.root = os.path.abspath(root)
else:
self.root = os.getcwd()
# Paths of image file and output of prior global normalization step
paths = self.config.get("paths", {})
images = self.config.get("images", {})
self.topdir = os.path.normpath(os.path.join(self.root, paths.get("topdir", ".")))
self.agecsv = self._path(paths, "agecsv", os.path.join(self.topdir, "config", "ages.csv"))
self.imgcsv = self._path(paths, "imgcsv", os.path.join(self.topdir, "config", "subjects.csv"))
self.imgage = read_ages(self.agecsv)
self.imgids = read_imgids(self.imgcsv)
self.tmpdir = self._path(paths, "tmpdir", "cache")
self.outdir = self._path(paths, "outdir", os.path.join(self.topdir, "templates"))
self.refimg = self._path(images, "ref", os.path.join(self.topdir, "global", "average.nii.gz"))
self.channel = images.get("default", "t2w")
if len(self.config["images"]) == 1:
            self.channel = list(self.config["images"].keys())[0]
# Parameters of temporal kernel regressions
regression = self.config.get("regression", {})
self.means = [float(t) for t in regression['means']]
self.sigma = regression.get("sigma", 1.)
if isinstance(self.sigma, (tuple, list)):
self.sigma = [float(t) for t in self.sigma]
if len(self.sigma) != len(self.means):
raise ValueError("Number of sigma values must equal number of mean values!")
else:
self.sigma = [float(self.sigma)] * len(self.means)
self.epsilon = float(regression.get("epsilon", .001))
self.precision = int(regression.get('precision', 2))
# Registration parameters
regcfg = self.config.get("registration", {})
growth = regcfg.get("growth", {})
self.num_bch_terms = growth.get("bchterms", 3) # composition should be symmetric, e.g., 2, 3, or 5 terms
self.age_specific_imgdof = growth.get("enabled", True)
if self.age_specific_imgdof:
self.age_specific_regdof = not growth.get("exclavg", False)
else:
self.age_specific_regdof = False
# Other workflow execution settings
envcfg = self.config.get("environment", {})
self.verbose = verbose
self.queue = {
"short": envcfg.get("queue", {}).get("short", "local").lower(),
"long": envcfg.get("queue", {}).get("long", "local").lower()
}
self.threads = envcfg.get("threads", 8) if threads < 0 else threads
self.mintasks = envcfg.get("mintasks", 1)
self.maxtasks = envcfg.get("maxtasks", 1000)
self.exit_on_error = exit_on_error
# Discard images not required for any final time point
imgids = set()
for t in self.means:
imgids |= set(self.weights(mean=t).keys())
self.imgids = list(imgids)
self.imgids.sort()
def _run(self, command, args=[], opts={}, step=-1, workdir=None, queue=None, name=None, submit_kwargs={}, wait_kwargs={}):
"""Execute single MIRTK command."""
if step < 0:
step = self.step
if not name:
name = command
if queue and queue.lower() == "local":
queue = None
if command in ("edit-dofs", "em-hard-segmentation", "average-measure"):
# These commands do not support the -threads option (yet)
threads = 0
else:
threads = self.threads
if "verbose" not in opts:
if isinstance(opts, list):
opts.append(("verbose", self.verbose - 2))
else:
opts["verbose"] = self.verbose - 2
        if "verbose" in opts and command in ("average-measure",):
# These commands do not support the -verbose option (yet)
del opts["verbose"]
if queue:
job = self._submit(name, command=command, args=args, opts=opts, step=step, workdir=workdir, script=None, tasks=-1, group=1, queue=queue, **submit_kwargs)
self.wait(job, **wait_kwargs)
else:
prevdir = os.getcwd()
if workdir:
os.chdir(workdir)
try:
if self.verbose > 1:
sys.stdout.write("\n\n")
mirtk.run(command, args=args, opts=opts, showcmd=(self.verbose > 1), threads=threads, onerror='exit' if self.exit_on_error else 'throw')
finally:
os.chdir(prevdir)
def _submit(self, name, command=None, args=[], opts={}, script=None, tasks=-1, group=1, step=-1, queue=None, memory=8 * 1024, workdir=None):
"""Submit batch script."""
if step < 0:
step = self.step
if not queue:
queue = self.queue
if tasks == 0:
return (queue, 0)
groups = tasks
threads = self.threads if self.threads > 0 else 1
if script:
source = 'import sys\nimport socket\n'
source += 'sys.stdout.write("Host: " + socket.gethostname() + "\\n\\n")\n'
source += 'sys.path.insert(0, "{0}")\n'.format(os.path.dirname(os.path.dirname(mirtk.__file__)))
source += 'sys.path.insert(0, "{0}")\n'.format(os.path.dirname(__file__))
source += 'from {0} import SpatioTemporalAtlas\n'.format(os.path.splitext(os.path.basename(__file__))[0])
source += 'atlas = SpatioTemporalAtlas(root="{root}", config={config}, step={step}, threads={threads}, verbose=3, exit_on_error=True)\n'
if tasks > 0:
tasks_per_group = max(group, self.mintasks, (tasks + self.maxtasks - 1) // self.maxtasks)
if tasks_per_group > 1:
groups = range(0, tasks, tasks_per_group)
source += 'groupid = int(sys.argv[1])\n'
source += 'if groupid < 0 or groupid >= {0}:\n'.format(len(groups))
source += ' sys.stderr.write("Invalid group ID\\n")\n'
source += 'tasks_per_group = {0}\n'.format(tasks_per_group)
source += 'for taskid in range(groupid * tasks_per_group, (groupid + 1) * tasks_per_group):\n'
source += ' if taskid >= {0}: break\n'.format(tasks)
source += ' ' + '\n '.join(script.splitlines()) + '\n'
source += ' else: sys.stderr.write("Invalid task ID\\n")\n'
groups = len(groups)
else:
source += 'taskid = int(sys.argv[1])\n'
source += 'if taskid < 0: sys.stderr.write("Invalid task ID\\n")\n'
source += script
source += 'else: sys.stderr.write("Invalid task ID\\n")\n'
else:
source += script
opts.update({
"root": self.root,
"config": repr(self.config),
"step": step,
"threads": threads
})
script = source
elif not command:
command = name
jobname = "i{0:02d}_{1}".format(step, name) if step >= 0 else name
if queue.lower() in ("condor", "htcondor"):
if tasks > 0:
log = os.path.join(self.subdir(step), "log", name + "_$(Cluster).$(Process).log")
else:
log = os.path.join(self.subdir(step), "log", name + "_$(Cluster).log")
condor_config = self.config.get("environment", {}).get("condor", {})
requirements = condor_config.get("requirements", [])
environment = condor_config.get("environment", {})
jobid = cbatch(name=jobname, command=command, args=args, opts=opts, script=script, tasks=groups,
log=log, threads=threads, memory=memory, requirements=requirements, environment=environment,
workdir=workdir, verbose=0)
else:
if tasks > 0:
log = os.path.join(self.subdir(step), "log", name + "_%A.%a.log")
else:
log = os.path.join(self.subdir(step), "log", name + "_%j.log")
jobid = sbatch(name=jobname, command=command, args=args, opts=opts, script=script, tasks=groups,
log=log, threads=threads, memory=memory, queue=queue,
workdir=workdir, verbose=0)
if tasks > 0:
self.info("Submitted batch '{}' (id={}, #jobs={}, #tasks={})".format(
name, jobid[0] if isinstance(jobid, tuple) else jobid, groups, tasks)
)
else:
self.info("Submitted job '{}' (id={})".format(name, jobid[0] if isinstance(jobid, tuple) else jobid))
return (queue, jobid)
def wait(self, jobs, interval=60, verbose=5):
"""Wait for batch jobs to complete."""
if not isinstance(jobs, list):
jobs = [jobs]
condor_jobs = []
slurm_jobs = []
for queue, jobid in jobs:
if queue and queue.lower() != "local":
if queue.lower() in ("condor", "htcondor"):
condor_jobs.append(jobid)
else:
slurm_jobs.append(jobid)
if not cwait(condor_jobs, interval=interval, verbose=verbose):
raise Exception("Not all HTCondor jobs finished successfully!")
if not swait(slurm_jobs, interval=interval, verbose=verbose):
raise Exception("Not all SLURM jobs finished successfully!")
def info(self, msg, step=-1):
"""Print status message."""
if self.verbose > 0:
sys.stdout.write("{:%Y-%b-%d %H:%M:%S} INFO ".format(datetime.now()))
sys.stdout.write(msg)
if step >= 0:
sys.stdout.write(" (step={})".format(step))
sys.stdout.write("\n")
def normtime(self, t):
"""Clamp time to be within atlas domain."""
return round(min(max(t, self.means[0]), self.means[-1]), self.precision)
def timeindex(self, t):
"""Get discrete time point index."""
if t <= self.means[0]:
return 0
i = len(self.means) - 1
if t >= self.means[-1]:
return i
for j in range(1, len(self.means)):
if self.means[j] > t:
i = j - 1
break
return i
def timename(self, t1, t2=None):
"""Get time point string used in file names."""
t1 = self.normtime(t1)
if t2 is None:
t2 = t1
else:
t2 = self.normtime(t2)
if isclose(t1, t2):
w = 2
if self.precision > 0:
w += self.precision + 1
return "t{0:0{1}.{2}f}".format(self.normtime(t1), w, self.precision)
return "{0:s}-{1:s}".format(self.timename(t1), self.timename(t2), self.precision)
def age(self, imgid):
"""Get time associated with the specified image, i.e., mean of temporal Gaussian kernel."""
return self.normtime(self.imgage[imgid])
def ages(self):
"""Get set of all ages associated with the images from which atlas is constructed."""
ages = set()
for imgid in self.imgids:
ages.add(self.age(imgid))
return list(ages.union(self.means))
def stdev(self, t):
"""Get standard deviation of temporal Gaussian kernel centered at time t."""
i = self.timeindex(t)
if i == len(self.sigma) - 1:
return self.sigma[i]
else:
alpha = (t - self.means[i]) / (self.means[i + 1] - self.means[i])
return (1. - alpha) * self.sigma[i] + alpha * self.sigma[i + 1]
def weight(self, t, mean, sigma=0, normalize=True, epsilon=None):
"""Evaluate temporal kernel weight for specified image."""
if sigma <= 0:
sigma = self.stdev(mean)
w = math.exp(- .5 * math.pow((t - mean) / sigma, 2))
if normalize:
w /= sigma * math.sqrt(2. * math.pi)
if epsilon is None:
epsilon = self.epsilon
return 0. if w < epsilon else w
def weights(self, mean=None, sigma=0, zero=False):
"""Get weights of images within local support of given temporal kernel."""
if mean is None:
wmean = {}
for mean in self.means:
wimg = {}
for imgid in self.imgids:
w = self.weight(self.age(imgid), mean=mean, sigma=sigma)
if zero or w > 0.:
wimg[imgid] = w
wmean[mean] = wimg
return wmean
else:
wimg = {}
for imgid in self.imgids:
w = self.weight(self.age(imgid), mean=mean, sigma=sigma)
if zero or w > 0.:
wimg[imgid] = w
return wimg
def subdir(self, step=-1):
"""Get absolute path of directory of current iteration."""
path = self.tmpdir
if step < 0:
step = self.step
if step >= 0:
path = os.path.join(path, "i{0:02d}".format(step))
return path
def splitchannel(self, channel=None):
"""Split channel specification into name and label specification."""
if not channel:
channel = self.channel
if "=" in channel:
channel, label = channel.split("=")
try:
l = int(label)
label = l
except ValueError:
label = label.split(",")
else:
label = 0
return (channel, label)
def image(self, imgid, channel=None, label=0, force=False, create=True):
"""Get absolute path of requested input image."""
if not channel:
channel = self.channel
cfg = self.config["images"][channel]
prefix = cfg["prefix"].replace("/", os.path.sep)
suffix = cfg.get("suffix", ".nii.gz")
img = os.path.normpath(os.path.join(self.topdir, prefix + imgid + suffix))
if label:
if isinstance(label, (tuple, list)):
lblstr = ','.join([str(l).strip() for l in label])
else:
lblstr = str(label).strip()
lblstr = lblstr.replace("..", "-")
msk = os.path.join(self.topdir, "masks", "{}_{}".format(channel, lblstr), imgid + ".nii.gz")
if create and (force or not os.path.exists(msk)):
makedirs(os.path.dirname(msk))
if not isinstance(label, list):
label = [label]
self._run("calculate-element-wise", args=[img, "-label"] + label + ["-set", 1, "-pad", 0, "-out", msk, "binary"])
img = msk
return img
def affdof(self, imgid):
"""Get absolute path of affine transformation of global normalization."""
cfg = self.config.get("registration", {}).get("affine", None)
if cfg is None:
return "identity"
prefix = cfg["prefix"].replace("/", os.path.sep)
suffix = cfg.get("suffix", ".dof")
return os.path.normpath(os.path.join(self.topdir, prefix + imgid + suffix))
def regcfg(self, step=-1):
"""Get registration configuration entries."""
configs = self.config.get("registration", {}).get("config", {})
if not isinstance(configs, list):
configs = [configs]
cfg = {}
for i in range(min(step, len(configs)) if step > 0 else len(configs)):
cfg.update(configs[i])
return cfg
def parin(self, step=-1, force=False, create=True):
"""Write registration configuration file and return path."""
if step < 0:
step = self.step
parin = os.path.join(self.subdir(step), "config", "register.cfg")
if create and (force or not os.path.exists(parin)):
cfg = self.regcfg(step)
images = self.config["images"]
channels = cfg.get("channels", ["t2w"])
if not isinstance(channels, list):
channels = [channels]
if len(channels) == 0:
raise ValueError("Empty list of images/channels specified for registration!")
params = ["[default]"]
# transformation model
model = cfg.get("model", "SVFFD")
mffd, model = model.split(":") if ":" in model else "None", model
params.append("Transformation model = {}".format(model))
params.append("Multi-level transformation = {}".format(mffd))
# energy function
energy = cfg.get("energy", "sym" if "svffd" in model.lower() else "asym")
if energy.lower() in ("asym", "asymmetric"):
sim_term_type = 0
elif energy.lower() in ("ic", "inverseconsistent", "inverse-consistent"):
sim_term_type = 1
elif energy.lower() in ("sym", "symmetric"):
sim_term_type = 2
else:
sim_term_type = -1
if sim_term_type < 0:
formula = energy
else:
formula = ""
measures = cfg.get("measures", {})
for c in range(len(channels)):
target = 2 * c + 1
source = 2 * c + 2
if isinstance(measures, list):
measure = measures[c] if c < len(measures) else measures[-1]
elif isinstance(measures, dict):
channel = self.splitchannel(channels[c])[0]
measure = measures.get(channel, "NMI")
else:
measure = measures
if sim_term_type == 2:
term = "{sim}[{channel} sim](I({tgt}) o T^-0.5, I({src}) o T^0.5)"
elif sim_term_type == 1:
term = "{sim}[{channel} fwd-sim](I({tgt}) o T^-1, I({src})) + {sim}[{channel} bwd-sim](I({tgt}), I({src}) o T)"
else:
term = "{sim}[{channel} sim](I({tgt}), I({src}) o T)"
if c > 0:
formula += " + "
formula += term.format(sim=measure, channel=channels[c].capitalize().replace("=", " "), tgt=target, src=source)
if "bending" in cfg:
formula += " + 0 BE[Bending energy](T)"
if "elasticity" in cfg:
formula += " + 0 LE[Linear elasticity](T)"
if "jacobian" in cfg:
formula += " + 0 JAC[Jacobian penalty](T)"
params.append("Energy function = " + formula)
params.append("No. of bins = {}".format(cfg.get("bins", 64)))
params.append("Local window size [box] = {}".format(cfg.get("window", "5 vox")))
params.append("No. of last function values = 10")
if "svffd" in model.lower():
params.append("Integration method = {}".format(cfg.get("ffdim", "FastSS")))
params.append("No. of integration steps = {}".format(cfg.get("intsteps", 64)))
params.append("No. of BCH terms = {}".format(cfg.get("bchterms", 4)))
params.append("Use Lie derivative = {}".format(cfg.get("liederiv", "No")))
# resolution pyramid
spacings = cfg.get("spacing", [])
if not isinstance(spacings, list):
spacings = [spacings]
resolutions = cfg.get("resolution", [])
if not isinstance(resolutions, list):
resolutions = [resolutions]
blurring = cfg.get("blurring", {})
if isinstance(blurring, list):
blurring = {self.splitchannel(channels[0])[0]: blurring}
be_weights = cfg.get("bending", [0])
if not isinstance(be_weights, list):
be_weights = [be_weights]
le_weights = cfg.get("elasticity", [0])
if not isinstance(le_weights, list):
le_weights = [le_weights]
lj_weights = cfg.get("jacobian", [0])
if not isinstance(lj_weights, list):
lj_weights = [lj_weights]
levels = cfg.get("levels", 0)
if levels <= 0:
levels = len(resolutions) if isinstance(resolutions, list) else 4
resolution = 0.
spacing = 0.
params.append("No. of resolution levels = {0}".format(levels))
params.append("Image interpolation mode = {0}".format(cfg.get("interpolation", "Linear with padding")))
params.append("Downsample images with padding = {0}".format("Yes" if cfg.get("padding", True) else "No"))
params.append("Image similarity foreground = {0}".format(cfg.get("foreground", "Overlap")))
params.append("Strict step length range = No")
params.append("Maximum streak of rejected steps = 2")
for level in range(1, levels + 1):
params.append("")
params.append("[level {}]".format(level))
resolution = float(resolutions[level - 1]) if level <= len(resolutions) else 2. * resolution
if resolution > 0.:
params.append("Resolution [mm] = {}".format(resolution))
for c in range(len(channels)):
target = 2 * c + 1
source = 2 * c + 2
image = images[self.splitchannel(channels[c])[0]]
bkgrnd = float(image.get("bkgrnd", -1))
params.append("Background value of image {0} = {1}".format(target, bkgrnd))
bkgrnd = -1. if "labels" in image else 0.
params.append("Background value of image {0} = {1}".format(source, bkgrnd))
for c in range(len(channels)):
target = 2 * c + 1
source = 2 * c + 2
channel = self.splitchannel(channels[c])[0]
sigmas = blurring.get(channel, [])
if not isinstance(sigmas, list):
sigmas = [sigmas]
if len(sigmas) == 0 and "labels" in images[channel]:
sigmas = [2]
if len(sigmas) > 0:
sigma = float(sigmas[level - 1] if level <= len(sigmas) else sigmas[-1])
params.append("Blurring of image {0} [vox] = {1}".format(target, sigma))
spacing = float(spacings[level - 1]) if level <= len(spacings) else 2. * spacing
if spacing > 0.:
params.append("Control point spacing = {0}".format(spacing))
be_weight = float(be_weights[level - 1] if level <= len(be_weights) else be_weights[-1])
params.append("Bending energy weight = {0}".format(be_weight))
le_weight = float(le_weights[level - 1] if level <= len(le_weights) else le_weights[-1])
params.append("Linear elasticity weight = {0}".format(le_weight))
lj_weight = float(lj_weights[level - 1] if level <= len(lj_weights) else lj_weights[-1])
params.append("Jacobian penalty weight = {0}".format(lj_weight))
# write -parin file for "register" command
makedirs(os.path.dirname(parin))
with open(parin, "wt") as f:
f.write("\n".join(params) + "\n")
return parin
def regdof(self, imgid, t=None, step=-1, path=None, force=False, create=True, batch=False):
"""Register atlas to specified image and return path of transformation file."""
if step < 0:
step = self.step
age = self.age(imgid)
if t is None or not self.age_specific_regdof:
t = age
if not path:
path = os.path.join(self.subdir(step), "dof", "deformation", self.timename(t), "{0}.dof.gz".format(imgid))
dof = path
if isclose(t, age):
if force or not os.path.exists(path):
if step < 1:
dof = "identity"
elif create:
cfg = self.regcfg(step)
args = []
affdof = self.affdof(imgid)
if affdof == "identity":
affdof = None
channels = cfg.get("channels", self.channel)
if not isinstance(channels, list):
channels = [channels]
if len(channels) == 0:
raise ValueError("Empty list of images/channels specified for registration!")
for channel in channels:
channel, label = self.splitchannel(channel)
args.append("-image")
args.append(self.image(imgid, channel=channel, label=label))
if affdof:
args.append("-dof")
args.append(affdof)
args.append("-image")
args.append(self.avgimg(t, channel=channel, label=label, step=step - 1, create=not batch))
makedirs(os.path.dirname(dof))
self._run("register", args=args, opts={
"parin": self.parin(step=step, force=force, create=not batch),
"mask": self.refimg,
"dofin": "identity",
"dofout": dof
})
else:
dof1 = self.regdof(imgid, t=age, step=step, force=force, create=create and not batch)
dof2 = self.growth(age, t, step=step - 1, force=force, create=create and not batch)
if dof1 == "identity" and dof2 == "identity":
dof = "identity"
elif dof1 == "identity":
dof = dof2
elif dof2 == "identity":
dof = dof1
elif create:
makedirs(os.path.dirname(dof))
self._run("compose-dofs", args=[dof1, dof2, dof], opts={"bch": self.num_bch_terms, "global": False})
return dof
def regdofs(self, step=-1, force=False, create=True, queue=None, batchname="register"):
"""Register all images to their age-specific template."""
if step < 0:
step = self.step
if not queue:
queue = self.queue["long"]
if queue == "local":
queue = None
self.info("Register images to template of corresponding age", step=step)
script = ""
tasks = 0
dofs = {}
for imgid in self.imgids:
dof = self.regdof(imgid, step=step, force=force, create=create and not queue)
if dof != "identity" and queue and (force or not os.path.exists(dof)):
remove_or_makedirs(dof)
script += 'elif taskid == {taskid}: atlas.regdof("{imgid}", batch=True)\n'.format(taskid=tasks, imgid=imgid)
tasks += 1
dofs[imgid] = dof
if create and queue:
self.parin(step=step, force=force) # create -parin file if missing
job = self._submit(batchname, script=script, tasks=tasks, step=step, queue=queue)
else:
job = (None, 0)
return (job, dofs)
def doftable(self, t, step=-1, force=False, create=True, batch=False):
"""Write table with image to atlas deformations of images with non-zero weight."""
dofs = []
t = self.normtime(t)
weights = self.weights(t)
all_dofs_are_identity = True
for imgid in self.imgids:
if imgid in weights:
if not self.age_specific_regdof or isclose(t, self.age(imgid)):
dof = self.regdof(imgid, t=t, step=step, force=force, create=create and not batch)
else:
dof = self.regdof(imgid, t=t, step=step, force=force, create=create, batch=batch)
dofs.append((dof, weights[imgid]))
if dof != "identity":
all_dofs_are_identity = False
if all_dofs_are_identity:
dofdir = None
dofnames = "identity"
elif len(dofs) > 0:
dofdir = self.topdir
dofnames = os.path.join(self.subdir(step), "config", "{}-dofs.tsv".format(self.timename(t)))
if create and (force or not os.path.exists(dofnames)):
makedirs(os.path.dirname(dofnames))
with open(dofnames, "wt") as table:
for dof, w in dofs:
if dofdir:
dof = os.path.relpath(dof, dofdir)
table.write("{}\t{}\n".format(dof, w))
else:
raise ValueError("No image has non-zero weight for time {0}!".format(t))
return (dofdir, dofnames)
def avgdof(self, t, path=None, step=-1, force=False, create=True, batch=False):
"""Get mean cross-sectional SV FFD transformation at given time."""
t = self.normtime(t)
if not path:
path = os.path.join(self.subdir(step), "dof", "average", "{0}.dof.gz".format(self.timename(t)))
if create and (force or not os.path.exists(path)):
dofdir, dofnames = self.doftable(t, step=step, force=force, batch=batch)
if dofnames == "identity":
path = "identity"
else:
makedirs(os.path.dirname(path))
self._run("average-dofs", args=[path], opts={
"dofdir": dofdir,
"dofnames": dofnames,
"type": "SVFFD",
"target": "common",
"invert": True,
"global": False
})
return path
def avgdofs(self, step=-1, force=False, create=True, queue=None, batchname="avgdofs"):
"""Compute all average SV FFDs needed for (parallel) atlas construction."""
if step < 0:
step = self.step
if not queue:
queue = self.queue["short"]
if queue == "local":
queue = None
self.info("Compute residual average deformations at each age", step=step)
script = ""
tasks = 0
dofs = {}
for t in self.ages():
dof = self.avgdof(t, step=step, force=force, create=create and not queue)
if queue and (force or not os.path.exists(dof)):
remove_or_makedirs(dof)
script += 'elif taskid == {taskid}: atlas.avgdof({t}, batch=True)\n'.format(taskid=tasks, t=t)
tasks += 1
dofs[t] = dof
if create and queue:
job = self._submit(batchname, script=script, tasks=tasks, step=step, queue=queue)
else:
job = (None, 0)
return (job, dofs)
def growth(self, t1, t2, step=-1, force=False, create=True, batch=False):
"""Make composite SV FFD corresponding to longitudinal change from t1 to t2."""
if step < 0:
step = self.step
t1 = self.normtime(t1)
t2 = self.normtime(t2)
if isclose(t1, t2):
return "identity"
dof = os.path.join(self.subdir(step), "dof", "growth", "{0}.dof.gz".format(self.timename(t1, t2)))
if step < 1:
            return dof if os.path.exists(dof) else "identity"
if create and (force or not os.path.exists(dof)):
dofs = [
self.avgdof(t1, step=step, force=force, create=not batch),
self.growth(t1, t2, step=step - 1, force=force, create=not batch),
self.avgdof(t2, step=step, force=force, create=not batch)
]
if dofs[1] == "identity":
del dofs[1]
makedirs(os.path.dirname(dof))
self._run("compose-dofs", args=dofs + [dof], opts={"scale": -1., "bch": self.num_bch_terms, "global": False})
return dof
def compose(self, step=-1, ages=[], allpairs=False, force=False, create=True, queue=None, batchname="compose"):
"""Compose longitudinal deformations with residual average deformations."""
if step < 0:
step = self.step
if not queue:
queue = self.queue["short"]
if queue == "local":
queue = None
self.info("Update all pairs of longitudinal deformations", step=step)
script = ""
tasks = 0
dofs = {}
if not ages:
ages = self.ages()
for t1 in ages:
dofs[t1] = {}
for t2 in ages:
if allpairs or (t1 != t2 and self.weight(t1, mean=t2) > 0.):
dof = self.growth(t1, t2, step=step, force=force, create=create and not queue)
if dof != "identity" and queue and (force or not os.path.exists(dof)):
remove_or_makedirs(dof)
script += 'elif taskid == {taskid}: atlas.growth({t1}, {t2}, batch=True)\n'.format(taskid=tasks, t1=t1, t2=t2)
tasks += 1
dofs[t1][t2] = dof
if create and queue:
job = self._submit(batchname, script=script, tasks=tasks, step=step, queue=queue)
else:
job = (None, 0)
return (job, dofs)
def imgdof(self, imgid, t, step=-1, decomposed=False, force=False, create=True, batch=False):
"""Compute composite image to atlas transformation."""
if step < 0:
step = self.step
if step > 0:
dof = os.path.join(self.subdir(step), "dof", "composite", self.timename(t), "{0}.dof.gz".format(imgid))
if decomposed or (create and (force or not os.path.exists(dof))):
dofs = [
self.affdof(imgid),
self.regdof(imgid, step=step, force=force, create=create and not batch)
]
if self.age_specific_imgdof:
growth = self.growth(self.age(imgid), t, step=step - 1, force=force, create=create and not batch)
if growth != "identity":
dofs.append(growth)
dofs.append(self.avgdof(t, step=step, force=force, create=create and not batch))
if decomposed:
dof = dofs
elif create:
makedirs(os.path.dirname(dof))
self._run("compose-dofs", args=dofs + [dof])
else:
dof = self.affdof(imgid)
if decomposed:
dof = [dof] + ['identity'] * 2
return dof
def imgdofs(self, ages=[], step=-1, force=False, create=True, queue=None, batchname="imgdofs"):
"""Compute all composite image to atlas transformations."""
if step < 0:
step = self.step
if not queue:
queue = self.queue["short"]
if queue == "local":
queue = None
self.info("Update composite image to atlas transformations", step=step)
if ages:
if not isinstance(ages, (tuple, list)):
ages = [ages]
ages = [self.normtime(t) for t in ages]
else:
ages = self.ages()
script = ""
tasks = 0
dofs = {}
for t in ages:
dofs[t] = {}
weights = self.weights(t)
for imgid in self.imgids:
if imgid in weights:
dof = self.imgdof(imgid=imgid, t=t, step=step, force=force, create=create and not queue)
if dof != "identity" and queue and (force or not os.path.exists(dof)):
remove_or_makedirs(dof)
script += 'elif taskid == {taskid}: atlas.imgdof(imgid="{imgid}", t={t}, batch=True)\n'.format(
taskid=tasks, imgid=imgid, t=t
)
tasks += 1
dofs[t][imgid] = dof
if create and queue:
job = self._submit(batchname, script=script, tasks=tasks, step=step, queue=queue)
else:
job = (None, 0)
return (job, dofs)
def defimg(self, imgid, t, channel=None, path=None, step=-1, decomposed=True, force=False, create=True, batch=False):
"""Transform sample image to atlas space at given time point."""
if not channel:
channel = self.channel
if not path:
path = os.path.join(self.subdir(step), channel, self.timename(t), imgid + ".nii.gz")
if create and (force or not os.path.exists(path)):
cfg = self.config["images"][channel]
img = self.image(imgid, channel=channel)
dof = self.imgdof(imgid=imgid, t=t, step=step, decomposed=decomposed, force=force, create=not batch)
opts = {
"interp": cfg.get("interp", "linear"),
"target": self.refimg,
"dofin": dof,
"invert": True
}
if "bkgrnd" in cfg:
opts["source-padding"] = cfg["bkgrnd"]
if "labels" in cfg:
opts["labels"] = cfg["labels"].split(",")
if "datatype" in cfg:
opts["datatype"] = cfg["datatype"]
makedirs(os.path.dirname(path))
self._run("transform-image", args=[img, path], opts=opts)
return path
def defimgs(self, ages=[], channels=[], step=-1, decomposed=True, force=False, create=True, queue=None, batchname="defimgs"):
"""Transform all images to discrete set of atlas time points."""
if step < 0:
step = self.step
if not queue:
queue = self.queue["short"]
if queue == "local":
queue = None
single_channel = channels and isinstance(channels, basestring)
if not channels:
channels = self.regcfg(step).get("channels", self.channel)
if not isinstance(channels, list):
channels = [channels]
if ages:
self.info("Deform images to discrete time points", step=step)
if not isinstance(ages, (tuple, list)):
ages = [ages]
ages = [self.normtime(t) for t in ages]
else:
self.info("Deform images to observed time points", step=step)
ages = self.ages()
script = ""
tasks = 0
imgs = {}
for channel in channels:
imgs[channel] = {}
for t in ages:
weights = self.weights(t)
if len(weights) == 0:
raise Exception("No image has non-zero weight for t={}".format(t))
imgs[channel][t] = []
for imgid in self.imgids:
if imgid in weights:
img = self.defimg(imgid=imgid, t=t, channel=channel, step=step, decomposed=decomposed, force=force, create=create and not queue)
if queue and (force or not os.path.exists(img)):
remove_or_makedirs(img)
script += 'elif taskid == {taskid}: atlas.defimg(imgid="{imgid}", t={t}, channel="{channel}", path="{path}", decomposed={decomposed}, batch=True)\n'.format(
taskid=tasks, t=t, imgid=imgid, channel=channel, path=img, decomposed=decomposed
)
tasks += 1
imgs[channel][t].append(img)
if create and queue:
job = self._submit(batchname, script=script, tasks=tasks, step=step, queue=queue)
else:
job = (None, 0)
if single_channel:
imgs = imgs[channel]
return (job, imgs)
def imgtable(self, t, channel=None, step=-1, decomposed=True, force=False, create=True, batch=False):
"""Write image table with weights for average-images, and deform images to given time point."""
t = self.normtime(t)
if not channel:
channel = self.channel
image = self.config["images"][channel]
table = os.path.join(self.subdir(step), "config", "{t}-{channel}.tsv".format(t=self.timename(t), channel=channel))
if create and (force or not os.path.exists(table)):
weights = self.weights(t)
if len(weights) == 0:
raise Exception("No image has non-zero weight for t={}".format(t))
makedirs(os.path.dirname(table))
with open(table, "wt") as f:
f.write(self.topdir)
f.write("\n")
for imgid in self.imgids:
if imgid in weights:
img = self.defimg(t=t, imgid=imgid, channel=channel, step=step, decomposed=decomposed, force=force, create=not batch)
f.write(os.path.relpath(img, self.topdir))
f.write("\t{0}\n".format(weights[imgid]))
return table
def avgimg(self, t, channel=None, label=0, path=None, sharpen=True, outdir=None, step=-1, decomposed=True, force=False, create=True, batch=False):
"""Create average image for a given time point."""
if not channel:
channel = self.channel
cfg = self.config["images"][channel]
if isinstance(label, basestring):
label = label.split(",")
if isinstance(label, list) and len(label) == 1:
label = label[0]
if not path:
if label:
if isinstance(label, (tuple, list)):
lblstr = ','.join([str(l).strip() for l in label])
elif isinstance(label, int):
max_label = max(parselabels(cfg["labels"])) if "labels" in cfg else 9
lblstr = "{0:0{1}d}".format(label, len(str(max_label)))
else:
lblstr = str(label).strip()
lblstr = lblstr.replace("..", "-")
else:
lblstr = None
if outdir:
outdir = os.path.abspath(outdir)
if "labels" in cfg:
if lblstr:
path = os.path.join(outdir, "pbmaps", "_".join([channel, lblstr]))
else:
path = os.path.join(outdir, "labels", channel)
else:
path = os.path.join(outdir, "templates", channel)
else:
path = os.path.join(self.subdir(step), channel)
if lblstr:
path = os.path.join(path, "prob_" + lblstr)
elif sharpen:
path = os.path.join(path, "templates")
else:
path = os.path.join(path, "mean")
path = os.path.join(path, self.timename(t) + ".nii.gz")
if create and (force or not os.path.exists(path)):
makedirs(os.path.dirname(path))
if "labels" in cfg and not label:
labels = parselabels(cfg["labels"])
args = [self.avgimg(t, channel=channel, label=label, step=step, decomposed=decomposed, force=force, batch=batch) for label in labels]
self._run("em-hard-segmentation", args=[len(args)] + args + [path])
else:
table = self.imgtable(t, step=step, channel=channel, decomposed=decomposed, force=force, batch=batch)
opts = {
"images": table,
"reference": self.refimg
}
if "bkgrnd" in cfg:
opts["padding"] = float(cfg["bkgrnd"])
opts["datatype"] = cfg.get("datatype", "float")
if "labels" in cfg:
opts["label"] = label
if opts["datatype"] not in ["float", "double"]:
opts["rescaling"] = cfg.get("rescaling", [0, 100])
elif "rescaling" in cfg:
opts["rescaling"] = cfg["rescaling"]
else:
opts["threshold"] = .5
opts["normalization"] = cfg.get("normalization", "zscore")
opts["rescaling"] = cfg.get("rescaling", [0, 100])
if sharpen:
opts["sharpen"] = cfg.get("sharpen", True)
self._run("average-images", args=[path], opts=opts)
return path
def avgimgs(self, step=-1, ages=[], channels=[], labels={}, sharpen=True, outdir=None, decomposed=True, force=False, create=True, queue=None, batchname="avgimgs"):
"""Create all average images required for (parallel) atlas construction."""
if step < 0:
step = self.step
if not queue:
queue = self.queue["short"]
if queue == "local":
queue = None
if ages:
self.info("Average images at discrete time points", step=step)
ages = [self.normtime(t) for t in ages]
else:
self.info("Average images at observed time points", step=step)
ages = self.ages()
single_channel = channels and isinstance(channels, basestring)
if not channels:
channels = self.regcfg(step).get("channels", self.channel)
if not isinstance(channels, list):
channels = [channels]
if not isinstance(labels, dict):
dlabels = {}
for channel in channels:
dlabels[channel] = labels
labels = dlabels
script = ""
tasks = 0
imgs = {}
for channel in channels:
imgs[channel] = {}
for t in ages:
imgs[channel][t] = []
for channel in channels:
segments = [0]
if "labels" in self.config["images"][channel]:
lbls = labels.get(channel, [])
if lbls:
if isinstance(lbls, basestring):
if lbls.lower() == "all":
segments = parselabels(self.config["images"][channel]["labels"])
else:
segments = parselabels(lbls)
else:
segments = lbls
for segment in segments:
for t in ages:
img = self.avgimg(t, channel=channel, label=segment, sharpen=sharpen, outdir=outdir, step=step,
decomposed=decomposed, force=force, create=create and not queue)
if queue and (force or not os.path.exists(img)):
remove_or_makedirs(img)
script += 'elif taskid == {taskid}: atlas.avgimg(t={t}, channel="{channel}", label={segment}, sharpen={sharpen}, outdir={outdir}, decomposed={decomposed}, batch=True)\n'.format(
taskid=tasks, t=t, channel=channel, segment=repr(segment), sharpen=sharpen, outdir=repr(outdir), decomposed=decomposed
)
tasks += 1
if len(segments) == 1:
imgs[channel][t] = img
else:
imgs[channel][t].append(img)
if create and queue:
job = self._submit(batchname, script=script, tasks=tasks, step=step, queue=queue)
else:
job = (None, 0)
if single_channel:
imgs = imgs[channels[0]]
return (job, imgs)
def construct(self, start=-1, niter=10, outdir=None, force=False, queue=None):
"""Perform atlas construction.
Args:
start (int): Last completed iteration. (default: step)
niter (int, optional): Number of atlas construction iterations. (default: 10)
outdir (str, optional): Directory for final templates. (default: config.paths.outdir)
force (bool, optional): Force re-creation of already existing files. (default: False)
queue (str, dict, optional): Name of queues of batch queuing system.
When not specified, the atlas construction runs on the local machine
using the number of threads specified during construction. When a single `str`
is given, both short and long running jobs are submitted to the same queue.
Otherwise, separate environments can be specified for "short" or "long" running
jobs using the respective dictionary keys. The supported environments are:
- "local": Multi-threaded execution on host machine
- "condor": Batch execution using HTCondor
- "<other>": Batch execution using named SLURM partition
(default: config.environment.queue)
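
        Example:
            A minimal sketch of a mixed queue specification (hypothetical
            queue names):

                atlas.construct(niter=5, queue={"short": "local", "long": "condor"})

            runs short jobs multi-threaded on the local machine and submits
            long-running registration jobs to HTCondor.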
"""
if start < 0:
start = self.step
if start < 0:
raise ValueError("Atlas to be constructed must have step index >= 0!")
if start > 0:
self.info("Performing {0} iterations starting with step {1}".format(niter, start))
else:
self.info("Performing {0} iterations".format(niter))
self.info("Age-dependent image deformations = {}".format(self.age_specific_imgdof))
self.info("Average age-dependent deformations = {}".format(self.age_specific_regdof))
# Initialize dict of queues used for batch execution
if not queue:
queue = self.queue
if not isinstance(queue, dict):
queue = {"short": queue, "long": queue}
else:
if "short" not in queue:
queue["short"] = "local"
if "long" not in queue:
queue["long"] = "local"
        # Save a considerable amount of disk space by not explicitly storing
        # the composite image to atlas transformations of type
        # FluidFreeFormTransformation (or approximating their composition).
        # The transform-image and average-images commands can instead apply
        # the sequence of transforms on the fly.
decomposed_imgdofs = True
rmtemp_regdofs = True
# Iterate atlas construction steps
weights = {}
for t in self.ages():
weights[t] = self.weights(t)
for step in range(start + 1, start + niter + 1):
# Deform images to atlas space
if not decomposed_imgdofs:
job = self.imgdofs(step=step - 1, force=force, queue=queue["short"])[0]
self.wait(job, interval=30, verbose=1)
job = self.defimgs(step=step - 1, decomposed=decomposed_imgdofs, force=force, queue=queue["short"])[0]
self.wait(job, interval=30, verbose=1)
# Average images in atlas space
job = self.avgimgs(step=step - 1, force=force, queue=queue["short"])[0]
self.wait(job, interval=60, verbose=2)
# Register all images to the current template images
job = self.regdofs(step=step, force=force, queue=queue["long"])[0]
self.wait(job, interval=60, verbose=5)
# Compute all required average deformations
job = self.avgdofs(step=step, force=force, queue=queue["short"])[0]
self.wait(job, interval=60, verbose=2)
if rmtemp_regdofs and self.age_specific_regdof:
self.info("Deleting temporary deformation files", step=step)
for t in weights:
for imgid in weights[t]:
dof1 = self.regdof(imgid, step=step, create=False)
dof2 = self.regdof(imgid, t=t, step=step, create=False)
if dof2 != "identity" and dof1 != dof2 and os.path.exists(dof2):
os.remove(dof2)
# Compute all required longitudinal deformations
if self.age_specific_regdof or self.age_specific_imgdof:
job = self.compose(step=step, force=force, queue=queue["short"])[0]
self.wait(job, interval=30, verbose=1)
# Write final template images to specified directory
self.step = start + niter
self.info("Creating final mean shape templates")
if outdir is None:
outdir = self.outdir
if outdir:
ages = self.means
else:
ages = self.ages()
outdir = None
if not decomposed_imgdofs:
job = self.imgdofs(ages=ages, force=force, queue=queue["short"])[0]
self.wait(job, interval=30, verbose=1)
channels = [channel for channel in self.config["images"].keys() if channel not in ("default", "ref")]
job = self.defimgs(channels=channels, ages=ages, decomposed=decomposed_imgdofs, force=force, queue=queue["short"])[0]
self.wait(job, interval=30, verbose=1)
job = self.avgimgs(channels=channels, ages=ages, force=force, queue=queue["short"], outdir=outdir)[0]
self.wait(job, interval=60, verbose=2)
self.info("Finished atlas construction!")
def evaluate(self, ages=[], step=-1, force=False, queue=None):
"""Evaluate atlas sharpness measures."""
if not ages:
ages = self.means
if isinstance(ages, int):
ages = [float(ages)]
elif isinstance(ages, float):
ages = [ages]
if isinstance(step, int):
if step < 0:
if self.step < 0:
raise ValueError("Need to specify which step/iteration of atlas construction to evaluate!")
step = self.step
steps = [step]
else:
steps = step
measures = self.config["evaluation"]["measures"]
rois_spec = self.config["evaluation"].get("rois", {})
roi_paths = {}
roi_label = {}
roi_labels = {}
roi_channels = set()
for roi_name, roi_path in rois_spec.items():
labels = []
if isinstance(roi_path, list):
if len(roi_path) != 2:
raise ValueError("Invalid evaluation ROI value, must be either path (format) string of individual ROI or [<path_format>, <range>]")
labels = roi_path[1]
roi_path = roi_path[0]
if roi_path in self.config["images"] and isinstance(labels, basestring) and labels.lower() == "all":
labels = parselabels(self.config["images"][roi_path].get("labels", ""))
if not labels:
raise ValueError("ROI channel {} must have 'labels' specified to use for 'all'!".format(roi_path))
else:
labels = parselabels(labels)
roi_name_format = roi_name
if roi_name_format.format(l=0) == roi_name_format:
raise ValueError("Invalid evaluation ROI key name, name must include '{l}' format string!")
for label in labels:
roi_name = roi_name_format.format(l=label)
roi_paths[roi_name] = roi_path
roi_label[roi_name] = label
else:
roi_paths[roi_name] = roi_path
if roi_path in self.config["images"]:
roi_channels.add(roi_path)
roi_labels[roi_path] = labels
        roi_names = sorted(roi_paths.keys())
roi_channels = list(roi_channels)
re_measure = re.compile("^\s*(\w+)\s*\((.*)\)\s*$")
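        # E.g., "grad(mean)" parses into measure "grad" with args "mean",
        # whereas a bare "entropy" does not match and yields empty args.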
# Evaluate voxel-wise measures
for step in steps:
voxelwise_measures = {}
for t in ages:
voxelwise_measures[t] = []
for channel in measures:
channel_info = self.config["images"][channel]
channel_measures = measures[channel]
if isinstance(channel_measures, basestring):
channel_measures = [channel_measures]
if channel_measures:
# Deform individual images to atlas time point
name = "eval_defimgs_{channel}".format(channel=channel)
job, imgs = self.defimgs(channels=channel, ages=ages, step=step, queue=queue, batchname=name)
self.wait(job, interval=30, verbose=1)
# Evaluate measures for this image channel/modality
for measure in channel_measures:
measure = measure.lower().strip()
match = re_measure.match(measure)
if match:
measure = match.group(1)
args = match.group(2).strip()
else:
args = ""
# Evaluate gradient magnitude of average image
if measure == "grad":
sharpen = False
if "labels" in channel_info:
if not args:
raise ValueError("Gradient magnitude of segmentation can only be computed for probability map of one or more label(s)!")
labels = [args]
else:
if args and args not in ("mean", "avg", "average", "template"):
raise ValueError("Gradient magnitude of intensity images can only be computed from 'mean'/'avg'/'average'/'template' image!")
if args and args == "template":
sharpen = True
labels = []
name = "eval_avgimgs_{channel}".format(channel=channel)
job, avgs = self.avgimgs(channels=channel, labels={channel: labels}, ages=ages, sharpen=sharpen, step=step, queue=queue)
self.wait(job, interval=60, verbose=2)
if args:
measure += "_" + args
for t in ages:
path = os.path.join(self.subdir(step), channel, measure, self.timename(t) + ".nii.gz")
if force or not os.path.exists(path):
makedirs(os.path.dirname(path))
self._run("detect-edges", step=step, queue=queue, wait_kwargs={"interval": 10, "verbose": 3},
name="eval_{channel}_{measure}_{age}".format(channel=channel, measure=measure, age=self.timename(t)),
args=[avgs[t], path], opts={"padding": channel_info.get("bkgrnd", -1), "central": None})
voxelwise_measures[t].append((channel, measure, path))
else:
if args:
raise ValueError("Measure '{}' has no arguments!".format(measure))
opts = {
"bins": channel_info.get("bins", 0),
"normalization": channel_info.get("normalization", "none")
}
if "rescaling" in channel_info:
opts["rescale"] = channel_info["rescaling"]
if "bkgrnd" in channel_info:
opts["padding"] = channel_info["bkgrnd"]
for t in ages:
path = os.path.join(self.subdir(step), channel, measure, self.timename(t) + ".nii.gz")
if force or not os.path.exists(path):
makedirs(os.path.dirname(path))
opts["output"] = os.path.relpath(path, self.topdir)
mask = self.config["evaluation"].get("mask", "").format(t=t)
if mask:
opts["mask"] = mask
self._run("aggregate-images", step=step, queue=queue, workdir=self.topdir, wait_kwargs={"interval": 30, "verbose": 1},
name="eval_{channel}_{measure}_{age}".format(channel=channel, measure=measure, age=self.timename(t)),
args=[measure] + [os.path.relpath(img, self.topdir) for img in imgs[t]], opts=opts)
voxelwise_measures[t].append((channel, measure, path))
# Average voxel-wise measures within each ROI
for channel in roi_channels:
# Deform individual images to atlas time point
name = "eval_defimgs_{channel}".format(channel=channel)
job, imgs = self.defimgs(channels=channel, ages=ages, step=step, queue=queue, batchname=name)
self.wait(job, interval=30, verbose=1)
name = "eval_avgrois_{channel}".format(channel=channel)
job, rois = self.avgimgs(channels=[channel], labels=roi_labels, ages=ages, step=step, queue=queue, batchname=name)
self.wait(job, interval=60, verbose=2)
for t in ages:
if voxelwise_measures[t] and roi_paths:
subdir = self.subdir(step)
mean_table = os.path.join(subdir, "qc-measures", self.timename(t) + "-mean.csv")
sdev_table = os.path.join(subdir, "qc-measures", self.timename(t) + "-sdev.csv")
wsum_table = os.path.join(subdir, "qc-measures", self.timename(t) + "-wsum.csv")
if force or not os.path.exists(mean_table) or not os.path.exists(sdev_table) or not os.path.exists(wsum_table):
outdir = os.path.dirname(mean_table)
tmp_mean_table = os.path.join(outdir, "." + os.path.basename(mean_table))
tmp_sdev_table = os.path.join(outdir, "." + os.path.basename(sdev_table))
tmp_wsum_table = os.path.join(outdir, "." + os.path.basename(wsum_table))
args = [x[2] for x in voxelwise_measures[t]]
opts = {
"name": [],
"roi-name": [],
"roi-path": [],
"header": None,
"preload": None,
"mean": tmp_mean_table,
"sdev": tmp_sdev_table,
"size": tmp_wsum_table,
"digits": self.config["evaluation"].get("digits", 9)
}
for voxelwise_measure in voxelwise_measures[t]:
channel = voxelwise_measure[0]
measure = voxelwise_measure[1]
opts["name"].append("{}/{}".format(channel, measure))
for roi_name in roi_names:
roi_path = roi_paths[roi_name]
if roi_path in roi_channels:
roi_path = self.avgimg(t, channel=roi_path, label=roi_label.get(roi_name, 0), step=step, create=False)
else:
roi_path = roi_path.format(
subdir=subdir, tmpdir=self.tmpdir, topdir=self.topdir,
i=step, t=t, l=roi_label.get(roi_name, 0)
)
opts["roi-name"].append(roi_name)
opts["roi-path"].append(roi_path)
makedirs(outdir)
try:
self._run("average-measure", step=step, queue=queue,
name="eval_average_{age}".format(age=self.timename(t)),
args=args, opts=opts)
except Exception as e:
for tmp_table in [tmp_mean_table, tmp_sdev_table, tmp_wsum_table]:
if os.path.exists(tmp_table):
os.remove(tmp_table)
raise e
cur_wait_time = 0
inc_wait_time = 10
max_wait_time = 6 * inc_wait_time
if queue and queue.lower() != "local":
while True:
missing = []
for tmp_table in [tmp_mean_table, tmp_sdev_table, tmp_wsum_table]:
if not os.path.exists(tmp_table):
missing.append(tmp_table)
if not missing:
break
if cur_wait_time < max_wait_time:
time.sleep(inc_wait_time)
cur_wait_time += inc_wait_time
else:
raise Exception("Job average-measure finished, but output files still missing after {}s: {}".format(cur_wait_time, missing))
for src, dst in zip([tmp_mean_table, tmp_sdev_table, tmp_wsum_table], [mean_table, sdev_table, wsum_table]):
try:
os.rename(src, dst)
except OSError as e:
sys.stderr.write("Failed to rename '{}' to '{}'".format(src, dst))
raise e
def template(self, i, channel=None):
"""Get absolute path of i-th template image."""
if i < 0 or i >= len(self.means):
raise IndexError()
if not channel:
channel = self.channel
img = self.avgimg(self.means[i], channel=channel, step=self.step, create=False, outdir=self.outdir)
if self.step >= 0 and not os.path.exists(img):
avg = self.avgimg(self.means[i], channel=channel, step=self.step, create=False)
if os.path.exists(avg):
img = avg
return img
def __len__(self):
"""Length of the atlas is the number of templates at discrete time points."""
return len(self.means)
def __getitem__(self, i):
"""Absolute file path of i-th atlas template."""
return self.template(i)
def deformation(self, i, t=None, force=False, create=True):
"""Get absolute path of longitudinal deformation from template i."""
if i < 0 or i >= len(self.means):
return "identity"
        if t is None:
            t = self.means[i + 1] if i + 1 < len(self.means) else self.means[i]
return self.growth(self.means[i], t, step=self.step, force=force, create=create)
def deform(self, i, t, path=None, channel=None, force=False, create=True):
"""Deform i-th template using longitudinal deformations to time point t."""
if not channel:
channel = self.channel
source = self.template(i, channel=channel)
t = self.normtime(t)
if t == self.means[i]:
if path and os.path.realpath(os.path.abspath(path)) != os.path.realpath(source):
                if create and (force or not os.path.exists(path)):
shutil.copyfile(source, path)
return path
else:
return source
if not path:
path = os.path.join(self.subdir(self.step), channel, "temp", "{}-{}.nii.gz".format(self.timename(self.means[i]), self.timename(t)))
if create and (force or not os.path.exists(path)):
dof = self.deformation(i, t)
makedirs(os.path.dirname(path))
self._run("transform-image", args=[source, path], opts={"dofin": dof, "target": self.refimg})
return path
def interpolate(self, t, path=None, channel=None, interp="default", deform=False, sigma=0, force=False, create=True):
"""Interpolate atlas volume for specified time from finite set of templates.
Unlike avgimg, this function does not evaluate the continuous spatio-temporal function
to construct a template image for the given age. Instead, it uses the specified
interpolation kernel and computes the corresponding weighted average of previously
constructed template images.
Args:
interp (str): Temporal interpolation kernel (weights). (default: 'gaussian')
sigma (float): Standard deviation used for Gaussian interpolation. (default: adaptive)
deform (bool): Whether to deform each template using the longitudinal deformation.
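
        Example:
            With template ages [28, 32] (illustrative values), linear
            interpolation at t=29 weights the two templates by
            w1 = (32 - 29) / (32 - 28) = 0.75 and
            w2 = (29 - 28) / (32 - 28) = 0.25.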
"""
interp = interp.lower()
if interp in ("kernel", "default"):
interp = "gaussian"
if not channel:
channel = self.channel
t = self.normtime(t)
i = self.timeindex(t)
if t == self.means[i] and interp.startswith("linear"):
return self.template(i, channel=channel)
if not path:
path = os.path.join(self.outdir, self.timename(t) + ".nii.gz")
if create and (force or not os.path.exists(path)):
args = [path]
# Linear interpolation
if interp == "linear":
w = (self.means[i + 1] - t) / (self.means[i + 1] - self.means[i])
args.extend(["-image", self.template(i, channel=channel), w])
if deform:
args.extend(["-dof", self.deformation(i, t)])
w = (t - self.means[i]) / (self.means[i + 1] - self.means[i])
args.extend(["-image", self.template(i + 1, channel=channel), w])
if deform:
args.extend(["-dof", self.deformation(i + 1, t)])
# Gaussian interpolation
elif interp == "gaussian":
for i in range(len(self.means)):
w = self.weight(self.means[i], mean=t, sigma=sigma)
if w > 0.:
args.extend(["-image", self.template(i, channel=channel), w])
if deform:
dof = self.deformation(self.means[i], t)
if dof != "identity":
args.extend(["-dof", dof])
if create:
bkgrnd = self.config["images"][channel].get("bkgrnd", -1)
self._run("average-images", args=args, opts={"reference": self.refimg, "datatype": "uchar", "padding": bkgrnd})
return path
def view(self, i=None, channel=None):
"""View template image at specified time point(s)."""
if i is None:
i = range(len(self.means))
elif not isinstance(i, (list, tuple)):
i = [i]
if not channel:
channel = self.channel
imgs = [self.template(int(idx), channel=channel) for idx in i]
mirtk.run("view", target=imgs)
def rmtemp(self):
"""Delete temporary files.
This function removes all temporary files which can be recomputed
without the need for performing the more costly registrations.
Intermediate template images, auxiliary CSV files, and composite
transformations are among these temporary files to be deleted.
"""
raise NotImplementedError()
##############################################################################
# Auxiliaries
# ----------------------------------------------------------------------------
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
"""Compare too floating point numbers."""
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
# ----------------------------------------------------------------------------
def read_imgids(path):
"""Read subject/image IDs from text file."""
imgids = []
with open(path, "rt") as f:
for line in f.readlines():
line = line.strip()
if len(line) > 0 and line[0] != '#':
                imgids.append(re.split("[ \t,]+", line)[0])
return imgids
# ----------------------------------------------------------------------------
def read_ages(path, delimiter=None):
"""Read subject/image age from CSV file."""
if not delimiter:
ext = os.path.splitext(path)[1].lower()
if ext == ".csv":
delimiter = ","
elif ext == ".tsv":
delimiter = "\t"
elif ext == ".txt":
delimiter = " "
else:
raise ValueError("Cannot determine delimiter of {} file! Use file extension .csv, .tsv, or .txt.".format(path))
ages = {}
with open(path, "rt") as csvfile:
reader = csv.reader(csvfile, delimiter=delimiter, quotechar='"')
for line in reader:
ages[line[0]] = float(line[1])
return ages
# ----------------------------------------------------------------------------
def basename_without_ext(path):
name = os.path.basename(path)
name, ext = os.path.splitext(name)
if ext.lower() == '.gz':
name = os.path.splitext(name)[0]
return name
# ----------------------------------------------------------------------------
def remove_or_makedirs(path):
"""Remove file when it exists or make directories otherwise."""
if os.path.exists(path):
os.remove(path)
else:
makedirs(os.path.dirname(path))
# ----------------------------------------------------------------------------
def parselabels(labels):
"""Parse labels specification."""
values = []
isstr = isinstance(labels, basestring)
if isstr and "," in labels:
        labels = [arg.strip() for arg in labels.split(",")]
if isinstance(labels, (tuple, list)):
for label in labels:
values.extend(parselabels(label))
elif isstr:
m = re.match("([0-9]+)..([0-9]+)", labels)
if m:
values.extend(range(int(m.group(1)), int(m.group(2)) + 1))
elif ":" in labels:
range_spec = [int(x) for x in labels.split(":")]
if len(range_spec) == 2:
values.extend(list(range(range_spec[0], range_spec[2] + 1, range_spec[1])))
else:
values.extend(list(range(range_spec[0], range_spec[1] + 1)))
elif labels != "":
values.append(int(labels))
else:
values.append(int(labels))
return values
|
BioMedIA/MIRTK
|
Applications/lib/python/mirtk/atlas/spatiotemporal.py
|
Python
|
apache-2.0
| 77,515
|
[
"Gaussian"
] |
bbba6d101d164105ae27893a615fceee42acbb90e63ffe2d3f8f8776c3e140d2
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import math
import sympy
import numpy as np
from hyperspy.component import _get_scaling_factor
from hyperspy._components.expression import Expression
from hyperspy._components.gaussian import _estimate_gaussian_parameters
from hyperspy.misc.utils import is_binned # remove in v2.0
from distutils.version import LooseVersion
sqrt2pi = math.sqrt(2 * math.pi)
sigma2fwhm = 2 * math.sqrt(2 * math.log(2))
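# For reference: sigma = 1.0 corresponds to a Gaussian FWHM of about 2.3548.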
class Voigt(Expression):
r"""Voigt component.
Symmetric peak shape based on the convolution of a Lorentzian and Normal
(Gaussian) distribution:
.. math::
        f(x) = (G * L)(x)
    where :math:`*` denotes convolution, :math:`G(x)` is the Gaussian function and :math:`L(x)` is the
Lorentzian function. In this case using an approximate formula by David
(see Notes). This approximation improves on the pseudo-Voigt function
(linear combination instead of convolution of the distributions) and is,
to a very good approximation, equivalent to a Voigt function:
.. math::
z(x) &= \frac{x + i \gamma}{\sqrt{2} \sigma} \\
w(z) &= \frac{e^{-z^2} \text{erfc}(-i z)}{\sqrt{2 \pi} \sigma} \\
f(x) &= A \cdot \Re\left\{ w \left[ z(x - x_0) \right] \right\}
============== =============
Variable Parameter
============== =============
:math:`x_0` centre
:math:`A` area
:math:`\gamma` gamma
:math:`\sigma` sigma
============== =============
Parameters
    ----------
centre : float
Location of the maximum of the peak.
area : float
Intensity below the peak.
gamma : float
:math:`\gamma` = HWHM of the Lorentzian distribution.
    sigma : float
:math:`2 \sigma \sqrt{(2 \log(2))}` = FWHM of the Gaussian distribution.
For convenience the `gwidth` and `lwidth` attributes can also be used to
set and get the FWHM of the Gaussian and Lorentzian parts of the
    distribution, respectively. For backwards compatibility, `FWHM` is another
alias for the Gaussian width.
Notes
-----
W.I.F. David, J. Appl. Cryst. (1986). 19, 63-64,
doi:10.1107/S0021889886089999
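
    Examples
    --------
    A minimal usage sketch (assumes ``hyperspy.api`` is imported as ``hs``):

    >>> v = hs.model.components1D.Voigt()
    >>> v.gwidth = 0.5  # Gaussian FWHM; sets sigma = 0.5 / (2 * sqrt(2 * ln 2))
    >>> v.lwidth = 0.2  # Lorentzian FWHM; sets gamma = 0.1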
"""
def __init__(self, centre=10., area=1., gamma=0.2, sigma=0.1,
module=["numpy", "scipy"], **kwargs):
# Not to break scripts once we remove the legacy Voigt
if "legacy" in kwargs:
del kwargs["legacy"]
if LooseVersion(sympy.__version__) < LooseVersion("1.3"):
raise ImportError("The `Voigt` component requires "
"SymPy >= 1.3")
# We use `_gamma` internally to workaround the use of the `gamma`
# function in sympy
super().__init__(
expression="area * real(V); \
V = wofz(z) / (sqrt(2.0 * pi) * sigma); \
z = (x - centre + 1j * _gamma) / (sigma * sqrt(2.0))",
name="Voigt",
centre=centre,
area=area,
gamma=gamma,
sigma=sigma,
position="centre",
module=module,
autodoc=False,
rename_pars={"_gamma": "gamma"},
**kwargs,
)
# Boundaries
self.area.bmin = 0.
self.gamma.bmin = 0.
self.sigma.bmin = 0.
self.isbackground = False
self.convolved = True
def estimate_parameters(self, signal, x1, x2, only_current=False):
"""Estimate the Voigt function by calculating the momenta of the
Gaussian.
Parameters
----------
signal : Signal1D instance
x1 : float
Defines the left limit of the spectral range to use for the
estimation.
x2 : float
Defines the right limit of the spectral range to use for the
estimation.
only_current : bool
If False estimates the parameters for the full dataset.
Returns
-------
: bool
Exit status required for the :meth:`remove_background` function.
Notes
-----
Adapted from http://www.scipy.org/Cookbook/FittingData
Examples
--------
>>> g = hs.model.components1D.Voigt(legacy=False)
>>> x = np.arange(-10, 10, 0.01)
>>> data = np.zeros((32, 32, 2000))
>>> data[:] = g.function(x).reshape((1, 1, 2000))
>>> s = hs.signals.Signal1D(data)
>>> s.axes_manager[-1].offset = -10
>>> s.axes_manager[-1].scale = 0.01
>>> g.estimate_parameters(s, -10, 10, False)
"""
super()._estimate_parameters(signal)
axis = signal.axes_manager.signal_axes[0]
centre, height, sigma = _estimate_gaussian_parameters(signal, x1, x2,
only_current)
scaling_factor = _get_scaling_factor(signal, axis, centre)
if only_current is True:
self.centre.value = centre
self.sigma.value = sigma
self.area.value = height * sigma * sqrt2pi
if is_binned(signal):
# in v2 replace by
#if axis.is_binned:
self.area.value /= scaling_factor
return True
else:
if self.area.map is None:
self._create_arrays()
self.area.map['values'][:] = height * sigma * sqrt2pi
if is_binned(signal):
# in v2 replace by
#if axis.is_binned:
self.area.map['values'][:] /= scaling_factor
self.area.map['is_set'][:] = True
self.sigma.map['values'][:] = sigma
self.sigma.map['is_set'][:] = True
self.centre.map['values'][:] = centre
self.centre.map['is_set'][:] = True
self.fetch_stored_values()
return True
@property
def gwidth(self):
return self.sigma.value * sigma2fwhm
@gwidth.setter
def gwidth(self, value):
self.sigma.value = value / sigma2fwhm
@property
def FWHM(self):
return self.sigma.value * sigma2fwhm
@FWHM.setter
def FWHM(self, value):
self.sigma.value = value / sigma2fwhm
@property
def lwidth(self):
return self.gamma.value * 2
@lwidth.setter
def lwidth(self, value):
self.gamma.value = value / 2
|
thomasaarholt/hyperspy
|
hyperspy/_components/voigt.py
|
Python
|
gpl-3.0
| 7,040
|
[
"Gaussian"
] |
4de6edfca19108debfa5856787306a37d89e8d3683690c821567db56610abcba
|
#!/usr/bin/env python3
# @package fill_missing
# @brief This script solves the Laplace equation as a method of filling holes in map-plane data.
#
# Uses an approximation to Laplace's equation
# @f[ \nabla^2 u = 0 @f]
# to smoothly replace missing values in two-dimensional NetCDF
# variables with the average of the ``nearby'' non-missing values.
#
# Here is a hypothetical example, filling the missing values in the variables
# `topg` and `usurf` in `data.nc` :
# \code
# fill_missing.py -v topg,usurf data.nc data_smoothed.nc
# \endcode
# Generally variables should be filled one at a time.
#
# Each of the requested variables must have missing values specified
# using the _FillValue attribute.
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from time import time
def assemble_matrix(mask):
"""Assemble the matrix corresponding to the standard 5-point stencil
approximation of the Laplace operator on the domain defined by
mask == True, where mask is a 2D NumPy array.
Uses zero Neumann BC at grid edges.
The grid spacing is ignored, which is equivalent to assuming equal
spacing in x and y directions.
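
    For an interior node (i, j) the assembled row encodes the stencil

        4*u[i,j] - u[i-1,j] - u[i+1,j] - u[i,j-1] - u[i,j+1] = b[i*ncol + j]

    while boundary rows have their diagonal reduced accordingly to impose
    the zero Neumann condition.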
"""
PETSc.Sys.Print("Assembling the matrix...")
# grid size
nrow, ncol = mask.shape
# create sparse matrix
A = PETSc.Mat()
A.create(PETSc.COMM_WORLD)
A.setSizes([nrow * ncol, nrow * ncol])
A.setType('aij') # sparse
A.setPreallocationNNZ(5)
# precompute values for setting
# diagonal and non-diagonal entries
diagonal = 4.0
offdx = - 1.0
offdy = - 1.0
def R(i, j):
"Map from the (row,column) pair to the linear row number."
return i * ncol + j
# loop over owned block of rows on this
# processor and insert entry values
row_start, row_end = A.getOwnershipRange()
for row in range(row_start, row_end):
i = row // ncol # map row number to
j = row - i * ncol # grid coordinates
        if not mask[i, j]:
            # known (Dirichlet) node: keep an identity-like row
            A[row, row] = diagonal
            continue
D = diagonal
# i
if i == 0: # top row
D += offdy
if i > 0: # interior
col = R(i - 1, j)
A[row, col] = offdx
if i < nrow - 1: # interior
col = R(i + 1, j)
A[row, col] = offdx
if i == nrow - 1: # bottom row
D += offdy
# j
if j == 0: # left-most column
D += offdx
if j > 0: # interior
col = R(i, j - 1)
A[row, col] = offdy
if j < ncol - 1: # interior
col = R(i, j + 1)
A[row, col] = offdy
if j == ncol - 1: # right-most column
D += offdx
A[row, row] = D
# communicate off-processor values
# and setup internal data structures
# for performing parallel operations
A.assemblyBegin()
A.assemblyEnd()
PETSc.Sys.Print("done.")
return A
def assemble_rhs(rhs, X):
"""Assemble the right-hand side of the system approximating the
Laplace equation.
Modifies rhs in place; sets Dirichlet BC using X where X.mask ==
False.
"""
# PETSc.Sys.Print("Setting Dirichlet BC...")
nrow, ncol = X.shape
row_start, row_end = rhs.getOwnershipRange()
# The right-hand side is zero everywhere except for Dirichlet
# nodes.
rhs.set(0.0)
for row in range(row_start, row_end):
i = row // ncol # map row number to
j = row - i * ncol # grid coordinates
if not X.mask[i, j]:
rhs[row] = 4.0 * X[i, j]
rhs.assemble()
# PETSc.Sys.Print("done.")
def create_solver():
"Create the KSP solver"
# create linear solver
ksp = PETSc.KSP()
ksp.create(PETSc.COMM_WORLD)
# Use algebraic multigrid:
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.GAMG)
ksp.setFromOptions()
ksp.setInitialGuessNonzero(True)
return ksp
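# Note (assumption, standard PETSc behavior): because setFromOptions() is
# called, the solver can be tuned from the command line without code
# changes, e.g.
#   fill_missing.py -v topg data.nc data_filled.nc -ksp_rtol 1e-10 -ksp_monitor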
def fill_missing(field, matrix=None):
"""Fill missing values in a NumPy array 'field' using the matrix
'matrix' approximating the Laplace operator."""
ksp = create_solver()
if matrix is None:
A = assemble_matrix(field.mask)
else:
# PETSc.Sys.Print("Reusing the matrix...")
A = matrix
# obtain solution & RHS vectors
x, b = A.getVecs()
assemble_rhs(b, field)
initial_guess = np.mean(field)
# set the initial guess
x.set(initial_guess)
ksp.setOperators(A)
# Solve Ax = b
# PETSc.Sys.Print("Solving...")
ksp.solve(b, x)
# PETSc.Sys.Print("done.")
# transfer solution to processor 0
vec0, scatter = create_scatter(x)
scatter_to_0(x, vec0, scatter)
return vec0, A
def create_scatter(vector):
"Create the scatter to processor 0."
comm = vector.getComm()
rank = comm.getRank()
scatter, V0 = PETSc.Scatter.toZero(vector)
scatter.scatter(vector, V0, False, PETSc.Scatter.Mode.FORWARD)
comm.barrier()
return V0, scatter
def scatter_to_0(vector, vector_0, scatter):
"Scatter a distributed 'vector' to 'vector_0' on processor 0 using 'scatter'."
comm = vector.getComm()
scatter.scatter(vector, vector_0, False, PETSc.Scatter.Mode.FORWARD)
comm.barrier()
def scatter_from_0(vector_0, vector, scatter):
"Scatter 'vector_0' on processor 0 to a distributed 'vector' using 'scatter'."
comm = vector.getComm()
scatter.scatter(vector, vector_0, False, PETSc.Scatter.Mode.REVERSE)
comm.barrier()
def fill_2d_record(data, matrix=None):
"Fill missing values in a 2D record."
if getattr(data, "mask", None) is None:
return data, None
filled_data, A = fill_missing(data, matrix)
if PETSc.COMM_WORLD.getRank() == 0:
filled_data = filled_data[:].reshape(data.shape)
return filled_data, A
def test():
"Test fill_missing() using synthetic data."
N = 201
M = int(N * 1.5)
x = np.linspace(-1, 1, N)
y = np.linspace(-1, 1, M)
xx, yy = np.meshgrid(x, y)
zz = np.sin(2.5 * np.pi * xx) * np.cos(2.0 * np.pi * yy)
K = 10
mask = np.random.randint(0, K, zz.size).reshape(zz.shape) / float(K)
mask[(xx - 1.0) ** 2 + (yy - 1.0) ** 2 < 1.0] = 1
mask[(xx + 1.0) ** 2 + (yy + 1.0) ** 2 < 1.0] = 1
field = np.ma.array(zz, mask=mask)
zzz, _ = fill_missing(field)
rank = PETSc.COMM_WORLD.getRank()
if rank == 0:
zzz0_np = zzz[:].reshape(field.shape)
import pylab as plt
plt.figure(1)
plt.imshow(zz, interpolation='nearest')
plt.figure(2)
plt.imshow(field, interpolation='nearest')
plt.figure(3)
plt.imshow(zzz0_np, interpolation='nearest')
plt.show()
def fill_variable(nc, name):
"Fill missing values in one variable."
PETSc.Sys.Print("Processing %s..." % name)
t0 = time()
var = nc.variables[name]
comm = PETSc.COMM_WORLD
rank = comm.getRank()
if var.ndim == 3:
A = None
n_records = var.shape[0]
for t in range(n_records):
PETSc.Sys.Print("Processing record %d/%d..." % (t + 1, n_records))
data = var[t, :, :]
filled_data, A = fill_2d_record(data, A)
if rank == 0:
var[t, :, :] = filled_data
comm.barrier()
PETSc.Sys.Print("Time elapsed: %5f seconds." % (time() - t0))
elif var.ndim == 2:
data = var[:, :]
filled_data, _ = fill_2d_record(data)
if rank == 0:
var[:, :] = filled_data
comm.barrier()
PETSc.Sys.Print("Time elapsed: %5f seconds." % (time() - t0))
else:
PETSc.Sys.Print("Skipping the %dD variable %s." % (var.ndim, name))
return
# Remove the _FillValue attribute:
try:
delattr(var, '_FillValue')
except Exception:
pass
# Remove the missing_value attribute:
try:
delattr(var, 'missing_value')
except Exception:
pass
def add_history(nc):
"Update the history attribute in a NetCDF file nc."
comm = PETSc.COMM_WORLD
rank = comm.getRank()
if rank != 0:
return
# add history global attribute (after checking if present)
historysep = ' '
historystr = asctime() + ': ' + historysep.join(sys.argv) + '\n'
if 'history' in nc.ncattrs():
nc.history = historystr + nc.history # prepend to history string
else:
nc.history = historystr
if __name__ == "__main__":
from argparse import ArgumentParser
import os
import os.path
import tempfile
import shutil
from time import time, asctime
try:
from netCDF4 import Dataset as NC
except ImportError:
PETSc.Sys.Print("netCDF4 is not installed!")
sys.exit(1)
parser = ArgumentParser()
parser.description = "Fill missing values by solving the Laplace equation on the missing values, using present values as Dirichlet B.C."
parser.add_argument("INPUT", nargs=1, help="Input file name.")
parser.add_argument("OUTPUT", nargs=1, help="Output file name.")
parser.add_argument("-a", "--all", dest="all", action="store_true",
help="Process all variables.")
parser.add_argument("-v", "--vars", dest="variables",
help="comma-separated list of variables to process")
options, _ = parser.parse_known_args()
input_filename = options.INPUT[0]
output_filename = options.OUTPUT[0]
if options.all:
nc = NC(input_filename)
variables = list(nc.variables.keys())
nc.close()
else:
try:
variables = (options.variables).split(',')
except AttributeError:
PETSc.Sys.Print("Please specify variables using the -v option.")
sys.exit(-1)
# Done processing command-line options.
comm = PETSc.COMM_WORLD
rank = comm.getRank()
t0 = time()
PETSc.Sys.Print("Filling missing values in %s and saving results to %s..." % (input_filename,
output_filename))
if rank == 0:
try:
PETSc.Sys.Print("Creating a temporary file...")
# find the name of the directory with the output file:
dirname = os.path.dirname(os.path.abspath(output_filename))
(handle, tmp_filename) = tempfile.mkstemp(prefix="fill_missing_",
suffix=".nc",
dir=dirname)
os.close(handle) # mkstemp returns a file handle (which we don't need)
except IOError:
PETSc.Sys.Print("ERROR: Can't create %s, Exiting..." % tmp_filename)
try:
PETSc.Sys.Print("Copying input file %s to %s..." % (input_filename,
tmp_filename))
shutil.copy(input_filename, tmp_filename)
except IOError:
PETSc.Sys.Print("ERROR: Can't copy %s, Exiting..." % input_filename)
try:
if rank == 0:
nc = NC(tmp_filename, 'a')
else:
nc = NC(input_filename, 'r')
except Exception as message:
PETSc.Sys.Print(message)
PETSc.Sys.Print("Note: %s was not modified." % output_filename)
sys.exit(-1)
add_history(nc)
for name in variables:
try:
fill_variable(nc, name)
except Exception as message:
PETSc.Sys.Print("ERROR:", message)
PETSc.Sys.Print("Note: %s was not modified." % output_filename)
sys.exit(-1)
nc.close()
try:
if rank == 0:
shutil.move(tmp_filename, output_filename)
except Exception:
PETSc.Sys.Print("Error moving %s to %s. Exiting..." % (tmp_filename,
output_filename))
sys.exit(-1)
PETSc.Sys.Print("Total time elapsed: %5f seconds." % (time() - t0))
|
pism/pism
|
util/fill_missing_petsc.py
|
Python
|
gpl-3.0
| 12,044
|
[
"NetCDF"
] |
f02b7fbe701d594d8baec5e1135b614d1aa8494b5f1cd702f8ab90ff349dd84a
|
# Copyright (c) 2016, Mike Smith
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import GPy
import numpy as np
import sys #so I can print dots
import time
import os
from joblib import Parallel, delayed
def get_log_likelihood(inputs,data,clust):
"""Get the LL of a combined set of clusters, ignoring time series offsets.
Get the log likelihood of a cluster without worrying about the fact
different time series are offset. We're using it here really for those
cases in which we only have one cluster to get the loglikelihood of.
arguments:
inputs -- the 'X's in a list, one item per cluster
data -- the 'Y's in a list, one item per cluster
clust -- list of clusters to use
returns a tuple:
log likelihood and the offset (which is always zero for this model)
"""
S = data[0].shape[0] #number of time series
#build a new dataset from the clusters, by combining all clusters together
X = np.zeros([0,1])
Y = np.zeros([0,S])
#for each person in the cluster,
#add their inputs and data to the new dataset
for p in clust:
X = np.vstack([X,inputs[p]])
Y = np.vstack([Y,data[p].T])
#find the loglikelihood. We just add together the LL for each time series.
#ll=0
#for s in range(S):
# m = GPy.models.GPRegression(X,Y[:,s][:,None])
# m.optimize()
# ll+=m.log_likelihood()
m = GPy.models.GPRegression(X,Y)
m.optimize()
ll=m.log_likelihood()
return ll,0
def get_log_likelihood_offset(inputs,data,clust):
"""Get the log likelihood of a combined set of clusters, fitting the offsets
arguments:
inputs -- the 'X's in a list, one item per cluster
data -- the 'Y's in a list, one item per cluster
clust -- list of clusters to use
returns a tuple:
log likelihood and the offset
"""
#if we've only got one cluster, the model has an error, so we want to just
#use normal GPRegression.
if len(clust)==1:
return get_log_likelihood(inputs,data,clust)
S = data[0].shape[0] #number of time series
X = np.zeros([0,2]) #notice the extra column, this is for the cluster index
Y = np.zeros([0,S])
#for each person in the cluster, add their inputs and data to the new
#dataset. Note we add an index identifying which person is which data point.
#This is for the offset model to use, to allow it to know which data points
#to shift.
for i,p in enumerate(clust):
idx = i*np.ones([inputs[p].shape[0],1])
X = np.vstack([X,np.hstack([inputs[p],idx])])
Y = np.vstack([Y,data[p].T])
m = GPy.models.GPOffsetRegression(X,Y)
#TODO: How to select a sensible prior?
m.offset.set_prior(GPy.priors.Gaussian(0,20))
#TODO: Set a sensible start value for the length scale,
#make it long to help the offset fit.
m.optimize()
ll = m.log_likelihood()
offset = m.offset.values[0]
return ll,offset
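# Minimal sketch (not part of the original module) of the data layout the
# functions above expect: `inputs` is a list of (n_points, 1) arrays of time
# stamps and `data` is a list of (S, n_points) arrays, one entry per initial
# cluster. The helper name is illustrative.
def _example_data(n_clusters=4, n_series=2, n_points=10, seed=0):
    "Build synthetic inputs/data lists shaped for cluster(data, inputs)."
    rng = np.random.RandomState(seed)
    inputs = [rng.rand(n_points, 1) for _ in range(n_clusters)]
    data = [rng.randn(n_series, n_points) for _ in range(n_clusters)]
    return inputs, data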
def cluster_loglike(clusti,data,inputs,pairloglikes):
results = [get_log_likelihood_offset(inputs,data,[clusti,clustj]) for clustj in range(clusti) if np.isnan(pairloglikes[clusti,clustj])]
return(results)
def cluster(data,inputs,verbose=False,parallel=False,cores=1,batch=1):
"""Clusters data
Using the new offset model, this method uses a greedy algorithm to cluster
the data. It starts with all the data points in separate clusters and tests
whether combining them increases the overall log-likelihood (LL). It then
iteratively joins pairs of clusters which cause the greatest increase in
the LL, until no join increases the LL.
arguments:
inputs -- the 'X's in a list, one item per cluster
data -- the 'Y's in a list, one item per cluster
verbose -- True or False. If True, gives additional information on the algorithm.
parallel -- True or False. If True, runs the algorithm in parallel using joblib
cores -- How many cores to parallelise over. Ignored if parallel is False
batch -- A joblib parallelisation argument. Ignored if parallel is False.
returns a list of the clusters.
"""
N=len(data)
if verbose and parallel:
try:
omp_num_threads = os.environ['OMP_NUM_THREADS']
except KeyError:
omp_num_threads = 'unset'
print('parallel processing')
print('{0} cores : This is the number of cores your job will be split over'.format(cores))
print('{0} batch size : The number of atomic tasks to dispatch at once to each worker. '.format(batch))
print('{0} OMP_NUM_THREADS : An environment variable that affects the number of cores for OpenMP-accelerated functions (e.g. Intel MKL).'.format(omp_num_threads))
if verbose and not parallel:
print('serial processing')
#Define a set of N active cluster
active = []
for p in range(0,N):
active.append([p])
loglikes = np.zeros(len(active))
loglikes[:] = None
pairloglikes = np.zeros([len(active),len(active)])
pairloglikes[:] = None
pairoffset = np.zeros([len(active),len(active)])
it = 0
with Parallel(n_jobs=cores,batch_size=batch) as parallel_pool:
while True:
if verbose:
it +=1
print("Iteration %d" % it)
t0 = time.time()
if parallel:
#results = [cluster_loglike(clusti,data,inputs,loglikes,pairloglikes) for clusti in range(len(active))]
results = parallel_pool(delayed(cluster_loglike)(vari,data,inputs,pairloglikes) for vari in range(len(active)))
#results is a list of lists that needs flattening
results=[item for sublist in results for item in sublist]
#Make the assignments
results_index = 0
for clusti in range(len(active)):
if np.isnan(loglikes[clusti]):
loglikes[clusti], unused_offset = get_log_likelihood_offset(inputs,data,[clusti])
for clustj in range(clusti): #count from 0 to clustj-1
if np.isnan(pairloglikes[clusti,clustj]):
pairloglikes[clusti,clustj],pairoffset[clusti,clustj] = results[results_index]
results_index = results_index + 1
else: #serial
for clusti in range(len(active)):
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
if np.isnan(loglikes[clusti]):
loglikes[clusti], unused_offset = get_log_likelihood_offset(inputs,data,[clusti])
#try combining with each other cluster...
for clustj in range(clusti): #count from 0 to clustj-1
temp = [clusti,clustj]
if np.isnan(pairloglikes[clusti,clustj]):
pairloglikes[clusti,clustj],pairoffset[clusti,clustj] = get_log_likelihood_offset(inputs,data,temp)
seploglikes = np.repeat(loglikes[:,None].T,len(loglikes),0)+np.repeat(loglikes[:,None],len(loglikes),1)
loglikeimprovement = pairloglikes - seploglikes #how much likelihood improves with clustering
top = np.unravel_index(np.nanargmax(pairloglikes-seploglikes), pairloglikes.shape)
#if loglikeimprovement.shape[0]<3:
# #no more clustering to do - this shouldn't happen really unless
# #we've set the threshold to apply clustering to less than 0
# break
#if theres further clustering to be done...
if loglikeimprovement[top[0],top[1]]>0:
active[top[0]].extend(active[top[1]])
offset=pairoffset[top[0],top[1]]
inputs[top[0]] = np.vstack([inputs[top[0]],inputs[top[1]]-offset])
data[top[0]] = np.hstack([data[top[0]],data[top[1]]])
del inputs[top[1]]
del data[top[1]]
del active[top[1]]
#None = message to say we need to recalculate
pairloglikes[:,top[0]] = None
pairloglikes[top[0],:] = None
pairloglikes = np.delete(pairloglikes,top[1],0)
pairloglikes = np.delete(pairloglikes,top[1],1)
loglikes[top[0]] = None
loglikes = np.delete(loglikes,top[1])
if verbose:
t1 = time.time()
print('\nloop time',t1-t0)
else:
break
#if loglikeimprovement[top[0],top[1]]>0:
# print "joined"
# print top
# print offset
# print offsets
# print offsets[top[1]]-offsets[top[0]]
#TODO Add a way to return the offsets applied to all the time series
return active
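# Hypothetical end-to-end usage (builds on the _example_data sketch above and
# requires a GPy build providing GPOffsetRegression):
#   inputs, data = _example_data()
#   clusters = cluster(data, inputs, verbose=True)
#   print(clusters)  # e.g. [[0, 2], [1, 3]]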
#starttime = time.time()
#active = cluster(data,inputs)
#endtime = time.time()
#print "TOTAL TIME %0.4f" % (endtime-starttime)
|
mikecroucher/GPy
|
GPy/util/cluster_with_offset.py
|
Python
|
bsd-3-clause
| 9,193
|
[
"Gaussian"
] |
503684b291517dc819ebd820ca3fc2d2438df439a938400374e5f22cfbf0725d
|
# Orca
#
# Copyright (C) 2010-2011 The Orca Team
# Copyright (C) 2011-2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (C) 2010-2011 The Orca Team" \
"Copyright (C) 2011-2012 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import pyatspi.utils as utils
import orca.scripts.default as default
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.guilabels as guilabels
import orca.input_event as input_event
import orca.messages as messages
import orca.orca as orca
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speechserver as speechserver
import orca.orca_state as orca_state
import orca.speech as speech
from .structural_navigation import StructuralNavigation
from .braille_generator import BrailleGenerator
from .speech_generator import SpeechGenerator
from .script_utilities import Utilities
_settingsManager = settings_manager.getManager()
########################################################################
# #
# The WebKitGtk script class. #
# #
########################################################################
class Script(default.Script):
CARET_NAVIGATION_KEYS = ['Left', 'Right', 'Up', 'Down', 'Home', 'End']
def __init__(self, app, isBrowser=True):
"""Creates a new script for WebKitGtk applications.
Arguments:
- app: the application to create a script for.
"""
default.Script.__init__(self, app)
self._loadingDocumentContent = False
self._isBrowser = isBrowser
self._lastCaretContext = None, -1
self.sayAllOnLoadCheckButton = None
if _settingsManager.getSetting('sayAllOnLoad') is None:
_settingsManager.setSetting('sayAllOnLoad', True)
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for this script that can be
called by the key and braille bindings."""
default.Script.setupInputEventHandlers(self)
self.inputEventHandlers.update(
self.structuralNavigation.inputEventHandlers)
self.inputEventHandlers["sayAllHandler"] = \
input_event.InputEventHandler(
Script.sayAll,
cmdnames.SAY_ALL)
self.inputEventHandlers["panBrailleLeftHandler"] = \
input_event.InputEventHandler(
Script.panBrailleLeft,
cmdnames.PAN_BRAILLE_LEFT,
False) # Do not enable learn mode for this action
self.inputEventHandlers["panBrailleRightHandler"] = \
input_event.InputEventHandler(
Script.panBrailleRight,
cmdnames.PAN_BRAILLE_RIGHT,
False) # Do not enable learn mode for this action
def getToolkitKeyBindings(self):
"""Returns the toolkit-specific keybindings for this script."""
return self.structuralNavigation.keyBindings
def getAppPreferencesGUI(self):
"""Return a GtkGrid containing the application unique configuration
GUI items for the current application."""
from gi.repository import Gtk
grid = Gtk.Grid()
grid.set_border_width(12)
label = guilabels.READ_PAGE_UPON_LOAD
self.sayAllOnLoadCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.sayAllOnLoadCheckButton.set_active(
_settingsManager.getSetting('sayAllOnLoad'))
grid.attach(self.sayAllOnLoadCheckButton, 0, 0, 1, 1)
grid.show_all()
return grid
def getPreferencesFromGUI(self):
"""Returns a dictionary with the app-specific preferences."""
return {'sayAllOnLoad': self.sayAllOnLoadCheckButton.get_active()}
def getBrailleGenerator(self):
"""Returns the braille generator for this script."""
return BrailleGenerator(self)
def getSpeechGenerator(self):
"""Returns the speech generator for this script."""
return SpeechGenerator(self)
def getStructuralNavigation(self):
"""Returns the 'structural navigation' class for this script."""
types = self.getEnabledStructuralNavigationTypes()
return StructuralNavigation(self, types, True)
def getEnabledStructuralNavigationTypes(self):
"""Returns a list of the structural navigation object types
enabled in this script."""
enabledTypes = [StructuralNavigation.BLOCKQUOTE,
StructuralNavigation.BUTTON,
StructuralNavigation.CHECK_BOX,
StructuralNavigation.CHUNK,
StructuralNavigation.COMBO_BOX,
StructuralNavigation.ENTRY,
StructuralNavigation.FORM_FIELD,
StructuralNavigation.HEADING,
StructuralNavigation.LANDMARK,
StructuralNavigation.LINK,
StructuralNavigation.LIST,
StructuralNavigation.LIST_ITEM,
StructuralNavigation.LIVE_REGION,
StructuralNavigation.PARAGRAPH,
StructuralNavigation.RADIO_BUTTON,
StructuralNavigation.SEPARATOR,
StructuralNavigation.TABLE,
StructuralNavigation.TABLE_CELL,
StructuralNavigation.UNVISITED_LINK,
StructuralNavigation.VISITED_LINK]
return enabledTypes
def getUtilities(self):
"""Returns the utilites for this script."""
return Utilities(self)
def onCaretMoved(self, event):
"""Called whenever the caret moves.
Arguments:
- event: the Event
"""
self._lastCaretContext = event.source, event.detail1
lastKey, mods = self.utilities.lastKeyAndModifiers()
if lastKey in ['Tab', 'ISO_Left_Tab']:
return
if lastKey == 'Down' \
and orca_state.locusOfFocus == event.source.parent \
and event.source.getIndexInParent() == 0 \
and orca_state.locusOfFocus.getRole() == pyatspi.ROLE_LINK:
self.updateBraille(event.source)
return
orca.setLocusOfFocus(event, event.source, False)
default.Script.onCaretMoved(self, event)
def onDocumentReload(self, event):
"""Callback for document:reload accessibility events."""
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if event.source.getRole() in docRoles:
self._loadingDocumentContent = True
def onDocumentLoadComplete(self, event):
"""Callback for document:load-complete accessibility events."""
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if event.source.getRole() not in docRoles:
return
self._loadingDocumentContent = False
if not self._isBrowser:
return
# TODO: We need to see what happens in Epiphany on pages where focus
# is grabbed rather than set the caret at the start. But for simple
# content in both Yelp and Epiphany this is alright for now.
obj, offset = self.setCaretAtStart(event.source)
orca.setLocusOfFocus(event, obj, False)
self.updateBraille(obj)
if _settingsManager.getSetting('sayAllOnLoad') \
and _settingsManager.getSetting('enableSpeech'):
self.sayAll(None)
def onDocumentLoadStopped(self, event):
"""Callback for document:load-stopped accessibility events."""
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if event.source.getRole() in docRoles:
self._loadingDocumentContent = False
def onFocusedChanged(self, event):
"""Callback for object:state-changed:focused accessibility events."""
obj = event.source
role = obj.getRole()
self._lastCaretContext = obj, -1
if role == pyatspi.ROLE_LIST_ITEM and obj.childCount:
return
widgetRoles = [pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_RADIO_BUTTON]
if role in widgetRoles:
default.Script.onFocusedChanged(self, event)
return
textRoles = [pyatspi.ROLE_HEADING,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_TABLE_CELL]
if role in textRoles:
return
if not (role == pyatspi.ROLE_LINK and obj.childCount):
lastKey, mods = self.utilities.lastKeyAndModifiers()
if lastKey in self.CARET_NAVIGATION_KEYS:
return
default.Script.onFocusedChanged(self, event)
def onBusyChanged(self, event):
"""Callback for object:state-changed:busy accessibility events."""
obj = event.source
try:
role = obj.getRole()
name = obj.name
except Exception:
return
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if role not in docRoles or not self._isBrowser:
return
if event.detail1:
self.presentMessage(messages.PAGE_LOADING_START)
elif name:
self.presentMessage(messages.PAGE_LOADING_END_NAMED % name)
else:
self.presentMessage(messages.PAGE_LOADING_END)
def sayCharacter(self, obj):
"""Speak the character at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayCharacter(self, obj)
return
boundary = pyatspi.TEXT_BOUNDARY_CHAR
objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
for (obj, start, end, string) in objects:
if string:
self.speakCharacter(string)
else:
speech.speak(self.speechGenerator.generateSpeech(obj))
def sayWord(self, obj):
"""Speaks the word at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayWord(self, obj)
return
boundary = pyatspi.TEXT_BOUNDARY_WORD_START
objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
for (obj, start, end, string) in objects:
self.sayPhrase(obj, start, end)
def sayLine(self, obj):
"""Speaks the line at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayLine(self, obj)
return
boundary = pyatspi.TEXT_BOUNDARY_LINE_START
objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
for (obj, start, end, string) in objects:
self.sayPhrase(obj, start, end)
# TODO: Move these next items into the speech generator.
if obj.getRole() == pyatspi.ROLE_PANEL \
and obj.getIndexInParent() == 0:
obj = obj.parent
rolesToSpeak = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK]
if obj.getRole() in rolesToSpeak:
speech.speak(self.speechGenerator.getRoleName(obj))
def sayPhrase(self, obj, startOffset, endOffset):
"""Speaks the text of an Accessible object between the given offsets.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
- startOffset: the start text offset.
- endOffset: the end text offset.
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayPhrase(self, obj, startOffset, endOffset)
return
phrase = self.utilities.substring(obj, startOffset, endOffset)
if phrase and phrase != "\n":
if phrase.isupper():
voice = self.voices[settings.UPPERCASE_VOICE]
else:
voice = self.voices[settings.DEFAULT_VOICE]
phrase = self.utilities.adjustForRepeats(phrase)
links = [x for x in obj if x.getRole() == pyatspi.ROLE_LINK]
if links:
phrase = self.utilities.adjustForLinks(obj, phrase, startOffset)
speech.speak(phrase, voice)
else:
# Speak blank line if appropriate.
#
self.sayCharacter(obj)
def skipObjectEvent(self, event):
"""Gives us, and scripts, the ability to decide an event isn't
worth taking the time to process under the current circumstances.
Arguments:
- event: the Event
Returns True if we shouldn't bother processing this object event.
"""
if event.type.startswith('object:state-changed:focused') \
and event.detail1:
if event.source.getRole() == pyatspi.ROLE_LINK:
return False
return default.Script.skipObjectEvent(self, event)
def useStructuralNavigationModel(self):
"""Returns True if we should do our own structural navigation.
This should return False if we're in a form field, or not in
document content.
"""
doNotHandleRoles = [pyatspi.ROLE_ENTRY,
pyatspi.ROLE_TEXT,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_LIST,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_MENU_ITEM]
if not self.structuralNavigation.enabled:
return False
if not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return False
states = orca_state.locusOfFocus.getState()
if states.contains(pyatspi.STATE_EDITABLE):
return False
role = orca_state.locusOfFocus.getRole()
if role in doNotHandleRoles:
if role == pyatspi.ROLE_LIST_ITEM:
return not states.contains(pyatspi.STATE_SELECTABLE)
if states.contains(pyatspi.STATE_FOCUSED):
return False
return True
def setCaretAtStart(self, obj):
"""Attempts to set the caret at the specified offset in obj. Because
this is not always possible, this method will attempt to locate the
first place inside of obj in which the caret can be positioned.
Arguments:
- obj: the accessible object in which the caret should be placed.
Returns the object and offset in which we were able to set the caret.
Otherwise, None if we could not find a text object, and -1 if we were
not able to set the caret.
"""
def implementsText(obj):
if obj.getRole() == pyatspi.ROLE_LIST:
return False
return 'Text' in utils.listInterfaces(obj)
child = obj
if not implementsText(obj):
child = utils.findDescendant(obj, implementsText)
if not child:
return None, -1
index = -1
text = child.queryText()
for i in range(text.characterCount):
if text.setCaretOffset(i):
index = i
break
return child, index
def panBrailleLeft(self, inputEvent=None, panAmount=0):
"""In document content, we want to use the panning keys to browse the
entire document.
"""
if self.flatReviewContext \
or not self.isBrailleBeginningShowing() \
or not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return default.Script.panBrailleLeft(self, inputEvent, panAmount)
obj = self.utilities.findPreviousObject(orca_state.locusOfFocus)
orca.setLocusOfFocus(None, obj, notifyScript=False)
self.updateBraille(obj)
# Hack: When panning to the left in a document, we want to start at
# the right/bottom of each new object. For now, we'll pan there.
# When time permits, we'll give our braille code some smarts.
while self.panBrailleInDirection(panToLeft=False):
pass
self.refreshBraille(False)
return True
def panBrailleRight(self, inputEvent=None, panAmount=0):
"""In document content, we want to use the panning keys to browse the
entire document.
"""
if self.flatReviewContext \
or not self.isBrailleEndShowing() \
or not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return default.Script.panBrailleRight(self, inputEvent, panAmount)
obj = self.utilities.findNextObject(orca_state.locusOfFocus)
orca.setLocusOfFocus(None, obj, notifyScript=False)
self.updateBraille(obj)
# Hack: When panning to the right in a document, we want to start at
# the left/top of each new object. For now, we'll pan there. When time
# permits, we'll give our braille code some smarts.
while self.panBrailleInDirection(panToLeft=True):
pass
self.refreshBraille(False)
return True
def sayAll(self, inputEvent):
"""Speaks the contents of the document beginning with the present
location. Overridden in this script because the sayAll could have
been started on an object without text (such as an image).
"""
if not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return default.Script.sayAll(self, inputEvent)
speech.sayAll(self.textLines(orca_state.locusOfFocus),
self.__sayAllProgressCallback)
return True
def getTextSegments(self, obj, boundary, offset=0):
segments = []
text = obj.queryText()
length = text.characterCount
string, start, end = text.getTextAtOffset(offset, boundary)
while string and offset < length:
string = self.utilities.adjustForRepeats(string)
voice = self.speechGenerator.getVoiceForString(obj, string)
string = self.utilities.adjustForLinks(obj, string, start)
# Incrementing the offset should cause us to eventually reach
# the end of the text as indicated by a 0-length string and
# start and end offsets of 0. Sometimes WebKitGtk returns the
# final text segment instead.
if segments and [string, start, end, voice] == segments[-1]:
break
segments.append([string, start, end, voice])
offset = end + 1
string, start, end = text.getTextAtOffset(offset, boundary)
return segments
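# Illustrative pure-Python analogue (not Orca/AT-SPI API) of the guard in
# getTextSegments above: advance past each segment's end offset and stop as
# soon as the text provider keeps handing back the same final segment.
@staticmethod
def _walkSegments(getAt, length, offset=0):
    "Yield (string, start, end) tuples until exhausted or a segment repeats."
    last = None
    while offset < length:
        segment = getAt(offset)
        if not segment[0] or segment == last:
            break
        yield segment
        last = segment
        offset = segment[2] + 1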
def textLines(self, obj):
"""Creates a generator that can be used to iterate over each line
of a text object, starting at the caret offset.
Arguments:
- obj: an Accessible that has a text specialization
Returns an iterator that produces elements of the form:
[SayAllContext, acss], where SayAllContext has the text to be
spoken and acss is an ACSS instance for speaking the text.
"""
if not obj:
return
if obj.getRole() == pyatspi.ROLE_LINK:
obj = obj.parent
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
document = utils.findAncestor(obj, lambda x: x.getRole() in docRoles)
if not document or document.getState().contains(pyatspi.STATE_BUSY):
return
allTextObjs = utils.findAllDescendants(
document, lambda x: x and 'Text' in utils.listInterfaces(x))
allTextObjs = allTextObjs[allTextObjs.index(obj):len(allTextObjs)]
textObjs = [x for x in allTextObjs if x.parent not in allTextObjs]
if not textObjs:
return
boundary = pyatspi.TEXT_BOUNDARY_LINE_START
sayAllStyle = _settingsManager.getSetting('sayAllStyle')
if sayAllStyle == settings.SAYALL_STYLE_SENTENCE:
boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START
offset = textObjs[0].queryText().caretOffset
for textObj in textObjs:
textSegments = self.getTextSegments(textObj, boundary, offset)
roleInfo = self.speechGenerator.getRoleName(textObj)
if roleInfo:
roleName, voice = roleInfo
textSegments.append([roleName, 0, -1, voice])
for (string, start, end, voice) in textSegments:
yield [speechserver.SayAllContext(textObj, string, start, end),
voice]
offset = 0
def __sayAllProgressCallback(self, context, progressType):
if progressType == speechserver.SayAllContext.PROGRESS:
return
obj = context.obj
orca.setLocusOfFocus(None, obj, notifyScript=False)
offset = context.currentOffset
text = obj.queryText()
if progressType == speechserver.SayAllContext.INTERRUPTED:
text.setCaretOffset(offset)
return
# SayAllContext.COMPLETED doesn't necessarily mean done with SayAll;
# just done with the current object. If we're still in SayAll, we do
# not want to set the caret (and hence set focus) in a link we just
# passed by.
try:
hypertext = obj.queryHypertext()
except NotImplementedError:
pass
else:
linkCount = hypertext.getNLinks()
links = [hypertext.getLink(x) for x in range(linkCount)]
if [l for l in links if l.startIndex <= offset <= l.endIndex]:
return
text.setCaretOffset(offset)
def getTextLineAtCaret(self, obj, offset=None):
"""Gets the line of text where the caret is.
Argument:
- obj: an Accessible object that implements the AccessibleText
interface
- offset: an optional caret offset to use. (Not used here at the
moment, but needed in the Gecko script)
Returns the [string, caretOffset, startOffset] for the line of text
where the caret is.
"""
textLine = default.Script.getTextLineAtCaret(self, obj, offset)
string = textLine[0]
if string and string.find(self.EMBEDDED_OBJECT_CHARACTER) == -1 \
and obj.getState().contains(pyatspi.STATE_FOCUSED):
return textLine
textLine[0] = self.utilities.displayedText(obj)
try:
text = obj.queryText()
except NotImplementedError:
pass
else:
textLine[1] = min(textLine[1], text.characterCount)
return textLine
def updateBraille(self, obj, extraRegion=None):
"""Updates the braille display to show the given object.
Arguments:
- obj: the Accessible
- extra: extra Region to add to the end
"""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: update disabled")
return
if not obj:
return
if not self.utilities.isWebKitGtk(obj) \
or (not self.utilities.isInlineContainer(obj) \
and not self.utilities.isTextListItem(obj)):
default.Script.updateBraille(self, obj, extraRegion)
return
brailleLine = self.getNewBrailleLine(clearBraille=True, addLine=True)
for child in obj:
if not self.utilities.onSameLine(child, obj[0]):
break
[regions, fRegion] = self.brailleGenerator.generateBraille(child)
self.addBrailleRegionsToLine(regions, brailleLine)
if not brailleLine.regions:
[regions, fRegion] = self.brailleGenerator.generateBraille(
obj, role=pyatspi.ROLE_PARAGRAPH)
self.addBrailleRegionsToLine(regions, brailleLine)
self.setBrailleFocus(fRegion)
if extraRegion:
self.addBrailleRegionToLine(extraRegion, brailleLine)
self.refreshBraille()
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/toolkits/WebKitGtk/script.py
|
Python
|
gpl-3.0
| 25,412
|
[
"ORCA"
] |
fcfa6de85956b483601035cbf2de7d8e8fcf469b3b7ef2a0ede21577297db987
|
langversion = 1
langname = "English"
##updater
# text construct: "Versión "+version+available+changelog
#example: Version 3 available, click here to download, or for changelog click here
available = " disponible, haga clic aquí para descargar"
changelog = ", o para el registro de cambios haga clic aquí"
##world gen
worldify = "mundo de la imagen"
planetoids = "Planetoids & Terra"
arena = "la arena de mazmorra"
flat = "Flatworld"
new = "Nuevo Mundo:"
##mainmenu
#omnitool
settings = "Ajustes"
exit = "Salida"
#start
start = "Empezar"
terraria = "Terraria"
steamfree = "Terraria Steamfree"
#open
open = "Abra"
imagefolder = "Imágenes del Mundo"
backupfolder = "Copias de Seguridad del Mundo"
themes = "Los Temas de Omnitool"
#visit
visit = "Visitar"
donate = "Donar"
homepage = "Omnitool"
TO = "Terraria En Línea"
wiki = "Terraria Wiki"
##world thumbnail
label = "Mundo: "
##settings menu
warning = "Todos los cambios requieren un reinicio para entrar en vigor" #out of date - not all changes require a restart now
none = "Ninguno"
tiny = "Pequeñita" #unused
small = "Pequeña"
medium = "Mediana"
large = "Grande"
very_large = "XXL"
theme_select = "Seleccionar Tema:"
thumbsize = "Tamaño de la Thumbnail del Mundo:"
mk_backups = "Hacer Copias de Seguridad:"
world_columns = "Mundo Columnas:"
##world interaction menu
wa_worldactionmenu = "Action for {}:"
wa_imageopen = "Open Image"
wa_renderopen = "Render World"
wa_teditopen = "Open in TEdit"
wa_update = "Update Image"
wa_super = "Generate Super-Image"
##planetoids & terra
pt_start = 'Inicie la Generación!'
pt_name = "Nombre: "
pt_mode = "Modo: "
pt_small = "Pequenas Planetoids"
pt_medium = "Medianas Planetoids"
pt_large = "Grandes Planetoids"
pt_square = "Cuadradas Planetoids"
pt_both = "Grandes Planetoids & Terra"
pt_square_terra = "Cuadrada Terra"
pt_start_sel = "Empezar: "
pt_morning = "La Mañana"
pt_day = "El Día"
pt_night = "La Noche"
pt_bloodmoon = "Luna de Sangre"
pt_extras = "Extras: "
pt_sun = "Sol: "
pt_atlantis = "Atlantis: "
pt_merchant = "Comerciante: "
pt_lloot = "Menos Botín: "
pt_mirror = "Mirror Mode: "
##worldify
w_start = "Empiece la worldification!"
w_cont = "Continúe"
w_name = "Nombre: "
w_rgb = "RGB"
w_hsv = "Ponderado HSV"
w_method = "Método: "
w_priority = "Prioridad de Selección"
w_hue = "Tono: "
w_saturation = "Saturación: "
w_brightness = "Luminosidad: "
##arena
a_start = "Empiece la Generación!"
a_name = "Nombre: "
a_rooms = "Salas: "
a_sidelen = "El lado de longitud de la Sala: "
a_corlen = "Longitude del Corredor: "
a_chest = "Cofre: "
a_itemchest = "Artículos por Cofre: "
a_light = "Iluminación: "
a_chances = "Posibilidades de la Sala: "
a_standard = "Estándar: "
a_cross = "Corredor de Cruz: "
##torch
at_chances = "Posibilidades de color:"
at_full = "Espectro completo"
at_blue = "Azul"
at_red = "Rojo"
at_green = "Verde"
at_pink = "Demonia Antorcha"
at_white = "Blando"
at_yellow = "Amarillo"
at_purple = "Morado"
at_lime = "Madilta Antorcha"
##plugins
pl_start = "Comienza plugin"
pl_rec = "Seleccione un mundo ser recibido"
pl_mod = "Seleccione un mundo ser modificado"
pl_trans = "Seleccione dos mundos ser usado para una transferencia"
pl_trans_source = "Fuente"
pl_trans_target = "Objetivo"
##flatworld
fw_size = "World size:"
fw_small = "small"
fw_medium = "medium"
fw_large = "large"
fw_tile = "Tile type:"
fw_wall = "Wall type:"
fw_surf = "Surface type:"
|
flying-sheep/omnitool
|
Language/spanish.py
|
Python
|
mit
| 3,418
|
[
"VisIt"
] |
6d5ae649b15c1e3d30a54d77d6c67e24e9e371b1edef30bfdb51650c73fabd23
|
#
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for compound descriptors
"""
import unittest
from rdkit.ML.Descriptors import Parser
import math
class TestCase(unittest.TestCase):
def setUp(self):
self.piece1 = [['d1', 'd2', 's1'], ['d1', 'd2', 's1']]
self.aDict = {'Fe': {'d1': 1, 'd2': 2, 's1': 'abc'}, 'Pt': {'d1': 10, 'd2': 20, 's1': 'def'}}
self.pDict = {'d1': 100., 'd2': 200.}
self.compos = [('Fe', 1), ('Pt', 1)]
self.cExprs = ["SUM($1)", "SUM($1)+SUM($2)", "MEAN($1)", "DEV($2)", "MAX($1)", "MIN($2)",
"SUM($1)/$a", 'HAS($3,"def")', 'HAS($3,"xyz")', 'HAS($3)', "sqrt($a+($b+$b))"]
self.results = [11., 33., 5.5, 9., 10., 2., 0.11, 1, 0, -666, math.sqrt(500)]
self.tol = 0.0001
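# Worked check (illustrative): $1/$2 select the per-atom descriptors
# ('d1'/'d2') and $a selects the first compound property ('d1' in pDict).
# With Fe(d1=1) and Pt(d1=10):
#   SUM($1)    = 1 + 10     = 11.0  -> results[0]
#   MEAN($1)   = 11 / 2     = 5.5   -> results[2]
#   SUM($1)/$a = 11 / 100.0 = 0.11  -> results[6]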
def testSingleCalcs(self):
" testing calculation of a single descriptor "
for i, cExpr in enumerate(self.cExprs):
argVect = self.piece1 + [cExpr]
res = Parser.CalcSingleCompoundDescriptor(self.compos, argVect, self.aDict, self.pDict)
self.assertAlmostEqual(res, self.results[i], 2)
def testMultipleCalcs(self):
" testing calculation of multiple descriptors "
for i, cExpr in enumerate(self.cExprs):
argVect = self.piece1 + [cExpr]
res = Parser.CalcMultipleCompoundsDescriptor([self.compos, self.compos], argVect, self.aDict,
[self.pDict, self.pDict])
self.assertAlmostEqual(res[0], self.results[i], delta=self.tol,
msg='Expression {0} failed'.format(cExpr))
self.assertAlmostEqual(res[1], self.results[i], delta=self.tol,
msg='Expression {0} failed'.format(cExpr))
def _test_exampleCode(self):
Parser._exampleCode()
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
rdkit/ML/Descriptors/UnitTestParser.py
|
Python
|
bsd-3-clause
| 1,864
|
[
"RDKit"
] |
678735204371646ca055813e45e975a8d5607baea36e74725aa656e6b5fbe8fd
|
# -*- coding: utf-8 -*-
#
# Pulsar documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 16 23:16:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import pulsar
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
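# Example (not in the original config): further stock extensions can be
# enabled the same way, e.g.
#   extensions.append('sphinx.ext.intersphinx')
#   intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}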
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pulsar'
copyright = u'2014, Galaxy Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pulsar.__version__
# The full version, including alpha/beta/rc tags.
release = pulsar.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PulsarDoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pulsar.tex', u'Pulsar Documentation',
u'The Galaxy Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pulsar', u'Pulsar Documentation',
[u'Galaxy Project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pulsar', u'Pulsar Documentation',
u'Galaxy Project', 'Pulsar', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
ssorgatem/pulsar
|
docs/conf.py
|
Python
|
apache-2.0
| 7,849
|
[
"Galaxy"
] |
ceb4793d564bcd2491cc3cbd24031106513fbf501622e1686e42f10d48051d00
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2015 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" myhdl toVHDL conversion module.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import math
import os
import inspect
from datetime import datetime
#import compiler
#from compiler import ast as astNode
import ast
from types import GeneratorType
import warnings
from copy import copy
import string
import myhdl
from myhdl import *
from myhdl import ToVHDLError, ToVHDLWarning
from myhdl._extractHierarchy import (_HierExtr, _isMem, _getMemInfo,
_UserVhdlCode, _userCodeMap)
from myhdl._instance import _Instantiator
from myhdl.conversion._misc import (_error, _kind, _context,
_ConversionMixin, _Label, _genUniqueSuffix, _isConstant)
from myhdl.conversion._analyze import (_analyzeSigs, _analyzeGens, _analyzeTopFunc,
_Ram, _Rom, _enumTypeSet)
from myhdl._Signal import _Signal, _WaiterList
from myhdl.conversion._toVHDLPackage import _package
from myhdl._util import _flatten
from myhdl._compat import integer_types, class_types, StringIO
from myhdl._ShadowSignal import _TristateSignal, _TristateDriver
from myhdl._block import _Block
from myhdl._getHierarchy import _getHierarchy
_version = myhdl.__version__.replace('.', '')
_shortversion = _version.replace('dev', '')
_converting = 0
_profileFunc = None
_enumPortTypeSet = set()
def _checkArgs(arglist):
for arg in arglist:
if not isinstance(arg, (GeneratorType, _Instantiator, _UserVhdlCode)):
raise ToVHDLError(_error.ArgType, arg)
def _flatten(*args):
arglist = []
for arg in args:
if isinstance(arg, _Block):
if arg.vhdl_code is not None:
arglist.append(arg.vhdl_code)
continue
else:
arg = arg.subs
if id(arg) in _userCodeMap['vhdl']:
arglist.append(_userCodeMap['vhdl'][id(arg)])
elif isinstance(arg, (list, tuple, set)):
for item in arg:
arglist.extend(_flatten(item))
else:
arglist.append(arg)
return arglist
def _makeDoc(doc, indent=''):
if doc is None:
return ''
doc = inspect.cleandoc(doc)
pre = '\n' + indent + '-- '
doc = '-- ' + doc
doc = doc.replace('\n', pre)
return doc
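# Worked example (illustrative):
#   _makeDoc("line one\nline two", indent="  ")
# returns the VHDL comment block
#   "-- line one\n  -- line two"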
class _ToVHDLConvertor(object):
__slots__ = ("name",
"directory",
"component_declarations",
"header",
"no_myhdl_header",
"no_myhdl_package",
"library",
"use_clauses",
"architecture",
"std_logic_ports",
"initial_values"
)
def __init__(self):
self.name = None
self.directory = None
self.component_declarations = None
self.header = ''
self.no_myhdl_header = False
self.no_myhdl_package = False
self.library = "work"
self.use_clauses = None
self.architecture = "MyHDL"
self.std_logic_ports = False
self.initial_values = False
def __call__(self, func, *args, **kwargs):
global _converting
if _converting:
return func(*args, **kwargs) # skip
else:
# clean start
sys.setprofile(None)
from myhdl import _traceSignals
if _traceSignals._tracing:
raise ToVHDLError("Cannot use toVHDL while tracing signals")
if not isinstance(func, _Block):
if not callable(func):
raise ToVHDLError(_error.FirstArgType, "got %s" % type(func))
_converting = 1
if self.name is None:
name = func.__name__
if isinstance(func, _Block):
name = func.func.__name__
else:
name = str(self.name)
if isinstance(func, _Block):
try:
h = _getHierarchy(name, func)
finally:
_converting = 0
else:
warnings.warn(
"\n toVHDL(): Deprecated usage: See http://dev.myhdl.org/meps/mep-114.html", stacklevel=2)
try:
h = _HierExtr(name, func, *args, **kwargs)
finally:
_converting = 0
if self.directory is None:
directory = ''
else:
directory = self.directory
compDecls = self.component_declarations
useClauses = self.use_clauses
vpath = os.path.join(directory, name + ".vhd")
vfile = open(vpath, 'w')
ppath = os.path.join(directory, "pck_myhdl_%s.vhd" % _shortversion)
pfile = None
# # write MyHDL package always during development, as it may change
# pfile = None
# if not os.path.isfile(ppath):
# pfile = open(ppath, 'w')
if not self.no_myhdl_package:
pfile = open(ppath, 'w')
### initialize properly ###
_genUniqueSuffix.reset()
_enumTypeSet.clear()
_enumPortTypeSet.clear()
arglist = _flatten(h.top)
_checkArgs(arglist)
genlist = _analyzeGens(arglist, h.absnames)
siglist, memlist = _analyzeSigs(h.hierarchy, hdl='VHDL')
# print h.top
_annotateTypes(genlist)
# infer interface
if isinstance(func, _Block):
# infer interface after signals have been analyzed
func._inferInterface()
intf = func
else:
intf = _analyzeTopFunc(func, *args, **kwargs)
intf.name = name
# sanity checks on interface
for portname in intf.argnames:
s = intf.argdict[portname]
if s._name is None:
raise ToVHDLError(_error.ShadowingSignal, portname)
if s._inList:
raise ToVHDLError(_error.PortInList, portname)
# add enum types to port-related set
if isinstance(s._val, EnumItemType):
obj = s._val._type
if obj in _enumTypeSet:
_enumTypeSet.remove(obj)
_enumPortTypeSet.add(obj)
else:
assert obj in _enumPortTypeSet
doc = _makeDoc(inspect.getdoc(func))
needPck = len(_enumPortTypeSet) > 0
lib = self.library
arch = self.architecture
stdLogicPorts = self.std_logic_ports
self._convert_filter(h, intf, siglist, memlist, genlist)
if pfile:
_writeFileHeader(pfile, ppath)
print(_package, file=pfile)
pfile.close()
_writeFileHeader(vfile, vpath)
if needPck:
_writeCustomPackage(vfile, intf)
_writeModuleHeader(vfile, intf, needPck, lib, arch, useClauses, doc, stdLogicPorts)
_writeFuncDecls(vfile)
_writeTypeDefs(vfile)
_writeSigDecls(vfile, intf, siglist, memlist)
_writeCompDecls(vfile, compDecls)
_convertGens(genlist, siglist, memlist, vfile)
_writeModuleFooter(vfile, arch)
vfile.close()
# tbfile.close()
### clean-up properly ###
self._cleanup(siglist)
return h.top
def _cleanup(self, siglist):
# clean up signal names
for sig in siglist:
sig._clear()
# sig._name = None
# sig._driven = False
# sig._read = False
# clean up attributes
self.name = None
self.component_declarations = None
self.header = ''
self.no_myhdl_header = False
self.no_myhdl_package = False
self.architecture = "MyHDL"
self.std_logic_ports = False
def _convert_filter(self, h, intf, siglist, memlist, genlist):
# intended to be an entry point for other uses:
# code checking, optimizations, etc
pass
toVHDL = _ToVHDLConvertor()
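# Typical usage (illustrative; attribute names are the __slots__ above):
#   toVHDL.std_logic_ports = True
#   top_inst = toVHDL(top)   # writes <name>.vhd plus pck_myhdl_<version>.vhd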
myhdl_header = """\
-- File: $filename
-- Generated by MyHDL $version
-- Date: $date
"""
def _writeFileHeader(f, fn):
vars = dict(filename=fn,
version=myhdl.__version__,
date=datetime.today().ctime()
)
if toVHDL.header:
print(string.Template(toVHDL.header).substitute(vars), file=f)
if not toVHDL.no_myhdl_header:
print(string.Template(myhdl_header).substitute(vars), file=f)
print(file=f)
def _writeCustomPackage(f, intf):
print(file=f)
print("package pck_%s is" % intf.name, file=f)
print(file=f)
print("attribute enum_encoding: string;", file=f)
print(file=f)
sortedList = list(_enumPortTypeSet)
sortedList.sort(key=lambda x: x._name)
for t in sortedList:
print(" %s" % t._toVHDL(), file=f)
print(file=f)
print("end package pck_%s;" % intf.name, file=f)
print(file=f)
portConversions = []
def _writeModuleHeader(f, intf, needPck, lib, arch, useClauses, doc, stdLogicPorts):
print("library IEEE;", file=f)
print("use IEEE.std_logic_1164.all;", file=f)
print("use IEEE.numeric_std.all;", file=f)
print("use std.textio.all;", file=f)
print(file=f)
if lib != "work":
print("library %s;" % lib, file=f)
if useClauses is not None:
f.write(useClauses)
f.write("\n")
else:
print("use %s.pck_myhdl_%s.all;" % (lib, _shortversion), file=f)
print(file=f)
if needPck:
print("use %s.pck_%s.all;" % (lib, intf.name), file=f)
print(file=f)
print("entity %s is" % intf.name, file=f)
del portConversions[:]
if intf.argnames:
f.write(" port (")
c = ''
for portname in intf.argnames:
s = intf.argdict[portname]
f.write("%s" % c)
c = ';'
# change name to convert to std_logic, or
# make sure signal name is equal to its port name
convertPort = False
if stdLogicPorts and s._type is intbv:
s._name = portname + "_num"
convertPort = True
for sl in s._slicesigs:
sl._setName('VHDL')
else:
s._name = portname
r = _getRangeString(s)
pt = st = _getTypeString(s)
if convertPort:
pt = "std_logic_vector"
if s._driven:
if s._read:
if not isinstance(s, _TristateSignal):
warnings.warn("%s: %s" % (_error.OutputPortRead, portname),
category=ToVHDLWarning
)
f.write("\n %s: inout %s%s" % (portname, pt, r))
else:
f.write("\n %s: out %s%s" % (portname, pt, r))
if convertPort:
portConversions.append("%s <= %s(%s);" % (portname, pt, s._name))
s._read = True
else:
if not s._read:
warnings.warn("%s: %s" % (_error.UnusedPort, portname),
category=ToVHDLWarning
)
f.write("\n %s: in %s%s" % (portname, pt, r))
if convertPort:
portConversions.append("%s <= %s(%s);" % (s._name, st, portname))
s._driven = True
f.write("\n );\n")
print("end entity %s;" % intf.name, file=f)
print(doc, file=f)
print(file=f)
print("architecture %s of %s is" % (arch, intf.name), file=f)
print(file=f)
def _writeFuncDecls(f):
return
    # print(_package, file=f)
def _writeTypeDefs(f):
f.write("\n")
sortedList = list(_enumTypeSet)
sortedList.sort(key=lambda x: x._name)
for t in sortedList:
f.write("%s\n" % t._toVHDL())
# f.write("\n"
constwires = []
def _writeSigDecls(f, intf, siglist, memlist):
del constwires[:]
for s in siglist:
if not s._used:
continue
if s._name in intf.argnames:
continue
r = _getRangeString(s)
p = _getTypeString(s)
if s._driven:
if not s._read and not isinstance(s, _TristateDriver):
warnings.warn("%s: %s" % (_error.UnreadSignal, s._name),
category=ToVHDLWarning
)
# the following line implements initial value assignments
sig_vhdl_obj = inferVhdlObj(s)
if not toVHDL.initial_values:
val_str = ""
else:
if isinstance(sig_vhdl_obj, vhd_std_logic):
# Single bit
val_str = " := '%s'" % int(s._init)
elif isinstance(sig_vhdl_obj, vhd_int):
val_str = " := %s" % s._init
elif isinstance(sig_vhdl_obj, (vhd_signed, vhd_unsigned)):
val_str = ' := %dX"%s"' % (
sig_vhdl_obj.size, str(s._init))
elif isinstance(sig_vhdl_obj, vhd_enum):
val_str = ' := %s' % (s._init,)
else:
# default to no initial value
val_str = ''
print("signal %s: %s%s%s;" % (s._name, p, r, val_str), file=f)
elif s._read:
# the original exception
# raise ToVHDLError(_error.UndrivenSignal, s._name)
# changed to a warning and a continuous assignment to a wire
warnings.warn("%s: %s" % (_error.UndrivenSignal, s._name),
category=ToVHDLWarning
)
constwires.append(s)
print("signal %s: %s%s;" % (s._name, p, r), file=f)
for m in memlist:
if not m._used:
continue
# infer attributes for the case of named signals in a list
for i, s in enumerate(m.mem):
if not m._driven and s._driven:
m._driven = s._driven
if not m._read and s._read:
m._read = s._read
if not m._driven and not m._read:
continue
r = _getRangeString(m.elObj)
p = _getTypeString(m.elObj)
t = "t_array_%s" % m.name
if not toVHDL.initial_values:
val_str = ""
else:
sig_vhdl_objs = [inferVhdlObj(each) for each in m.mem]
if all([each._init == m.mem[0]._init for each in m.mem]):
if isinstance(m.mem[0]._init, bool):
val_str = (
' := (others => \'%s\')' % str(int(m.mem[0]._init)))
else:
val_str = (
' := (others => %dX"%s")' %
(sig_vhdl_objs[0].size, str(m.mem[0]._init)))
else:
_val_str = ',\n '.join(
['%dX"%s"' % (obj.size, str(each._init)) for
obj, each in zip(sig_vhdl_objs, m.mem)])
val_str = ' := (\n ' + _val_str + ')'
print("type %s is array(0 to %s-1) of %s%s;" % (t, m.depth, p, r), file=f)
print("signal %s: %s%s;" % (m.name, t, val_str), file=f)
print(file=f)
def _writeCompDecls(f, compDecls):
if compDecls is not None:
print(compDecls, file=f)
def _writeModuleFooter(f, arch):
print("end architecture %s;" % arch, file=f)
def _getRangeString(s):
if isinstance(s._val, EnumItemType):
return ''
elif s._type is bool:
return ''
elif s._nrbits is not None:
msb = s._nrbits - 1
return "(%s downto 0)" % msb
else:
raise AssertionError
def _getTypeString(s):
if isinstance(s._val, EnumItemType):
return s._val._type._name
elif s._type is bool:
return "std_logic"
    elif s._min is not None and s._min < 0:
        return "signed"
    else:
        return "unsigned"
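# Worked example of the two helpers above (derived from the code, not docs):
# an 8-bit non-negative intbv signal has _nrbits == 8, so _getTypeString
# returns "unsigned" and _getRangeString returns "(7 downto 0)", giving
#     signal din: unsigned(7 downto 0);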
def _convertGens(genlist, siglist, memlist, vfile):
blockBuf = StringIO()
funcBuf = StringIO()
for tree in genlist:
if isinstance(tree, _UserVhdlCode):
blockBuf.write(str(tree))
continue
if tree.kind == _kind.ALWAYS:
Visitor = _ConvertAlwaysVisitor
elif tree.kind == _kind.INITIAL:
Visitor = _ConvertInitialVisitor
elif tree.kind == _kind.SIMPLE_ALWAYS_COMB:
Visitor = _ConvertSimpleAlwaysCombVisitor
elif tree.kind == _kind.ALWAYS_DECO:
Visitor = _ConvertAlwaysDecoVisitor
elif tree.kind == _kind.ALWAYS_SEQ:
Visitor = _ConvertAlwaysSeqVisitor
else: # ALWAYS_COMB
Visitor = _ConvertAlwaysCombVisitor
v = Visitor(tree, blockBuf, funcBuf)
v.visit(tree)
vfile.write(funcBuf.getvalue())
funcBuf.close()
print("begin", file=vfile)
print(file=vfile)
for st in portConversions:
print(st, file=vfile)
print(file=vfile)
for s in constwires:
if s._type is bool:
c = int(s._val)
pre, suf = "'", "'"
elif s._type is intbv:
c = int(s._val)
w = len(s)
assert w != 0
if s._min < 0:
if w <= 31:
pre, suf = "to_signed(", ", %s)" % w
else:
pre, suf = "signed'(", ")"
c = '"%s"' % bin(c, w)
else:
if w <= 31:
pre, suf = "to_unsigned(", ", %s)" % w
else:
pre, suf = "unsigned'(", ")"
c = '"%s"' % bin(c, w)
else:
raise ToVHDLError("Unexpected type for constant signal", s._name)
print("%s <= %s%s%s;" % (s._name, pre, c, suf), file=vfile)
print(file=vfile)
# shadow signal assignments
for s in siglist:
if hasattr(s, 'toVHDL') and s._read:
print(s.toVHDL(), file=vfile)
# hack for slice signals in a list
for m in memlist:
if m._read:
for s in m.mem:
if hasattr(s, 'toVHDL'):
print(s.toVHDL(), file=vfile)
print(file=vfile)
vfile.write(blockBuf.getvalue())
blockBuf.close()
opmap = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Mod: 'mod',
ast.Pow: '**',
ast.LShift: 'shift_left',
ast.RShift: 'shift_right',
ast.BitOr: 'or',
ast.BitAnd: 'and',
ast.BitXor: 'xor',
ast.FloorDiv: '/',
ast.Invert: 'not ',
ast.Not: 'not ',
ast.UAdd: '+',
ast.USub: '-',
ast.Eq: '=',
ast.Gt: '>',
ast.GtE: '>=',
ast.Lt: '<',
ast.LtE: '<=',
ast.NotEq: '/=',
ast.And: 'and',
ast.Or: 'or',
}
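# opmap translates Python AST operator nodes into their VHDL spellings, e.g.:
#     opmap[ast.BitXor] -> 'xor'         (infix, see BitOp below)
#     opmap[ast.LShift] -> 'shift_left'  (emitted as a call, see shiftOp)
#     opmap[ast.NotEq]  -> '/='
# Note that FloorDiv maps to plain '/', which is integer division in VHDL.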
class _ConvertVisitor(ast.NodeVisitor, _ConversionMixin):
def __init__(self, tree, buf):
self.tree = tree
self.buf = buf
self.returnLabel = tree.name
self.ind = ''
self.SigAss = False
self.isLhs = False
self.labelStack = []
self.context = None
def write(self, arg):
self.buf.write("%s" % arg)
def writeline(self, nr=1):
for i in range(nr):
self.buf.write("\n%s" % self.ind)
def writeDoc(self, node):
assert hasattr(node, 'doc')
if node.doc is not None:
doc = _makeDoc(node.doc, self.ind)
self.write(doc)
self.writeline()
def IntRepr(self, obj):
if obj >= 0:
s = "%s" % int(obj)
else:
s = "(- %s)" % abs(int(obj))
return s
def BitRepr(self, item, var):
if isinstance(var._val, bool):
return '\'%s\'' % bin(item, len(var))
else:
return '"%s"' % bin(item, len(var))
def inferCast(self, vhd, ori):
pre, suf = "", ""
if isinstance(vhd, vhd_int):
if not isinstance(ori, vhd_int):
pre, suf = "to_integer(", ")"
elif isinstance(vhd, vhd_unsigned):
if isinstance(ori, vhd_unsigned):
if vhd.size != ori.size:
pre, suf = "resize(", ", %s)" % vhd.size
elif isinstance(ori, vhd_signed):
if vhd.size != ori.size:
# note the order of resizing and casting here (otherwise bug!)
pre, suf = "resize(unsigned(", "), %s)" % vhd.size
else:
pre, suf = "unsigned(", ")"
else:
pre, suf = "to_unsigned(", ", %s)" % vhd.size
elif isinstance(vhd, vhd_signed):
if isinstance(ori, vhd_signed):
if vhd.size != ori.size:
pre, suf = "resize(", ", %s)" % vhd.size
elif isinstance(ori, vhd_unsigned):
if vhd.size != ori.size:
# I think this should be the order of resizing and casting here
pre, suf = "signed(resize(", ", %s))" % vhd.size
else:
pre, suf = "signed(", ")"
else:
pre, suf = "to_signed(", ", %s)" % vhd.size
elif isinstance(vhd, vhd_boolean):
if not isinstance(ori, vhd_boolean):
pre, suf = "bool(", ")"
elif isinstance(vhd, vhd_std_logic):
if not isinstance(ori, vhd_std_logic):
if isinstance(ori, vhd_unsigned):
pre, suf = "", "(0)"
else:
pre, suf = "stdl(", ")"
# elif isinstance(vhd, vhd_string):
# if isinstance(ori, vhd_enum):
# pre, suf = "%s'image(" % ori._type._name, ")"
return pre, suf
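    # Illustration of inferCast, derived from the branches above:
    #     inferCast(vhd_unsigned(8), vhd_signed(8))   -> ("unsigned(", ")")
    #     inferCast(vhd_unsigned(8), vhd_unsigned(4)) -> ("resize(", ", 8)")
    #     inferCast(vhd_int(), vhd_unsigned(8))       -> ("to_integer(", ")")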
def writeIntSize(self, n):
# write size for large integers (beyond 32 bits signed)
# with some safety margin
if n >= 2**30:
size = int(math.ceil(math.log(n + 1, 2))) + 1 # sign bit!
self.write("%s'sd" % size)
def writeDeclaration(self, obj, name, kind="", dir="", endchar=";", constr=True):
if isinstance(obj, EnumItemType):
tipe = obj._type._name
elif isinstance(obj, _Ram):
tipe = "t_array_%s" % name
elt = inferVhdlObj(obj.elObj).toStr(True)
self.write("type %s is array(0 to %s-1) of %s;" % (tipe, obj.depth, elt))
self.writeline()
else:
vhd = inferVhdlObj(obj)
if isinstance(vhd, vhd_enum):
tipe = obj._val._type._name
else:
tipe = vhd.toStr(constr)
if kind:
kind += " "
if dir:
dir += " "
self.write("%s%s: %s%s%s" % (kind, name, dir, tipe, endchar))
def writeDeclarations(self):
if self.tree.hasPrint:
self.writeline()
self.write("variable L: line;")
for name, obj in self.tree.vardict.items():
if isinstance(obj, _loopInt):
continue # hack for loop vars
self.writeline()
self.writeDeclaration(obj, name, kind="variable")
def indent(self):
self.ind += ' ' * 4
def dedent(self):
self.ind = self.ind[:-4]
def visit_BinOp(self, node):
if isinstance(node.op, (ast.LShift, ast.RShift)):
self.shiftOp(node)
elif isinstance(node.op, (ast.BitAnd, ast.BitOr, ast.BitXor)):
self.BitOp(node)
else:
self.BinOp(node)
def inferBinaryOpCast(self, node, left, right, op):
ns, os = node.vhd.size, node.vhdOri.size
ds = ns - os
if ds > 0:
if isinstance(left.vhd, vhd_vector) and isinstance(right.vhd, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub)):
left.vhd.size = ns
# in general, resize right also
# for a simple name, resizing is not necessary
if not isinstance(right, ast.Name):
right.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mod):
right.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.FloorDiv):
left.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mult):
left.vhd.size += ds
node.vhdOri.size = ns
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(left.vhd, vhd_vector) and isinstance(right.vhd, vhd_int):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
left.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mult):
left.vhd.size += ds
node.vhdOri.size = 2 * left.vhd.size
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(left.vhd, vhd_int) and isinstance(right.vhd, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
right.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mult):
node.vhdOri.size = 2 * right.vhd.size
else:
raise AssertionError("unexpected op %s" % op)
pre, suf = self.inferCast(node.vhd, node.vhdOri)
if pre == "":
pre, suf = "(", ")"
return pre, suf
def BinOp(self, node):
pre, suf = self.inferBinaryOpCast(node, node.left, node.right, node.op)
self.write(pre)
self.visit(node.left)
self.write(" %s " % opmap[type(node.op)])
self.visit(node.right)
self.write(suf)
def inferShiftOpCast(self, node, left, right, op):
ns, os = node.vhd.size, node.vhdOri.size
ds = ns - os
if ds > 0:
if isinstance(node.left.vhd, vhd_vector):
left.vhd.size = ns
node.vhdOri.size = ns
pre, suf = self.inferCast(node.vhd, node.vhdOri)
return pre, suf
def shiftOp(self, node):
pre, suf = self.inferShiftOpCast(node, node.left, node.right, node.op)
self.write(pre)
self.write("%s(" % opmap[type(node.op)])
self.visit(node.left)
self.write(", ")
self.visit(node.right)
self.write(")")
self.write(suf)
def BitOp(self, node):
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.write("(")
self.visit(node.left)
self.write(" %s " % opmap[type(node.op)])
self.visit(node.right)
self.write(")")
self.write(suf)
def visit_BoolOp(self, node):
if isinstance(node.vhd, vhd_std_logic):
self.write("stdl")
self.write("(")
self.visit(node.values[0])
for n in node.values[1:]:
self.write(" %s " % opmap[type(node.op)])
self.visit(n)
self.write(")")
def visit_UnaryOp(self, node):
        # in python3 a negative Num is represented as a USub of a positive Num
        # Fix: restore python2 behavior by a shortcut: invert the value of the
        # Num, inherit the vhdl type from the UnaryOp node, and visit the
        # modified operand
if isinstance(node.op, ast.USub) and isinstance(node.operand, ast.Num):
node.operand.n = -node.operand.n
node.operand.vhd = node.vhd
self.visit(node.operand)
return
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.write("(")
self.write(opmap[type(node.op)])
self.visit(node.operand)
self.write(")")
self.write(suf)
def visit_Attribute(self, node):
if isinstance(node.ctx, ast.Store):
self.setAttr(node)
else:
self.getAttr(node)
def setAttr(self, node):
assert node.attr == 'next'
self.SigAss = True
if isinstance(node.value, ast.Name):
sig = self.tree.symdict[node.value.id]
self.SigAss = sig._name
self.visit(node.value)
node.obj = self.getObj(node.value)
def getAttr(self, node):
if isinstance(node.value, ast.Subscript):
self.setAttr(node)
return
assert isinstance(node.value, ast.Name), node.value
n = node.value.id
if n in self.tree.symdict:
obj = self.tree.symdict[n]
elif n in self.tree.vardict:
obj = self.tree.vardict[n]
else:
raise AssertionError("object not found")
if isinstance(obj, _Signal):
if node.attr == 'next':
sig = self.tree.symdict[node.value.id]
self.SigAss = obj._name
self.visit(node.value)
elif node.attr == 'posedge':
self.write("rising_edge(")
self.visit(node.value)
self.write(")")
elif node.attr == 'negedge':
self.write("falling_edge(")
self.visit(node.value)
self.write(")")
elif node.attr == 'val':
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.visit(node.value)
self.write(suf)
if isinstance(obj, (_Signal, intbv)):
if node.attr in ('min', 'max'):
self.write("%s" % node.obj)
if isinstance(obj, EnumType):
assert hasattr(obj, node.attr)
e = getattr(obj, node.attr)
self.write(e._toVHDL())
def visit_Assert(self, node):
# XXX
self.write("assert ")
self.visit(node.test)
self.indent()
self.writeline()
self.write('report "*** AssertionError ***"')
self.writeline()
self.write("severity error;")
self.dedent()
def visit_Assign(self, node):
lhs = node.targets[0]
rhs = node.value
# shortcut for expansion of ROM in case statement
if isinstance(node.value, ast.Subscript) and \
isinstance(node.value.slice, ast.Index) and \
isinstance(node.value.value.obj, _Rom):
rom = node.value.value.obj.rom
self.write("case ")
self.visit(node.value.slice)
self.write(" is")
self.indent()
size = lhs.vhd.size
for i, n in enumerate(rom):
self.writeline()
if i == len(rom) - 1:
self.write("when others => ")
else:
self.write("when %s => " % i)
self.visit(lhs)
if self.SigAss:
self.write(' <= ')
self.SigAss = False
else:
self.write(' := ')
if isinstance(lhs.vhd, vhd_std_logic):
self.write("'%s';" % n)
elif isinstance(lhs.vhd, vhd_int):
self.write("%s;" % n)
else:
self.write('"%s";' % bin(n, size))
self.dedent()
self.writeline()
self.write("end case;")
return
elif isinstance(node.value, ast.ListComp):
# skip list comprehension assigns for now
return
# default behavior
convOpen, convClose = "", ""
if isinstance(lhs.vhd, vhd_type):
rhs.vhd = lhs.vhd
self.isLhs = True
self.visit(lhs)
self.isLhs = False
if self.SigAss:
if isinstance(lhs.value, ast.Name):
sig = self.tree.symdict[lhs.value.id]
self.write(' <= ')
self.SigAss = False
else:
self.write(' := ')
self.write(convOpen)
# node.expr.target = obj = self.getObj(node.nodes[0])
self.visit(rhs)
self.write(convClose)
self.write(';')
def visit_AugAssign(self, node):
# XXX apparently no signed context required for augmented assigns
left, op, right = node.target, node.op, node.value
isFunc = False
pre, suf = "", ""
if isinstance(op, (ast.Add, ast.Sub, ast.Mult, ast.Mod, ast.FloorDiv)):
pre, suf = self.inferBinaryOpCast(node, left, right, op)
elif isinstance(op, (ast.LShift, ast.RShift)):
isFunc = True
pre, suf = self.inferShiftOpCast(node, left, right, op)
self.visit(left)
self.write(" := ")
self.write(pre)
if isFunc:
self.write("%s(" % opmap[type(op)])
self.visit(left)
if isFunc:
self.write(", ")
else:
self.write(" %s " % opmap[type(op)])
self.visit(right)
if isFunc:
self.write(")")
self.write(suf)
self.write(";")
def visit_Break(self, node):
self.write("exit;")
def visit_Call(self, node):
fn = node.func
# assert isinstance(fn, astNode.Name)
f = self.getObj(fn)
if f is print:
self.visit_Print(node)
return
fname = ''
pre, suf = '', ''
opening, closing = '(', ')'
sep = ", "
if f is bool:
opening, closing = '', ''
arg = node.args[0]
arg.vhd = node.vhd
elif f is len:
val = self.getVal(node)
self.require(node, val is not None, "cannot calculate len")
self.write(repr(val))
return
elif f is now:
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.write("(now / 1 ns)")
self.write(suf)
return
elif f is ord:
opening, closing = '', ''
v = ord(node.args[0].s)
node.args[0].s = v
self.write(v)
return
elif f in integer_types:
opening, closing = '', ''
pre, suf = self.inferCast(node.vhd, node.vhdOri)
# convert number argument to integer
if isinstance(node.args[0], ast.Num):
node.args[0].n = int(node.args[0].n)
elif inspect.isclass(f) and issubclass(f, intbv):
pre, post = "", ""
arg = node.args[0]
if isinstance(node.vhd, vhd_unsigned):
pre, post = "to_unsigned(", ", %s)" % node.vhd.size
elif isinstance(node.vhd, vhd_signed):
pre, post = "to_signed(", ", %s)" % node.vhd.size
self.write(pre)
self.visit(arg)
self.write(post)
return
elif f == intbv.signed: # note equality comparison
# this call comes from a getattr
arg = fn.value
pre, suf = self.inferCast(node.vhd, node.vhdOri)
opening, closing = '', ''
if isinstance(arg.vhd, vhd_unsigned):
opening, closing = "signed(", ")"
self.write(pre)
self.write(opening)
self.visit(arg)
self.write(closing)
self.write(suf)
return
elif (type(f) in class_types) and issubclass(f, Exception):
self.write(f.__name__)
elif f in (posedge, negedge):
opening, closing = ' ', ''
self.write(f.__name__)
elif f is delay:
self.visit(node.args[0])
self.write(" * 1 ns")
return
elif f is concat:
pre, suf = self.inferCast(node.vhd, node.vhdOri)
opening, closing = "unsigned'(", ")"
sep = " & "
elif hasattr(node, 'tree'):
pre, suf = self.inferCast(node.vhd, node.tree.vhd)
fname = node.tree.name
else:
self.write(f.__name__)
if node.args:
self.write(pre)
# TODO rewrite making use of fname variable
self.write(fname)
self.write(opening)
self.visit(node.args[0])
for arg in node.args[1:]:
self.write(sep)
self.visit(arg)
self.write(closing)
self.write(suf)
if hasattr(node, 'tree'):
if node.tree.kind == _kind.TASK:
Visitor = _ConvertTaskVisitor
else:
Visitor = _ConvertFunctionVisitor
v = Visitor(node.tree, self.funcBuf)
v.visit(node.tree)
def visit_Compare(self, node):
n = node.vhd
ns = node.vhd.size
pre, suf = "(", ")"
if isinstance(n, vhd_std_logic):
pre = "stdl("
elif isinstance(n, vhd_unsigned):
pre, suf = "to_unsigned(", ", %s)" % ns
elif isinstance(n, vhd_signed):
pre, suf = "to_signed(", ", %s)" % ns
self.write(pre)
self.visit(node.left)
op, right = node.ops[0], node.comparators[0]
self.write(" %s " % opmap[type(op)])
self.visit(right)
self.write(suf)
def visit_Num(self, node):
n = node.n
if isinstance(node.vhd, vhd_std_logic):
self.write("'%s'" % n)
elif isinstance(node.vhd, vhd_boolean):
self.write("%s" % bool(n))
# elif isinstance(node.vhd, (vhd_unsigned, vhd_signed)):
# self.write('"%s"' % bin(n, node.vhd.size))
elif isinstance(node.vhd, vhd_unsigned):
if abs(n) < 2**31:
self.write("to_unsigned(%s, %s)" % (n, node.vhd.size))
else:
self.write('unsigned\'("%s")' % bin(n, node.vhd.size))
elif isinstance(node.vhd, vhd_signed):
if abs(n) < 2**31:
self.write("to_signed(%s, %s)" % (n, node.vhd.size))
else:
self.write('signed\'("%s")' % bin(n, node.vhd.size))
else:
if n < 0:
self.write("(")
self.write(n)
if n < 0:
self.write(")")
def visit_Str(self, node):
typemark = 'string'
if isinstance(node.vhd, vhd_unsigned):
typemark = 'unsigned'
self.write("%s'(\"%s\")" % (typemark, node.s))
def visit_Continue(self, node, *args):
self.write("next;")
def visit_Expr(self, node):
expr = node.value
# docstrings on unofficial places
if isinstance(expr, ast.Str):
doc = _makeDoc(expr.s, self.ind)
self.write(doc)
return
# skip extra semicolons
if isinstance(expr, ast.Num):
return
self.visit(expr)
# ugly hack to detect an orphan "task" call
if isinstance(expr, ast.Call) and hasattr(expr, 'tree'):
self.write(';')
def visit_IfExp(self, node):
# propagate the node's vhd attribute
node.body.vhd = node.orelse.vhd = node.vhd
self.write('tern_op(')
self.write('cond => ')
self.visit(node.test)
self.write(', if_true => ')
self.visit(node.body)
self.write(', if_false => ')
self.visit(node.orelse)
self.write(')')
def visit_For(self, node):
self.labelStack.append(node.breakLabel)
self.labelStack.append(node.loopLabel)
var = node.target.id
cf = node.iter
f = self.getObj(cf.func)
args = cf.args
assert len(args) <= 3
self.require(node, len(args) < 3, "explicit step not supported")
if f is range:
cmp = '<'
op = 'to'
oneoff = ''
if len(args) == 1:
start, stop, step = None, args[0], None
elif len(args) == 2:
start, stop, step = args[0], args[1], None
else:
start, stop, step = args
else: # downrange
cmp = '>='
op = 'downto'
if len(args) == 1:
start, stop, step = args[0], None, None
elif len(args) == 2:
start, stop, step = args[0], args[1], None
else:
start, stop, step = args
assert step is None
# if node.breakLabel.isActive:
## self.write("begin: %s" % node.breakLabel)
# self.writeline()
# if node.loopLabel.isActive:
## self.write("%s: " % node.loopLabel)
self.write("for %s in " % var)
if start is None:
self.write("0")
else:
self.visit(start)
if f is downrange:
self.write("-1")
self.write(" %s " % op)
if stop is None:
self.write("0")
else:
self.visit(stop)
if f is range:
self.write("-1")
self.write(" loop")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end loop;")
# if node.breakLabel.isActive:
# self.writeline()
# self.write("end")
self.labelStack.pop()
self.labelStack.pop()
def visit_FunctionDef(self, node):
raise AssertionError("To be implemented in subclass")
def visit_If(self, node):
if node.ignore:
return
# only map to VHDL case if it's a full case
if node.isFullCase:
self.mapToCase(node)
else:
self.mapToIf(node)
def mapToCase(self, node):
var = node.caseVar
obj = self.getObj(var)
self.write("case ")
self.visit(var)
self.write(" is")
self.indent()
for i, (test, suite) in enumerate(node.tests):
self.writeline()
item = test.case[1]
if isinstance(item, EnumItemType):
itemRepr = item._toVHDL()
elif hasattr(obj, '_nrbits'):
itemRepr = self.BitRepr(item, obj)
else:
itemRepr = i
comment = ""
# potentially use default clause for last test
if (i == len(node.tests) - 1) and not node.else_:
self.write("when others")
comment = " -- %s" % itemRepr
else:
self.write("when ")
self.write(itemRepr)
self.write(" =>%s" % comment)
self.indent()
self.visit_stmt(suite)
self.dedent()
if node.else_:
self.writeline()
self.write("when others =>")
self.indent()
self.visit_stmt(node.else_)
self.dedent()
self.dedent()
self.writeline()
self.write("end case;")
def mapToIf(self, node):
first = True
for test, suite in node.tests:
if first:
ifstring = "if "
first = False
else:
ifstring = "elsif "
self.writeline()
self.write(ifstring)
self.visit(test)
self.write(" then")
self.indent()
self.visit_stmt(suite)
self.dedent()
if node.else_:
self.writeline()
edges = self.getEdge(node)
if edges is not None:
edgeTests = [e._toVHDL() for e in edges]
self.write("elsif ")
self.write(" or ".join(edgeTests))
self.write(" then")
else:
self.write("else")
self.indent()
self.visit_stmt(node.else_)
self.dedent()
self.writeline()
self.write("end if;")
def visit_ListComp(self, node):
pass # do nothing
def visit_Module(self, node):
for stmt in node.body:
self.visit(stmt)
def visit_NameConstant(self, node):
node.id = str(node.value)
self.getName(node)
def visit_Name(self, node):
if isinstance(node.ctx, ast.Store):
self.setName(node)
else:
self.getName(node)
def setName(self, node):
self.write(node.id)
def getName(self, node):
n = node.id
if n == 'False':
if isinstance(node.vhd, vhd_std_logic):
s = "'0'"
else:
s = "False"
elif n == 'True':
if isinstance(node.vhd, vhd_std_logic):
s = "'1'"
else:
s = "True"
elif n == 'None':
if isinstance(node.vhd, vhd_std_logic):
s = "'Z'"
else:
assert hasattr(node.vhd, 'size')
s = '"%s"' % ('Z' * node.vhd.size)
elif n in self.tree.vardict:
s = n
obj = self.tree.vardict[n]
ori = inferVhdlObj(obj)
pre, suf = self.inferCast(node.vhd, ori)
s = "%s%s%s" % (pre, s, suf)
elif n in self.tree.argnames:
assert n in self.tree.symdict
obj = self.tree.symdict[n]
vhd = inferVhdlObj(obj)
if isinstance(vhd, vhd_std_logic) and isinstance(node.vhd, vhd_boolean):
s = "(%s = '1')" % n
else:
s = n
elif n in self.tree.symdict:
obj = self.tree.symdict[n]
s = n
if isinstance(obj, bool):
if isinstance(node.vhd, vhd_std_logic):
s = "'%s'" % int(obj)
else:
s = "%s" % obj
elif isinstance(obj, integer_types):
if isinstance(node.vhd, vhd_int):
s = self.IntRepr(obj)
elif isinstance(node.vhd, vhd_boolean):
s = "%s" % bool(obj)
elif isinstance(node.vhd, vhd_std_logic):
s = "'%s'" % int(obj)
elif isinstance(node.vhd, vhd_unsigned):
if abs(obj) < 2 ** 31:
s = "to_unsigned(%s, %s)" % (obj, node.vhd.size)
else:
s = 'unsigned\'("%s")' % bin(obj, node.vhd.size)
elif isinstance(node.vhd, vhd_signed):
if abs(obj) < 2 ** 31:
s = "to_signed(%s, %s)" % (obj, node.vhd.size)
else:
s = 'signed\'("%s")' % bin(obj, node.vhd.size)
elif isinstance(obj, _Signal):
s = str(obj)
ori = inferVhdlObj(obj)
pre, suf = self.inferCast(node.vhd, ori)
s = "%s%s%s" % (pre, s, suf)
elif _isMem(obj):
m = _getMemInfo(obj)
assert m.name
s = m.name
elif isinstance(obj, EnumItemType):
s = obj._toVHDL()
elif (type(obj) in class_types) and issubclass(obj, Exception):
s = n
else:
self.raiseError(node, _error.UnsupportedType, "%s, %s" % (n, type(obj)))
else:
raise AssertionError("name ref: %s" % n)
self.write(s)
def visit_Pass(self, node):
self.write("null;")
def visit_Print(self, node):
argnr = 0
for s in node.format:
if isinstance(s, str):
self.write('write(L, string\'("%s"));' % s)
else:
a = node.args[argnr]
argnr += 1
to_string = "to_string"
if s.conv is int:
a.vhd = vhd_int()
else:
if isinstance(a.vhdOri, vhd_vector):
to_string = "to_hstring"
# to_hstring correctly does sign extension
                        # however, Verilog does not; therefore, interpret
# print values as unsigned...
a.vhd = vhd_unsigned(a.vhd.size)
elif isinstance(a.vhdOri, vhd_std_logic):
a.vhd = vhd_boolean()
self.write("write(L, %s(" % to_string)
self.visit(a)
self.write("))")
self.write(';')
self.writeline()
self.write("writeline(output, L);")
def visit_Raise(self, node):
self.write('assert False report "End of Simulation" severity Failure;')
def visit_Return(self, node):
pass
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Slice):
self.accessSlice(node)
else:
self.accessIndex(node)
def accessSlice(self, node):
if isinstance(node.value, ast.Call) and \
node.value.func.obj in (intbv, modbv) and \
_isConstant(node.value.args[0], self.tree.symdict):
c = self.getVal(node)._val
pre, post = "", ""
if node.vhd.size <= 30:
if isinstance(node.vhd, vhd_unsigned):
pre, post = "to_unsigned(", ", %s)" % node.vhd.size
elif isinstance(node.vhd, vhd_signed):
pre, post = "to_signed(", ", %s)" % node.vhd.size
else:
if isinstance(node.vhd, vhd_unsigned):
pre, post = "unsigned'(", ")"
c = '"%s"' % bin(c, node.vhd.size)
elif isinstance(node.vhd, vhd_signed):
pre, post = "signed'(", ")"
c = '"%s"' % bin(c, node.vhd.size)
self.write(pre)
self.write("%s" % c)
self.write(post)
return
pre, suf = self.inferCast(node.vhd, node.vhdOri)
if isinstance(node.value.vhd, vhd_signed) and isinstance(node.ctx, ast.Load):
pre = pre + "unsigned("
suf = ")" + suf
self.write(pre)
self.visit(node.value)
lower, upper = node.slice.lower, node.slice.upper
# special shortcut case for [:] slice
if lower is None and upper is None:
self.write(suf)
return
self.write("(")
if lower is None:
self.write("%s" % node.obj._nrbits)
else:
self.visit(lower)
self.write("-1 downto ")
if upper is None:
self.write("0")
else:
self.visit(upper)
self.write(")")
self.write(suf)
def accessIndex(self, node):
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.visit(node.value)
self.write("(")
#assert len(node.subs) == 1
self.visit(node.slice.value)
self.write(")")
self.write(suf)
def visit_stmt(self, body):
for stmt in body:
self.writeline()
self.visit(stmt)
# ugly hack to detect an orphan "task" call
if isinstance(stmt, ast.Call) and hasattr(stmt, 'tree'):
self.write(';')
def visit_Tuple(self, node):
        assert self.context is not None
sep = ", "
tpl = node.elts
self.visit(tpl[0])
for elt in tpl[1:]:
self.write(sep)
self.visit(elt)
def visit_While(self, node):
self.labelStack.append(node.breakLabel)
self.labelStack.append(node.loopLabel)
self.write("while ")
self.visit(node.test)
self.write(" loop")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end loop")
self.write(";")
self.labelStack.pop()
self.labelStack.pop()
def visit_Yield(self, node):
self.write("wait ")
yieldObj = self.getObj(node.value)
if isinstance(yieldObj, delay):
self.write("for ")
elif isinstance(yieldObj, _WaiterList):
self.write("until ")
else:
self.write("on ")
self.context = _context.YIELD
self.visit(node.value)
self.context = _context.UNKNOWN
self.write(";")
def manageEdges(self, ifnode, senslist):
""" Helper method to convert MyHDL style template into VHDL style"""
first = senslist[0]
if isinstance(first, _WaiterList):
bt = _WaiterList
elif isinstance(first, _Signal):
bt = _Signal
elif isinstance(first, delay):
bt = delay
assert bt
for e in senslist:
if not isinstance(e, bt):
self.raiseError(ifnode, "base type error in sensitivity list")
if len(senslist) >= 2 and bt == _WaiterList:
# ifnode = node.code.nodes[0]
# print ifnode
assert isinstance(ifnode, ast.If)
asyncEdges = []
for test, suite in ifnode.tests:
e = self.getEdge(test)
if e is None:
self.raiseError(ifnode, "No proper edge value test")
asyncEdges.append(e)
if not ifnode.else_:
self.raiseError(ifnode, "No separate else clause found")
edges = []
for s in senslist:
for e in asyncEdges:
if s is e:
break
else:
edges.append(s)
ifnode.edge = edges
senslist = [s.sig for s in senslist]
return senslist
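    # Sketch of manageEdges (signal names hypothetical): given a sensitivity
    # list like [clk.posedge, rst.negedge] and a template whose leading
    # if/elsif arms test the asynchronous edges, it stores the remaining
    # (synchronous) edges on ifnode.edge for mapToIf's else clause and
    # rewrites the sensitivity list to the bare signals, so the generated
    # VHDL reads "process (clk, rst) is ...".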
class _ConvertAlwaysVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node):
self.writeDoc(node)
w = node.body[-1]
y = w.body[0]
if isinstance(y, ast.Expr):
y = y.value
assert isinstance(y, ast.Yield)
senslist = y.senslist
senslist = self.manageEdges(w.body[1], senslist)
singleEdge = (len(senslist) == 1) and isinstance(senslist[0], _WaiterList)
self.write("%s: process (" % self.tree.name)
if singleEdge:
self.write(senslist[0].sig)
else:
for e in senslist[:-1]:
self.write(e)
self.write(', ')
self.write(senslist[-1])
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
if singleEdge:
self.writeline()
self.write("if %s then" % senslist[0]._toVHDL())
self.indent()
# assert isinstance(w.body, ast.stmt)
for stmt in w.body[1:]:
self.writeline()
self.visit(stmt)
self.dedent()
if singleEdge:
self.writeline()
self.write("end if;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertInitialVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node):
self.writeDoc(node)
self.write("%s: process is" % self.tree.name)
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.writeline()
self.write("wait;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertAlwaysCombVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node):
# a local function works nicely too
def compressSensitivityList(senslist):
''' reduce spelled out list items like [*name*(0), *name*(1), ..., *name*(n)] to just *name*'''
r = []
for item in senslist:
name = item._name.split('(', 1)[0]
                if name not in r:
# note that the list now contains names and not Signals, but we are
# interested in the strings anyway ...
r.append(name)
return r
self.writeDoc(node)
senslist = compressSensitivityList(self.tree.senslist)
self.write("%s: process (" % self.tree.name)
for e in senslist[:-1]:
self.write(e)
self.write(', ')
self.write(senslist[-1])
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertSimpleAlwaysCombVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_Attribute(self, node):
if isinstance(node.ctx, ast.Store):
self.SigAss = True
if isinstance(node.value, ast.Name):
sig = self.tree.symdict[node.value.id]
self.SigAss = sig._name
self.visit(node.value)
else:
self.getAttr(node)
def visit_FunctionDef(self, node, *args):
self.writeDoc(node)
self.visit_stmt(node.body)
self.writeline(2)
class _ConvertAlwaysDecoVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node, *args):
self.writeDoc(node)
assert self.tree.senslist
senslist = self.tree.senslist
senslist = self.manageEdges(node.body[-1], senslist)
singleEdge = (len(senslist) == 1) and isinstance(senslist[0], _WaiterList)
self.write("%s: process (" % self.tree.name)
if singleEdge:
self.write(senslist[0].sig)
else:
for e in senslist[:-1]:
self.write(e)
self.write(', ')
self.write(senslist[-1])
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
if singleEdge:
self.writeline()
self.write("if %s then" % senslist[0]._toVHDL())
self.indent()
self.visit_stmt(node.body)
self.dedent()
if singleEdge:
self.writeline()
self.write("end if;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
def _convertInitVal(reg, init):
pre, suf = '', ''
if isinstance(reg, _Signal):
tipe = reg._type
else:
assert isinstance(reg, intbv)
tipe = intbv
if tipe is bool:
v = "'1'" if init else "'0'"
elif tipe is intbv:
init = int(init) # int representation
vhd_tipe = 'unsigned'
if reg._min is not None and reg._min < 0:
vhd_tipe = 'signed'
if abs(init) < 2**31:
v = '%sto_%s(%s, %s)%s' % (pre, vhd_tipe, init, len(reg), suf)
else:
v = '%s%s\'"%s"%s' % (pre, vhd_tipe, bin(init, len(reg)), suf)
else:
assert isinstance(init, EnumItemType)
v = init._toVHDL()
return v
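# Examples of _convertInitVal, following the branches above:
#     bool register, init True        -> "'1'"
#     8-bit unsigned intbv, init 3    -> "to_unsigned(3, 8)"
#     enum-typed register             -> the item's _toVHDL() rendering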
class _ConvertAlwaysSeqVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node, *args):
self.writeDoc(node)
assert self.tree.senslist
senslist = self.tree.senslist
edge = senslist[0]
reset = self.tree.reset
        # 'async' is a reserved word in Python 3.7+, so the original
        # "async = reset is not None and reset.async" no longer parses;
        # newer MyHDL spells the reset attribute 'isasync'.
        is_async = reset is not None and reset.isasync
sigregs = self.tree.sigregs
varregs = self.tree.varregs
self.write("%s: process (" % self.tree.name)
self.write(edge.sig)
        if is_async:
self.write(', ')
self.write(reset)
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
        if not is_async:
self.writeline()
self.write("if %s then" % edge._toVHDL())
self.indent()
if reset is not None:
self.writeline()
self.write("if (%s = '%s') then" % (reset, int(reset.active)))
self.indent()
for s in sigregs:
self.writeline()
self.write("%s <= %s;" % (s, _convertInitVal(s, s._init)))
for v in varregs:
n, reg, init = v
self.writeline()
self.write("%s := %s;" % (n, _convertInitVal(reg, init)))
self.dedent()
self.writeline()
            if is_async:
self.write("elsif %s then" % edge._toVHDL())
else:
self.write("else")
self.indent()
self.visit_stmt(node.body)
self.dedent()
if reset is not None:
self.writeline()
self.write("end if;")
self.dedent()
        if not is_async:
self.writeline()
self.write("end if;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertFunctionVisitor(_ConvertVisitor):
def __init__(self, tree, funcBuf):
_ConvertVisitor.__init__(self, tree, funcBuf)
self.returnObj = tree.returnObj
self.returnLabel = _Label("RETURN")
def writeOutputDeclaration(self):
self.write(self.tree.vhd.toStr(constr=False))
def writeInputDeclarations(self):
endchar = ""
for name in self.tree.argnames:
self.write(endchar)
endchar = ";"
obj = self.tree.symdict[name]
self.writeline()
self.writeDeclaration(obj, name, dir="in", constr=False, endchar="")
def visit_FunctionDef(self, node):
self.write("function %s(" % self.tree.name)
self.indent()
self.writeInputDeclarations()
self.writeline()
self.write(") return ")
self.writeOutputDeclaration()
self.write(" is")
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end function %s;" % self.tree.name)
self.writeline(2)
def visit_Return(self, node):
self.write("return ")
node.value.vhd = self.tree.vhd
self.visit(node.value)
self.write(";")
class _ConvertTaskVisitor(_ConvertVisitor):
def __init__(self, tree, funcBuf):
_ConvertVisitor.__init__(self, tree, funcBuf)
self.returnLabel = _Label("RETURN")
def writeInterfaceDeclarations(self):
endchar = ""
for name in self.tree.argnames:
self.write(endchar)
endchar = ";"
obj = self.tree.symdict[name]
output = name in self.tree.outputs
input = name in self.tree.inputs
inout = input and output
dir = (inout and "inout") or (output and "out") or "in"
self.writeline()
if isinstance(obj, _Signal):
kind = 'signal'
else:
kind = ''
self.writeDeclaration(obj, name, kind=kind, dir=dir,
constr=False, endchar="")
def visit_FunctionDef(self, node):
self.write("procedure %s" % self.tree.name)
if self.tree.argnames:
self.write("(")
self.indent()
self.writeInterfaceDeclarations()
self.write(")")
self.write(" is")
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end procedure %s;" % self.tree.name)
self.writeline(2)
# type inference
class vhd_type(object):
def __init__(self, size=0):
self.size = size
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.size)
class vhd_string(vhd_type):
pass
class vhd_enum(vhd_type):
def __init__(self, tipe):
self._type = tipe
def toStr(self, constr=True):
return self._type.__dict__['_name']
class vhd_std_logic(vhd_type):
def __init__(self, size=0):
vhd_type.__init__(self)
self.size = 1
def toStr(self, constr=True):
return 'std_logic'
class vhd_boolean(vhd_type):
def __init__(self, size=0):
vhd_type.__init__(self)
self.size = 1
def toStr(self, constr=True):
return 'boolean'
class vhd_vector(vhd_type):
def __init__(self, size=0):
vhd_type.__init__(self, size)
class vhd_unsigned(vhd_vector):
def toStr(self, constr=True):
if constr:
return "unsigned(%s downto 0)" % (self.size - 1)
else:
return "unsigned"
class vhd_signed(vhd_vector):
def toStr(self, constr=True):
if constr:
return "signed(%s downto 0)" % (self.size - 1)
else:
return "signed"
class vhd_int(vhd_type):
def toStr(self, constr=True):
return "integer"
class vhd_nat(vhd_int):
def toStr(self, constr=True):
return "natural"
class _loopInt(int):
pass
def maxType(o1, o2):
s1 = s2 = 0
if isinstance(o1, vhd_type):
s1 = o1.size
if isinstance(o2, vhd_type):
s2 = o2.size
s = max(s1, s2)
if isinstance(o1, vhd_signed) or isinstance(o2, vhd_signed):
return vhd_signed(s)
elif isinstance(o1, vhd_unsigned) or isinstance(o2, vhd_unsigned):
return vhd_unsigned(s)
elif isinstance(o1, vhd_std_logic) or isinstance(o2, vhd_std_logic):
return vhd_std_logic()
elif isinstance(o1, vhd_int) or isinstance(o2, vhd_int):
return vhd_int()
else:
return None
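# Example: maxType(vhd_unsigned(4), vhd_signed(8)) returns vhd_signed(8):
# signedness dominates and the larger size wins.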
def inferVhdlObj(obj):
vhd = None
if (isinstance(obj, _Signal) and obj._type is intbv) or \
isinstance(obj, intbv):
if obj.min is None or obj.min < 0:
vhd = vhd_signed(size=len(obj))
else:
vhd = vhd_unsigned(size=len(obj))
elif (isinstance(obj, _Signal) and obj._type is bool) or \
isinstance(obj, bool):
vhd = vhd_std_logic()
elif (isinstance(obj, _Signal) and isinstance(obj._val, EnumItemType)) or\
isinstance(obj, EnumItemType):
if isinstance(obj, _Signal):
tipe = obj._val._type
else:
tipe = obj._type
vhd = vhd_enum(tipe)
elif isinstance(obj, integer_types):
if obj >= 0:
vhd = vhd_nat()
else:
vhd = vhd_int()
# vhd = vhd_int()
return vhd
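# Examples of inferVhdlObj, derived from the branches above:
#     Signal(intbv(0)[8:])             -> vhd_unsigned(8)
#     Signal(intbv(0, min=-8, max=8))  -> vhd_signed(4)
#     Signal(bool(0))                  -> vhd_std_logic()
#     a plain non-negative Python int  -> vhd_nat()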
def maybeNegative(vhd):
if isinstance(vhd, vhd_signed):
return True
if isinstance(vhd, vhd_int) and not isinstance(vhd, vhd_nat):
return True
return False
class _AnnotateTypesVisitor(ast.NodeVisitor, _ConversionMixin):
def __init__(self, tree):
self.tree = tree
def visit_FunctionDef(self, node):
# don't visit arguments and decorators
for stmt in node.body:
self.visit(stmt)
def visit_Attribute(self, node):
self.generic_visit(node)
node.vhd = inferVhdlObj(node.obj)
node.vhdOri = copy(node.vhd)
def visit_Assert(self, node):
self.visit(node.test)
node.test.vhd = vhd_boolean()
def visit_AugAssign(self, node):
self.visit(node.target)
self.visit(node.value)
if isinstance(node.op, (ast.BitOr, ast.BitAnd, ast.BitXor)):
node.value.vhd = copy(node.target.vhd)
node.vhdOri = copy(node.target.vhd)
elif isinstance(node.op, (ast.RShift, ast.LShift)):
node.value.vhd = vhd_int()
node.vhdOri = copy(node.target.vhd)
else:
node.left, node.right = node.target, node.value
self.inferBinOpType(node)
node.vhd = copy(node.target.vhd)
def visit_Call(self, node):
fn = node.func
# assert isinstance(fn, astNode.Name)
f = self.getObj(fn)
node.vhd = inferVhdlObj(node.obj)
self.generic_visit(node)
if f is concat:
s = 0
for a in node.args:
if isinstance(a, ast.Str):
a.vhd = vhd_unsigned(a.vhd.size)
elif isinstance(a.vhd, vhd_signed):
a.vhd = vhd_unsigned(a.vhd.size)
s += a.vhd.size
node.vhd = vhd_unsigned(s)
elif f is bool:
node.vhd = vhd_boolean()
elif f in _flatten(integer_types, ord):
node.vhd = vhd_int()
node.args[0].vhd = vhd_int()
elif f in (intbv, modbv):
node.vhd = vhd_int()
elif f is len:
node.vhd = vhd_int()
elif f is now:
node.vhd = vhd_nat()
elif f == intbv.signed: # note equality comparison
# this comes from a getattr
# node.vhd = vhd_int()
node.vhd = vhd_signed(fn.value.vhd.size)
elif hasattr(node, 'tree'):
v = _AnnotateTypesVisitor(node.tree)
v.visit(node.tree)
node.vhd = node.tree.vhd = inferVhdlObj(node.tree.returnObj)
node.vhdOri = copy(node.vhd)
def visit_Compare(self, node):
node.vhd = vhd_boolean()
self.generic_visit(node)
left, op, right = node.left, node.ops[0], node.comparators[0]
if isinstance(left.vhd, vhd_std_logic) or isinstance(right.vhd, vhd_std_logic):
left.vhd = right.vhd = vhd_std_logic()
elif isinstance(left.vhd, vhd_unsigned) and maybeNegative(right.vhd):
left.vhd = vhd_signed(left.vhd.size + 1)
elif maybeNegative(left.vhd) and isinstance(right.vhd, vhd_unsigned):
right.vhd = vhd_signed(right.vhd.size + 1)
node.vhdOri = copy(node.vhd)
def visit_Str(self, node):
node.vhd = vhd_string()
node.vhdOri = copy(node.vhd)
def visit_Num(self, node):
if node.n < 0:
node.vhd = vhd_int()
else:
node.vhd = vhd_nat()
node.vhdOri = copy(node.vhd)
def visit_For(self, node):
var = node.target.id
# make it possible to detect loop variable
self.tree.vardict[var] = _loopInt(-1)
self.generic_visit(node)
def visit_NameConstant(self, node):
node.vhd = inferVhdlObj(node.value)
node.vhdOri = copy(node.vhd)
def visit_Name(self, node):
if node.id in self.tree.vardict:
node.obj = self.tree.vardict[node.id]
node.vhd = inferVhdlObj(node.obj)
node.vhdOri = copy(node.vhd)
def visit_BinOp(self, node):
self.generic_visit(node)
if isinstance(node.op, (ast.LShift, ast.RShift)):
self.inferShiftType(node)
elif isinstance(node.op, (ast.BitAnd, ast.BitOr, ast.BitXor)):
self.inferBitOpType(node)
elif isinstance(node.op, ast.Mod) and isinstance(node.left, ast.Str): # format string
pass
else:
self.inferBinOpType(node)
def inferShiftType(self, node):
node.vhd = copy(node.left.vhd)
node.right.vhd = vhd_nat()
node.vhdOri = copy(node.vhd)
def inferBitOpType(self, node):
obj = maxType(node.left.vhd, node.right.vhd)
node.vhd = node.left.vhd = node.right.vhd = obj
node.vhdOri = copy(node.vhd)
def inferBinOpType(self, node):
left, op, right = node.left, node.op, node.right
if isinstance(left.vhd, (vhd_boolean, vhd_std_logic)):
left.vhd = vhd_unsigned(1)
if isinstance(right.vhd, (vhd_boolean, vhd_std_logic)):
right.vhd = vhd_unsigned(1)
if isinstance(right.vhd, vhd_unsigned):
if maybeNegative(left.vhd) or \
(isinstance(op, ast.Sub) and not hasattr(node, 'isRhs')):
right.vhd = vhd_signed(right.vhd.size + 1)
if isinstance(left.vhd, vhd_unsigned):
if maybeNegative(right.vhd) or \
(isinstance(op, ast.Sub) and not hasattr(node, 'isRhs')):
left.vhd = vhd_signed(left.vhd.size + 1)
l, r = left.vhd, right.vhd
ls, rs = l.size, r.size
if isinstance(r, vhd_vector) and isinstance(l, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub)):
s = max(ls, rs)
elif isinstance(op, ast.Mod):
s = rs
elif isinstance(op, ast.FloorDiv):
s = ls
elif isinstance(op, ast.Mult):
s = ls + rs
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(l, vhd_vector) and isinstance(r, vhd_int):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
s = ls
elif isinstance(op, ast.Mult):
s = 2 * ls
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(l, vhd_int) and isinstance(r, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
s = rs
elif isinstance(op, ast.Mult):
s = 2 * rs
else:
raise AssertionError("unexpected op %s" % op)
if isinstance(l, vhd_int) and isinstance(r, vhd_int):
node.vhd = vhd_int()
elif isinstance(l, (vhd_signed, vhd_int)) and isinstance(r, (vhd_signed, vhd_int)):
node.vhd = vhd_signed(s)
elif isinstance(l, (vhd_unsigned, vhd_int)) and isinstance(r, (vhd_unsigned, vhd_int)):
node.vhd = vhd_unsigned(s)
else:
node.vhd = vhd_int()
node.vhdOri = copy(node.vhd)
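    # Result-size rules implemented above, for two vector operands:
    #     Add/Sub -> max(ls, rs); Mod -> rs; FloorDiv -> ls; Mult -> ls + rs
    # With one integer operand the vector side dictates the size, except
    # that Mult doubles it.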
def visit_BoolOp(self, node):
self.generic_visit(node)
for n in node.values:
n.vhd = vhd_boolean()
node.vhd = vhd_boolean()
node.vhdOri = copy(node.vhd)
def visit_If(self, node):
if node.ignore:
return
self.generic_visit(node)
for test, suite in node.tests:
test.vhd = vhd_boolean()
def visit_IfExp(self, node):
self.generic_visit(node)
node.test.vhd = vhd_boolean()
def visit_ListComp(self, node):
pass # do nothing
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Slice):
self.accessSlice(node)
else:
self.accessIndex(node)
def accessSlice(self, node):
self.generic_visit(node)
lower = node.value.vhd.size
t = type(node.value.vhd)
# node.expr.vhd = vhd_unsigned(node.expr.vhd.size)
if node.slice.lower:
node.slice.lower.vhd = vhd_int()
lower = self.getVal(node.slice.lower)
upper = 0
if node.slice.upper:
node.slice.upper.vhd = vhd_int()
upper = self.getVal(node.slice.upper)
if isinstance(node.ctx, ast.Store):
node.vhd = t(lower - upper)
else:
node.vhd = vhd_unsigned(lower - upper)
node.vhdOri = copy(node.vhd)
def accessIndex(self, node):
self.generic_visit(node)
node.vhd = vhd_std_logic() # XXX default
node.slice.value.vhd = vhd_int()
obj = node.value.obj
if isinstance(obj, list):
assert len(obj)
node.vhd = inferVhdlObj(obj[0])
elif isinstance(obj, _Ram):
node.vhd = inferVhdlObj(obj.elObj)
elif isinstance(obj, _Rom):
node.vhd = vhd_int()
elif isinstance(obj, intbv):
node.vhd = vhd_std_logic()
node.vhdOri = copy(node.vhd)
def visit_UnaryOp(self, node):
self.visit(node.operand)
node.vhd = copy(node.operand.vhd)
if isinstance(node.op, ast.Not):
# postpone this optimization until initial values are written
# if isinstance(node.operand.vhd, vhd_std_logic):
# node.vhd = vhd_std_logic()
# else:
# node.vhd = node.operand.vhd = vhd_boolean()
node.vhd = node.operand.vhd = vhd_boolean()
elif isinstance(node.op, ast.USub):
if isinstance(node.vhd, vhd_unsigned):
node.vhd = vhd_signed(node.vhd.size + 1)
elif isinstance(node.vhd, vhd_nat):
node.vhd = vhd_int()
node.vhdOri = copy(node.vhd)
def visit_While(self, node):
self.generic_visit(node)
node.test.vhd = vhd_boolean()
def _annotateTypes(genlist):
for tree in genlist:
if isinstance(tree, _UserVhdlCode):
continue
v = _AnnotateTypesVisitor(tree)
v.visit(tree)
|
juhasch/myhdl
|
myhdl/conversion/_toVHDL.py
|
Python
|
lgpl-2.1
| 78,411
|
[
"VisIt"
] |
bbdc18264db5304f6fe0257f4ef3e1a30e4106e93a8db77c977429aea65cd046
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import json
import numpy as np
import warnings
import xml.etree.ElementTree as ET
from pymatgen.core.periodic_table import Element
from pymatgen.electronic_structure.core import OrbitalType
from pymatgen.io.vasp.inputs import Kpoints
from pymatgen.io.vasp.outputs import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar, Dynmat, BSVasprun, UnconvergedVASPWarning, \
Wavecar
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Mark Turiansky"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class VasprunTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.resetwarnings()
def test_multiple_dielectric(self):
v = Vasprun(os.path.join(test_dir, "vasprun.GW0.xml"))
self.assertEqual(len(v.other_dielectric), 3)
def test_charge_charge_dielectric(self):
"""
VASP 5.4.4 writes out two dielectric functions to vasprun.xml
These are the "density-density" and "velocity-velocity" linear response functions.
See the comments in `linear_optics.F` for details.
"""
v = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric_5.4.4"),
parse_potcar_file=False)
        self.assertIsNotNone(v.dielectric)
        self.assertIn('density', v.dielectric_data)
        self.assertIn('velocity', v.dielectric_data)
def test_optical_absorption_coeff(self):
v = Vasprun(os.path.join(test_dir, "vasprun.BSE.xml.gz"))
absorption_coeff = v.optical_absorption_coeff
self.assertEqual(absorption_coeff[1], 24966408728.917931)
def test_vasprun_with_more_than_two_unlabelled_dielectric_functions(self):
with self.assertRaises(NotImplementedError):
Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric_bad"),
parse_potcar_file=False)
def test_bad_vasprun(self):
self.assertRaises(ET.ParseError,
Vasprun, os.path.join(test_dir, "bad_vasprun.xml"))
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
v = Vasprun(os.path.join(test_dir, "bad_vasprun.xml"),
exception_on_bad_xml=False)
# Verify some things
self.assertEqual(len(v.ionic_steps), 1)
self.assertAlmostEqual(v.final_energy, -269.00551374)
self.assertTrue(issubclass(w[-1].category,
UserWarning))
def test_vdw(self):
v = Vasprun(os.path.join(test_dir, "vasprun.xml.vdw"))
self.assertAlmostEqual(v.final_energy, -9.78310677)
def test_properties(self):
filepath = os.path.join(test_dir, 'vasprun.xml.nonlm')
vasprun = Vasprun(filepath, parse_potcar_file=False)
orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
0]].keys())
self.assertIn(OrbitalType.s, orbs)
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath, parse_potcar_file=False)
# Test NELM parsing.
self.assertEqual(vasprun.parameters["NELM"], 60)
# test pdos parsing
pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)
self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)
self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301,))
filepath2 = os.path.join(test_dir, 'lifepo4.xml')
vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
parse_potcar_file=False)
totalscsteps = sum([len(i['electronic_steps'])
for i in vasprun.ionic_steps])
self.assertEqual(29, len(vasprun.ionic_steps))
self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
self.assertEqual(vasprun.lattice,
vasprun.lattice_rec.reciprocal_lattice)
for i, step in enumerate(vasprun.ionic_steps):
self.assertEqual(vasprun.structures[i], step["structure"])
self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
"structure"] for i in range(len(vasprun.ionic_steps))]))
self.assertEqual(308, totalscsteps,
"Incorrect number of energies read from vasprun.xml")
self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
vasprun.atomic_symbols)
self.assertEqual(vasprun.final_structure.composition.reduced_formula,
"LiFe4(PO4)4")
self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
expectedans = (2.539, 4.0906, 1.5516, False)
(gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
self.assertAlmostEqual(gap, expectedans[0])
self.assertAlmostEqual(cbm, expectedans[1])
self.assertAlmostEqual(vbm, expectedans[2])
self.assertEqual(direct, expectedans[3])
self.assertFalse(vasprun.is_hubbard)
self.assertEqual(vasprun.potcar_symbols,
['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
'PAW_PBE O 08Apr2002'])
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints,
"Actual kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints_weights,
"Actual kpoints weights cannot be read")
for atomdoses in vasprun.pdos:
for orbitaldos in atomdoses:
self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
# test skipping ionic steps.
vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
self.assertEqual(vasprun_skip.nionic_steps, 29)
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
self.assertEqual(len(vasprun_skip.ionic_steps),
len(vasprun_skip.structures))
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
# Check that nionic_steps is preserved no matter what.
self.assertEqual(vasprun_skip.nionic_steps,
vasprun.nionic_steps)
self.assertNotAlmostEqual(vasprun_skip.final_energy,
vasprun.final_energy)
# Test with ionic_step_offset
vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
self.assertEqual(len(vasprun_offset.ionic_steps),
int(len(vasprun.ionic_steps) / 3) - 1)
self.assertEqual(vasprun_offset.structures[0],
vasprun_skip.structures[2])
self.assertTrue(vasprun_ggau.is_hubbard)
self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][
0][0][96][0], 0.0032)
d = vasprun_ggau.as_dict()
self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
self.assertEqual(d["nelements"], 4)
filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category,
UnconvergedVASPWarning))
self.assertTrue(vasprun_unconverged.converged_ionic)
self.assertFalse(vasprun_unconverged.converged_electronic)
self.assertFalse(vasprun_unconverged.converged)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0],
3.33402531)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1],
-0.00559998)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2],
3.31237357)
self.assertTrue(vasprun_dfpt.converged)
entry = vasprun_dfpt.get_computed_entry()
entry = MaterialsProjectCompatibility(
check_potcar_hash=False).process_entry(entry)
self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
entry.energy)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.ionic')
vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0],
515.73485838)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1],
-0.00263523)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2],
19.02110169)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
self.assertFalse(vasprun_dfpt_unconv.converged)
vasprun_uniform = Vasprun(os.path.join(test_dir, "vasprun.xml.uniform"),
parse_potcar_file=False)
self.assertEqual(vasprun_uniform.kpoints.style,
Kpoints.supported_modes.Reciprocal)
vasprun_no_pdos = Vasprun(os.path.join(test_dir, "Li_no_projected.xml"),
parse_potcar_file=False)
self.assertIsNotNone(vasprun_no_pdos.complete_dos)
self.assertFalse(vasprun_no_pdos.dos_has_errors)
vasprun_diel = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric"),
parse_potcar_file=False)
self.assertAlmostEqual(0.4294, vasprun_diel.dielectric[0][10])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][0])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][1])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][2])
self.assertAlmostEqual(0.0, vasprun_diel.dielectric[1][51][3])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][0])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][1])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][2])
self.assertAlmostEqual(0.0, vasprun_diel.dielectric[2][85][3])
v = Vasprun(os.path.join(test_dir, "vasprun.xml.indirect.gz"))
(gap, cbm, vbm, direct) = v.eigenvalue_band_properties
self.assertFalse(direct)
vasprun_optical = Vasprun(
os.path.join(test_dir, "vasprun.xml.opticaltransitions"),
parse_potcar_file=False)
self.assertAlmostEqual(3.084, vasprun_optical.optical_transition[0][0])
self.assertAlmostEqual(3.087, vasprun_optical.optical_transition[3][0])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[0][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[1][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[7][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[19][1])
self.assertAlmostEqual(3.3799999999,
vasprun_optical.optical_transition[54][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[55][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[56][0])
self.assertAlmostEqual(10554.9860,
vasprun_optical.optical_transition[54][1])
self.assertAlmostEqual(0.0, vasprun_optical.optical_transition[55][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[56][1])
def test_force_constants(self):
vasprun_fc = Vasprun(os.path.join(test_dir, "vasprun.xml.dfpt.phonon"),
parse_potcar_file=False)
fc_ans = [[-0.00184451, -0., -0.],
[-0., -0.00933824, -0.03021279],
[-0., -0.03021279, 0.01202547]]
nm_ans = [[0.0884346, -0.08837289, -0.24995639],
[-0.0884346, 0.08837289, 0.24995639],
[0.15306645, -0.05105771, -0.14441306],
[-0.15306645, 0.05105771, 0.14441306],
[-0.0884346, 0.08837289, 0.24995639],
[0.0884346, -0.08837289, -0.24995639],
[-0.15306645, 0.05105771, 0.14441306],
[0.15306645, -0.05105771, -0.14441306],
[-0.0884346, 0.08837289, 0.24995639],
[0.0884346, -0.08837289, -0.24995639],
[-0.15306645, 0.05105771, 0.14441306],
[0.15306645, -0.05105771, -0.14441306],
[0.0884346, -0.08837289, -0.24995639],
[-0.0884346, 0.08837289, 0.24995639],
[0.15306645, -0.05105771, -0.14441306],
[-0.15306645, 0.05105771, 0.14441306]]
nm_eigenval_ans = [-0.59067079, -0.59067079, -0.59067003, -0.59067003,
-0.59067003, -0.59067003, -0.585009, -0.585009,
-0.58500895, -0.58500883, -0.5062956, -0.5062956]
self.assertEqual(vasprun_fc.force_constants.shape, (16, 16, 3, 3))
self.assertTrue(np.allclose(vasprun_fc.force_constants[8, 9], fc_ans))
self.assertEqual(vasprun_fc.normalmode_eigenvals.size, 48)
self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvals[17:29],
nm_eigenval_ans))
self.assertEqual(vasprun_fc.normalmode_eigenvecs.shape, (48, 16, 3))
self.assertTrue(
np.allclose(vasprun_fc.normalmode_eigenvecs[33], nm_ans))
def test_Xe(self):
vr = Vasprun(os.path.join(test_dir, 'vasprun.xml.xe'),
parse_potcar_file=False)
self.assertEqual(vr.atomic_symbols, ['Xe'])
def test_invalid_element(self):
self.assertRaises(ValueError, Vasprun,
os.path.join(test_dir, 'vasprun.xml.wrong_sp'))
def test_selective_dynamics(self):
vsd = Vasprun(os.path.join(test_dir, 'vasprun.xml.indirect.gz'))
np.testing.assert_array_equal(
vsd.final_structure.site_properties.get('selective_dynamics'),
[[True] * 3, [False] * 3], "Selective dynamics parsing error")
def test_as_dict(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath,
parse_potcar_file=False)
# Test that as_dict() is json-serializable
self.assertIsNotNone(json.dumps(vasprun.as_dict()))
self.assertEqual(
vasprun.as_dict()["input"]["potcar_type"],
['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])
def test_get_band_structure(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
vasprun = Vasprun(filepath,
parse_projected_eigen=True,
parse_potcar_file=False)
            bs = vasprun.get_band_structure(
                kpoints_filename=os.path.join(test_dir, 'KPOINTS_Si_bands'))
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13],
"wrong cbm kpoint index")
            self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
            self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
projected = bs.get_projection_on_elements()
self.assertAlmostEqual(projected[Spin.up][0][0]["Si"], 0.4238)
projected = bs.get_projections_on_elements_and_orbitals(
{"Si": ["s"]})
self.assertAlmostEqual(projected[Spin.up][0][0]["Si"]["s"], 0.4238)
def test_sc_step_overflow(self):
filepath = os.path.join(test_dir, 'vasprun.xml.sc_overflow')
# with warnings.catch_warnings(record=True) as w:
# warnings.simplefilter("always")
# vasprun = Vasprun(filepath)
# self.assertEqual(len(w), 3)
vasprun = Vasprun(filepath)
estep = vasprun.ionic_steps[0]['electronic_steps'][29]
self.assertTrue(np.isnan(estep['e_wo_entrp']))
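        # (Hedged note, not in the original test: the sc_overflow file holds
        # electronic steps where VASP printed '*****' in place of an
        # overflowing number; such entries are parsed as NaN, which is what
        # the assertion above verifies.)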
def test_update_potcar(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
potcar_path = os.path.join(test_dir, 'POTCAR.LiFePO4.gz')
potcar_path2 = os.path.join(test_dir, 'POTCAR2.LiFePO4.gz')
vasprun = Vasprun(filepath, parse_potcar_file=False)
self.assertEqual(vasprun.potcar_spec,
[{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
vasprun.update_potcar_spec(potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
vasprun2 = Vasprun(filepath, parse_potcar_file=False)
self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
self.assertRaises(ValueError, Vasprun, filepath,
parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath, parse_potcar_file=True)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
def test_potcar_not_found(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
# Ensure no potcar is found and nothing is updated
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath, parse_potcar_file='.')
self.assertEqual(len(w), 2)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
def test_parsing_chemical_shift_calculations(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = os.path.join(test_dir, "nmr", "cs", "basic",
'vasprun.xml.chemical_shift.scstep')
vasprun = Vasprun(filepath)
nestep = len(vasprun.ionic_steps[-1]['electronic_steps'])
self.assertEqual(nestep, 10)
self.assertTrue(vasprun.converged)
def test_charged_structure(self):
vpath = os.path.join(test_dir, 'vasprun.charged.xml')
potcar_path = os.path.join(test_dir, 'POT_GGA_PAW_PBE', 'POTCAR.Si.gz')
vasprun = Vasprun(vpath, parse_potcar_file=False)
vasprun.update_charge_from_potcar(potcar_path)
self.assertEqual(vasprun.parameters.get("NELECT", 8), 9)
self.assertEqual(vasprun.structures[0].charge, 1)
class OutcarTest(unittest.TestCase):
def test_init(self):
for f in ['OUTCAR', 'OUTCAR.gz']:
filepath = os.path.join(test_dir, f)
outcar = Outcar(filepath)
expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
{'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162},
{'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
{'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162})
expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
self.assertAlmostEqual(outcar.magnetization, expected_mag, 5,
"Wrong magnetization read from Outcar")
self.assertAlmostEqual(outcar.charge, expected_chg, 5,
"Wrong charge read from Outcar")
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
'Total CPU time used (sec)': 545.142,
'Elapsed time (sec)': 546.709,
'Maximum memory used (kb)': 0.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 544.204,
'cores': '8'})
self.assertAlmostEqual(outcar.efermi, 2.0112)
self.assertAlmostEqual(outcar.nelect, 44.9999991)
self.assertAlmostEqual(outcar.total_mag, 0.9999998)
self.assertIsNotNone(outcar.as_dict())
self.assertFalse(outcar.lepsilon)
filepath = os.path.join(test_dir, 'OUTCAR.stopped')
outcar = Outcar(filepath)
self.assertTrue(outcar.is_stopped)
for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:
filepath = os.path.join(test_dir, f)
outcar = Outcar(filepath)
self.assertTrue(outcar.lepsilon)
self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0],
0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2],
0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2],
0.001419)
self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
filepath = os.path.join(test_dir, 'OUTCAR.NiO_SOC.gz')
outcar = Outcar(filepath)
expected_mag = (
{'s': Magmom([0.0, 0.0, -0.001]), 'p': Magmom([0.0, 0.0, -0.003]),
'd': Magmom([0.0, 0.0, 1.674]), 'tot': Magmom([0.0, 0.0, 1.671])},
{'s': Magmom([0.0, 0.0, 0.001]), 'p': Magmom([0.0, 0.0, 0.003]),
'd': Magmom([0.0, 0.0, -1.674]),
'tot': Magmom([0.0, 0.0, -1.671])},
{'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])},
{'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])}
)
# test note: Magmom class uses np.allclose() when testing for equality
# so fine to use assertEqual here
self.assertEqual(outcar.magnetization, expected_mag,
"Wrong vector magnetization read from Outcar for SOC calculation")
def test_polarization(self):
filepath = os.path.join(test_dir, "OUTCAR.BaTiO3.polar")
outcar = Outcar(filepath)
self.assertEqual(outcar.spin, True)
self.assertEqual(outcar.noncollinear, False)
self.assertAlmostEqual(outcar.p_ion[0], 0.0)
self.assertAlmostEqual(outcar.p_ion[1], 0.0)
self.assertAlmostEqual(outcar.p_ion[2], -5.56684)
self.assertAlmostEqual(outcar.p_sp1[0], 2.00068)
self.assertAlmostEqual(outcar.p_sp2[0], -2.00044)
self.assertAlmostEqual(outcar.p_elec[0], 0.00024)
self.assertAlmostEqual(outcar.p_elec[1], 0.00019)
self.assertAlmostEqual(outcar.p_elec[2], 3.61674)
def test_pseudo_zval(self):
filepath = os.path.join(test_dir, "OUTCAR.BaTiO3.polar")
outcar = Outcar(filepath)
self.assertDictEqual({'Ba': 10.00, 'Ti': 10.00, 'O': 6.00},
outcar.zval_dict)
def test_dielectric(self):
filepath = os.path.join(test_dir, "OUTCAR.dielectric")
outcar = Outcar(filepath)
outcar.read_corrections()
self.assertAlmostEqual(outcar.data["dipol_quadrupol_correction"],
0.03565)
self.assertAlmostEqual(outcar.final_energy, -797.46760559)
def test_freq_dielectric(self):
filepath = os.path.join(test_dir, "OUTCAR.LOPTICS")
outcar = Outcar(filepath)
outcar.read_freq_dielectric()
self.assertAlmostEqual(outcar.frequencies[0], 0)
self.assertAlmostEqual(outcar.frequencies[-1], 39.826101)
self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
8.96938800)
self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
7.36167000e-01 + 1.53800000e-03j)
self.assertEqual(len(outcar.frequencies),
len(outcar.dielectric_tensor_function))
        np.testing.assert_array_equal(
            outcar.dielectric_tensor_function[0],
            outcar.dielectric_tensor_function[0].transpose())
def test_freq_dielectric_vasp544(self):
filepath = os.path.join(test_dir, "OUTCAR.LOPTICS.vasp544")
outcar = Outcar(filepath)
outcar.read_freq_dielectric()
self.assertAlmostEqual(outcar.frequencies[0], 0)
self.assertAlmostEqual(outcar.frequencies[-1], 39.63964)
self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
12.769435 + 0j)
self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
0.828615 + 0.016594j)
self.assertEqual(len(outcar.frequencies),
len(outcar.dielectric_tensor_function))
        np.testing.assert_array_equal(
            outcar.dielectric_tensor_function[0],
            outcar.dielectric_tensor_function[0].transpose())
def test_read_elastic_tensor(self):
filepath = os.path.join(test_dir, "OUTCAR.total_tensor.Li2O.gz")
outcar = Outcar(filepath)
outcar.read_elastic_tensor()
self.assertAlmostEqual(outcar.data["elastic_tensor"][0][0], 1986.3391)
self.assertAlmostEqual(outcar.data["elastic_tensor"][0][1], 187.8324)
self.assertAlmostEqual(outcar.data["elastic_tensor"][3][3], 586.3034)
def test_read_piezo_tensor(self):
filepath = os.path.join(test_dir, "OUTCAR.lepsilon.gz")
outcar = Outcar(filepath)
outcar.read_piezo_tensor()
self.assertAlmostEqual(outcar.data["piezo_tensor"][0][0], 0.52799)
self.assertAlmostEqual(outcar.data["piezo_tensor"][1][3], 0.35998)
self.assertAlmostEqual(outcar.data["piezo_tensor"][2][5], 0.35997)
def test_core_state_eigen(self):
filepath = os.path.join(test_dir, "OUTCAR.CL")
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
filepath = os.path.join(test_dir, "OUTCAR.icorelevel")
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[4]["3d"][-1], -31.4522)
def test_avg_core_poten(self):
filepath = os.path.join(test_dir, "OUTCAR.lepsilon")
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[-1][1], -90.0487)
filepath = os.path.join(test_dir, "OUTCAR")
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[0][6], -73.1068)
def test_single_atom(self):
filepath = os.path.join(test_dir, "OUTCAR.Al")
outcar = Outcar(filepath)
expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)
expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)
self.assertAlmostEqual(outcar.magnetization, expected_mag)
self.assertAlmostEqual(outcar.charge, expected_chg)
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,
'Total CPU time used (sec)': 50.194,
'Elapsed time (sec)': 52.337,
'Maximum memory used (kb)': 62900.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 49.602,
'cores': '32'})
self.assertAlmostEqual(outcar.efermi, 8.0942)
self.assertAlmostEqual(outcar.nelect, 3)
self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
self.assertIsNotNone(outcar.as_dict())
def test_chemical_shifts(self):
filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
"hydromagnesite", "OUTCAR")
outcar = Outcar(filename)
outcar.read_chemical_shifts()
expected_chemical_shifts = [[191.9974, 69.5232, 0.6342],
[195.0808, 68.183, 0.833],
[192.0389, 69.5762, 0.6329],
[195.0844, 68.1756, 0.8336],
[192.005, 69.5289, 0.6339],
[195.0913, 68.1859, 0.833],
[192.0237, 69.565, 0.6333],
[195.0788, 68.1733, 0.8337]]
self.assertAlmostEqual(
len(outcar.data["chemical_shifts"]["valence_only"][20: 28]),
len(expected_chemical_shifts))
for c1, c2 in zip(
outcar.data["chemical_shifts"]["valence_only"][20: 28],
expected_chemical_shifts):
for x1, x2 in zip(list(c1.maryland_values), c2):
self.assertAlmostEqual(x1, x2, places=5)
def test_chemical_shifts_with_different_core_contribution(self):
filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
"core.diff.chemical.shifts.OUTCAR")
outcar = Outcar(filename)
outcar.read_chemical_shifts()
c_vo = outcar.data["chemical_shifts"]["valence_only"][7].maryland_values
for x1, x2 in zip(list(c_vo),
[198.7009, 73.7484, 1.0000]):
self.assertAlmostEqual(x1, x2)
c_vc = outcar.data["chemical_shifts"]["valence_and_core"][
7].maryland_values
for x1, x2 in zip(list(c_vc),
[-1.9406, 73.7484, 1.0000]):
self.assertAlmostEqual(x1, x2)
def test_cs_raw_tensors(self):
filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
"core.diff.chemical.shifts.OUTCAR")
outcar = Outcar(filename)
unsym_tensors = outcar.read_cs_raw_symmetrized_tensors()
self.assertEqual(unsym_tensors[0],
[[-145.814605, -4.263425, 0.000301],
[4.263434, -145.812238, -8.7e-05],
[0.000136, -0.000189, -142.794068]])
self.assertEqual(unsym_tensors[29],
[[287.789318, -53.799325, 30.900024],
[-53.799571, 225.668117, -17.839598],
[3.801103, -2.195218, 88.896756]])
def test_cs_g0_contribution(self):
filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
"core.diff.chemical.shifts.OUTCAR")
outcar = Outcar(filename)
g0_contrib = outcar.read_cs_g0_contribution()
self.assertEqual(g0_contrib,
[[-8.773535, 9e-06, 1e-06],
[1.7e-05, -8.773536, -0.0792],
[-6e-06, -0.008328, -9.320237]])
def test_cs_core_contribution(self):
filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
"core.diff.chemical.shifts.OUTCAR")
outcar = Outcar(filename)
core_contrib = outcar.read_cs_core_contribution()
self.assertEqual(core_contrib,
{'Mg': -412.8248405,
'C': -200.5098812,
'O': -271.0766979})
def test_nmr_efg(self):
filename = os.path.join(test_dir, "nmr", "efg", "AlPO4", "OUTCAR")
outcar = Outcar(filename)
outcar.read_nmr_efg()
expected_efg = [
{'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
{'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
{'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
{'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
{'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
{'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
{'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58},
{'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58}]
self.assertEqual(len(outcar.data["efg"][2:10]), len(expected_efg))
for e1, e2 in zip(outcar.data["efg"][2:10], expected_efg):
for k in e1.keys():
self.assertAlmostEqual(e1[k], e2[k], places=5)
def test_read_fermi_contact_shift(self):
filepath = os.path.join(test_dir, "OUTCAR_fc")
outcar = Outcar(filepath)
outcar.read_fermi_contact_shift()
self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'fch'][0][0],
-0.002)
self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'th'][0][0],
-0.052)
self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'dh'][0][0],
0.0)
def test_drift(self):
outcar = Outcar(os.path.join(test_dir, "OUTCAR"))
self.assertEqual(len(outcar.drift), 5)
self.assertAlmostEqual(np.sum(outcar.drift), 0)
outcar = Outcar(os.path.join(test_dir, "OUTCAR.CL"))
self.assertEqual(len(outcar.drift), 79)
self.assertAlmostEqual(np.sum(outcar.drift), 0.448010)
def test_electrostatic_potential(self):
outcar = Outcar(os.path.join(test_dir, "OUTCAR"))
self.assertEqual(outcar.ngf, [54, 30, 54])
self.assertTrue(
np.allclose(outcar.sampling_radii, [0.9748, 0.9791, 0.7215]))
self.assertTrue(np.allclose(outcar.electrostatic_potential,
[-26.0704, -45.5046, -45.5046, -72.9539,
-73.0621, -72.9539, -73.0621]))
class BSVasprunTest(unittest.TestCase):
def test_get_band_structure(self):
filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
vasprun = BSVasprun(filepath, parse_potcar_file=False)
        bs = vasprun.get_band_structure(
            kpoints_filename=os.path.join(test_dir, 'KPOINTS_Si_bands'))
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
        self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
d = vasprun.as_dict()
self.assertIn("eigenvalues", d["output"])
class OszicarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'OSZICAR')
oszicar = Oszicar(filepath)
self.assertEqual(len(oszicar.electronic_steps),
len(oszicar.ionic_steps))
self.assertEqual(len(oszicar.all_energies), 60)
self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'LOCPOT')
locpot = Locpot.from_file(filepath)
self.assertAlmostEqual(-217.05226954,
sum(locpot.get_average_along_axis(0)))
self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)
class ChgcarTest(PymatgenTest):
def test_init(self):
filepath = os.path.join(test_dir, 'CHGCAR.nospin')
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)
filepath = os.path.join(test_dir, 'CHGCAR.spin')
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022)
# test sum
chg += chg
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022 * 2)
filepath = os.path.join(test_dir, 'CHGCAR.Fe3O4')
chg = Chgcar.from_file(filepath)
ans = [1.56472768, 3.25985108, 3.49205728, 3.66275028, 3.8045896,
5.10813352]
myans = chg.get_integrated_diff(0, 3, 6)
self.assertTrue(np.allclose(myans[:, 1], ans))
def test_write(self):
filepath = os.path.join(test_dir, 'CHGCAR.spin')
chg = Chgcar.from_file(filepath)
chg.write_file("CHGCAR_pmg")
with open("CHGCAR_pmg") as f:
for i, line in enumerate(f):
if i == 22130:
self.assertEqual("augmentation occupancies 1 15\n", line)
if i == 44255:
self.assertEqual("augmentation occupancies 1 15\n", line)
os.remove("CHGCAR_pmg")
def test_soc_chgcar(self):
filepath = os.path.join(test_dir, "CHGCAR.NiO_SOC.gz")
chg = Chgcar.from_file(filepath)
self.assertEqual(set(chg.data.keys()),
{'total', 'diff_x', 'diff_y', 'diff_z', 'diff'})
self.assertTrue(chg.is_soc)
self.assertEqual(chg.data['diff'].shape, chg.data['diff_y'].shape)
# check our construction of chg.data['diff'] makes sense
# this has been checked visually too and seems reasonable
self.assertEqual(abs(chg.data['diff'][0][0][0]),
np.linalg.norm([chg.data['diff_x'][0][0][0],
chg.data['diff_y'][0][0][0],
chg.data['diff_z'][0][0][0]]))
# and that the net magnetization is about zero
# note: we get ~ 0.08 here, seems a little high compared to
# vasp output, but might be due to chgcar limitations?
self.assertAlmostEqual(chg.net_magnetization, 0.0, places=0)
chg.write_file("CHGCAR_pmg_soc")
chg_from_file = Chgcar.from_file("CHGCAR_pmg_soc")
self.assertTrue(chg_from_file.is_soc)
os.remove("CHGCAR_pmg_soc")
def test_hdf5(self):
chgcar = Chgcar.from_file(os.path.join(test_dir, "CHGCAR.NiO_SOC.gz"))
chgcar.to_hdf5("chgcar_test.hdf5")
import h5py
with h5py.File("chgcar_test.hdf5", "r") as f:
self.assertArrayAlmostEqual(np.array(f["vdata"]["total"]),
chgcar.data["total"])
self.assertArrayAlmostEqual(np.array(f["vdata"]["diff"]),
chgcar.data["diff"])
self.assertArrayAlmostEqual(np.array(f["lattice"]),
chgcar.structure.lattice.matrix)
self.assertArrayAlmostEqual(np.array(f["fcoords"]),
chgcar.structure.frac_coords)
for z in f["Z"]:
self.assertIn(z, [Element.Ni.Z, Element.O.Z])
for sp in f["species"]:
self.assertIn(sp, ["Ni", "O"])
chgcar2 = Chgcar.from_hdf5("chgcar_test.hdf5")
self.assertArrayAlmostEqual(chgcar2.data["total"],
chgcar.data["total"])
os.remove("chgcar_test.hdf5")
class ProcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'PROCAR.simple')
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, 'd')[Spin.up], 0)
self.assertAlmostEqual(p.get_occupation(0, 's')[Spin.up],
0.35381249999999997)
self.assertAlmostEqual(p.get_occupation(0, 'p')[Spin.up], 1.19540625)
self.assertRaises(ValueError, p.get_occupation, 1, 'm')
self.assertEqual(p.nbands, 10)
self.assertEqual(p.nkpoints, 10)
self.assertEqual(p.nions, 3)
lat = Lattice.cubic(3.)
s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
[0.25, 0.25, 0.25],
[0.75, 0.75, 0.75]])
d = p.get_projection_on_elements(s)
self.assertAlmostEqual(d[Spin.up][2][2],
{'Na': 0.042, 'K': 0.646, 'Li': 0.042})
filepath = os.path.join(test_dir, 'PROCAR')
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.up],
0.96214813853000025)
self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.down],
0.85796295426000124)
def test_phase_factors(self):
filepath = os.path.join(test_dir, 'PROCAR.phase')
p = Procar(filepath)
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
-0.746 + 0.099j)
self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 0, 0],
0.372 - 0.654j)
# Two Li should have same phase factor.
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
p.phase_factors[Spin.up][0, 0, 1, 0])
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 2, 0],
-0.053 + 0.007j)
self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 2, 0],
0.027 - 0.047j)
class XdatcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'XDATCAR_4')
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
filepath = os.path.join(test_dir, 'XDATCAR_5')
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
x.concatenate(os.path.join(test_dir, 'XDATCAR_4'))
self.assertEqual(len(x.structures), 8)
self.assertIsNotNone(x.get_string())
class DynmatTest(unittest.TestCase):
def test_init(self):
# nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init
filepath = os.path.join(test_dir, 'DYNMAT')
d = Dynmat(filepath)
self.assertEqual(d.nspecs, 2)
self.assertEqual(d.natoms, 6)
self.assertEqual(d.ndisps, 3)
self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))
self.assertTrue(4 in d.data)
self.assertTrue(2 in d.data[4])
self.assertTrue(np.allclose(
d.data[4][2]['dispvec'], [0., 0.05, 0.]
))
self.assertTrue(np.allclose(
d.data[4][2]['dynmat'][3], [0.055046, -0.298080, 0.]
))
# TODO: test get_phonon_frequencies once cross-checked
class WavecarTest(unittest.TestCase):
def setUp(self):
self.w = Wavecar(os.path.join(test_dir, 'WAVECAR.N2'))
self.a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
self.vol = np.dot(self.a[0, :], np.cross(self.a[1, :], self.a[2, :]))
self.b = np.array([np.cross(self.a[1, :], self.a[2, :]),
np.cross(self.a[2, :], self.a[0, :]),
np.cross(self.a[0, :], self.a[1, :])])
self.b = 2*np.pi*self.b/self.vol
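        # Sanity sketch (not part of the original tests): the reciprocal
        # lattice built above follows b_i = 2*pi * (a_j x a_k) / V with
        # V = a_1 . (a_2 x a_3). For this cubic 10 A cell that reduces to
        # b = (2*pi/10) * identity:
        #     >>> np.allclose(self.b, 2 * np.pi / 10.0 * np.eye(3))
        #     True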
def test_init(self):
self.assertEqual(self.w.filename, os.path.join(test_dir, 'WAVECAR.N2'))
self.assertAlmostEqual(self.w.efermi, -5.7232, places=4)
self.assertEqual(self.w.encut, 25)
self.assertEqual(self.w.nb, 9)
self.assertEqual(self.w.nk, 1)
self.assertTrue(np.allclose(self.w.a, self.a))
self.assertTrue(np.allclose(self.w.b, self.b))
self.assertAlmostEqual(self.w.vol, self.vol)
self.assertEqual(len(self.w.kpoints), self.w.nk)
self.assertEqual(len(self.w.coeffs), self.w.nk)
self.assertEqual(len(self.w.coeffs[0]), self.w.nb)
self.assertEqual(len(self.w.band_energy), self.w.nk)
self.assertEqual(self.w.band_energy[0].shape, (self.w.nb, 3))
self.assertLessEqual(len(self.w.Gpoints[0]), 257)
for k in range(self.w.nk):
for b in range(self.w.nb):
self.assertEqual(len(self.w.coeffs[k][b]),
len(self.w.Gpoints[k]))
with self.assertRaises(ValueError):
Wavecar(os.path.join(test_dir, 'WAVECAR.N2.malformed'))
import sys
from io import StringIO
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
Wavecar(os.path.join(test_dir, 'WAVECAR.N2'), verbose=True)
self.assertNotEqual(out.getvalue().strip(), '')
finally:
sys.stdout = saved_stdout
self.w = Wavecar(os.path.join(test_dir, 'WAVECAR.N2.45210'))
self.assertEqual(self.w.filename, os.path.join(test_dir,
'WAVECAR.N2.45210'))
self.assertAlmostEqual(self.w.efermi, -5.7232, places=4)
self.assertEqual(self.w.encut, 25)
self.assertEqual(self.w.nb, 9)
self.assertEqual(self.w.nk, 1)
self.assertTrue(np.allclose(self.w.a, self.a))
self.assertTrue(np.allclose(self.w.b, self.b))
self.assertAlmostEqual(self.w.vol, self.vol)
self.assertEqual(len(self.w.kpoints), self.w.nk)
self.assertEqual(len(self.w.coeffs), self.w.nk)
self.assertEqual(len(self.w.coeffs[0]), self.w.nb)
self.assertEqual(len(self.w.band_energy), self.w.nk)
self.assertEqual(self.w.band_energy[0].shape, (self.w.nb, 3))
self.assertLessEqual(len(self.w.Gpoints[0]), 257)
with self.assertRaises(ValueError):
Wavecar(os.path.join(test_dir, 'WAVECAR.N2.spin'))
temp_ggp = Wavecar._generate_G_points
try:
Wavecar._generate_G_points = lambda x, y: []
with self.assertRaises(ValueError):
Wavecar(os.path.join(test_dir, 'WAVECAR.N2'))
finally:
Wavecar._generate_G_points = temp_ggp
def test__generate_nbmax(self):
self.w._generate_nbmax()
self.assertEqual(self.w._nbmax.tolist(), [5, 5, 5])
def test__generate_G_points(self):
for k in range(self.w.nk):
kp = self.w.kpoints[k]
self.assertLessEqual(len(self.w._generate_G_points(kp)), 257)
def test_evaluate_wavefunc(self):
self.w.Gpoints.append(np.array([0, 0, 0]))
self.w.kpoints.append(np.array([0, 0, 0]))
self.w.coeffs.append([[1+1j]])
self.assertAlmostEqual(self.w.evaluate_wavefunc(-1, -1, [0, 0, 0]),
(1+1j)/np.sqrt(self.vol), places=4)
self.assertAlmostEqual(self.w.evaluate_wavefunc(0, 0, [0, 0, 0]),
np.sum(self.w.coeffs[0][0])/np.sqrt(self.vol),
places=4)
def test_fft_mesh(self):
mesh = self.w.fft_mesh(0, 5)
ind = np.argmax(np.abs(mesh))
self.assertEqual(np.unravel_index(ind, mesh.shape), (14, 1, 1))
        self.assertEqual(mesh[tuple((self.w.ng / 2).astype(int))], 0j)
mesh = self.w.fft_mesh(0, 5, shift=False)
ind = np.argmax(np.abs(mesh))
self.assertEqual(np.unravel_index(ind, mesh.shape), (6, 8, 8))
self.assertEqual(mesh[0, 0, 0], 0j)
if __name__ == "__main__":
unittest.main()
|
czhengsci/pymatgen
|
pymatgen/io/vasp/tests/test_outputs.py
|
Python
|
mit
| 55,313
|
[
"VASP",
"pymatgen"
] |
aa2aa7a14e4e3f26fa067282a07391dd4484c4df5cba3df418d5565f9425d3f1
|
from pylab import *
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import scipy.stats  # scipy.stats.circmean is used below; scipy.io alone does not import it
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
from numba import jit
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
@jit(nopython=True)
def histo(spk, obins):
n = len(obins)
count = np.zeros(n)
for i in range(n):
count[i] = np.sum((spk>obins[i,0]) * (spk < obins[i,1]))
return count
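# Minimal sketch of histo() (illustrative numbers only): unlike np.histogram,
# which needs disjoint edges, histo counts a spike once per window it falls
# in, so the overlapping windows built below are handled naturally:
#     >>> obins_demo = np.array([[0., 10.], [5., 15.]])
#     >>> histo(np.array([4., 6., 20.]), obins_demo)
#     array([2., 1.])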
def decodeHD(tuning_curves, spikes, ep, bin_size = 200, px = None):
"""
See : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells
tuning_curves: pd.DataFrame with angular position as index and columns as neuron
spikes : dictionnary of spike times
ep : nts.IntervalSet, the epochs for decoding
bin_size : in ms (default:200ms)
px : Occupancy. If None, px is uniform
"""
if len(ep) == 1:
bins = np.arange(ep.as_units('ms').start.iloc[0], ep.as_units('ms').end.iloc[-1], bin_size)
    else:
        # Decoding over several disjoint epochs is not implemented yet.
        raise NotImplementedError("decodeHD currently handles a single epoch only")
order = tuning_curves.columns.values
# TODO CHECK MATCH
spike_counts = pd.DataFrame(index = bins[0:-1]+np.diff(bins)/2, columns = order)
for k in spike_counts:
spks = spikes[k].restrict(ep).as_units('ms').index.values
spike_counts[k], _ = np.histogram(spks, bins)
tcurves_array = tuning_curves.values
spike_counts_array = spike_counts.values
proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0]))
part1 = np.exp(-(bin_size/1000)*tcurves_array.sum(1))
if px is not None:
part2 = px
else:
part2 = np.ones(tuning_curves.shape[0])
#part2 = np.histogram(position['ry'], np.linspace(0, 2*np.pi, 61), weights = np.ones_like(position['ry'])/float(len(position['ry'])))[0]
for i in range(len(proba_angle)):
part3 = np.prod(tcurves_array**spike_counts_array[i], 1)
p = part1 * part2 * part3
proba_angle[i] = p/p.sum() # Normalization process here
proba_angle = pd.DataFrame(index = spike_counts.index.values, columns = tuning_curves.index.values, data= proba_angle)
proba_angle = proba_angle.astype('float')
decoded = nts.Tsd(t = proba_angle.index.values, d = proba_angle.idxmax(1).values, time_units = 'ms')
return decoded, proba_angle
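# The decoder above is the Poisson Bayesian rule of Zhang (1998):
#     P(angle | n) ~ px(angle) * exp(-tau * sum_i f_i(angle)) * prod_i f_i(angle)**n_i
# where tau is the bin size in seconds, f_i is the tuning curve of neuron i
# and n_i its spike count in the bin. A minimal numeric sketch (made-up
# values, not part of the analysis below):
#     >>> tc = np.array([[10., 1.], [1., 10.]])   # rows: angles, cols: neurons
#     >>> counts = np.array([3, 0])               # neuron 0 fired, neuron 1 silent
#     >>> p = np.exp(-0.2 * tc.sum(1)) * np.prod(tc ** counts, 1)
#     >>> (p / p.sum()).argmax()                  # picks the angle preferred by neuron 0
#     0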
datatosave = {}
count = {}
decoded_angle = {}
for session in datasets:
# for session in ['Mouse32/Mouse32-140822']:
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
if np.sum(hd_info)>5:
############################################################################################################
# LOADING DATA
############################################################################################################
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
spikeshd = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0]}
neurons = np.sort(list(spikeshd.keys()))
print(session, len(neurons))
bin_size = 30
nb_bins_tcurves = 61
std_spike_count = 3
std_angle = 3
nan_empty = False
n_rnd = 1
# left_bound = np.arange(-1000-bin_size/2, 1000 - bin_size/4,bin_size/4) # 75% overlap
left_bound = np.arange(-500 - bin_size/3/2, 500 - bin_size/2, bin_size/2) # 50% overlap
# left_bound = np.arange(-1000-bin_size+3*bin_size/4, 1000 - 3*bin_size/4,3*bin_size/4) # 25% overlap
obins = np.vstack((left_bound, left_bound+bin_size)).T
times = obins[:,0]+(np.diff(obins)/2).flatten()
# cutting times between -500 to 500
# times = times[np.logical_and(times>=-500, times<=500)]
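        # Sketch of the overlapping-bin layout built above (with the
        # bin_size = 30 set earlier): each window is 30 ms wide and starts
        # every 15 ms, so consecutive windows share half their span:
        #     >>> lb = np.arange(-505., 485., 15.)
        #     >>> np.vstack((lb, lb + 30)).T[:2]
        #     array([[-505., -475.],
        #            [-490., -460.]])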
n_rip = len(rip_tsd)
####################################################################################################################
# TUNING CURVES
####################################################################################################################
position = pd.read_csv(data_directory+session+"/"+session.split("/")[1] + ".csv", delimiter = ',', header = None, index_col = [0])
angle = nts.Tsd(t = position.index.values, d = position[1].values, time_units = 's')
tcurves = computeAngularTuningCurves(spikeshd, angle, wake_ep, nb_bins = nb_bins_tcurves, frequency = 1/0.0256)
neurons = tcurves.idxmax().sort_values().index.values
####################################################################################################################
# SWR
####################################################################################################################
# BINNING
good_ex = (np.array([4644.8144,4924.4720,5244.9392,7222.9480,7780.2968,11110.1888,11292.3240,11874.5688])*1e6).astype('int')
# tmp = good_ex/1000
tmp = rip_tsd.index.values/1000
# tmp = tmp[0:500]
rates_swr = []
n_swr = len(tmp)
for j, t in enumerate(tmp):
print('swr', j/len(tmp))
tbins = t + obins
spike_counts = pd.DataFrame(index = obins[:,0]+(np.diff(obins)/2).flatten(), columns = neurons)
for k in neurons:
spks = spikes[k].as_units('ms').index.values
spike_counts[k] = histo(spks, tbins)
spike_counts = spike_counts.rolling(window = 20, win_type='gaussian', center = True, min_periods=1).mean(std=std_spike_count)
rates_swr.append(spike_counts)
####################################################################################################################
# RANDOM
####################################################################################################################
# BINNING
rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[j,'start']+500000, sws_ep.loc[j,'end']+500000, np.maximum(1,n_rnd//len(sws_ep))) for j in sws_ep.index])))
rates_rnd = []
tmp3 = rnd_tsd.index.values/1000
n_rnd = len(tmp3)
for j, t in enumerate(tmp3):
print('rnd', j/len(tmp3))
tbins = t + obins
spike_counts = pd.DataFrame(index = obins[:,0]+(np.diff(obins)/2).flatten(), columns = neurons)
for k in neurons:
spks = spikes[k].as_units('ms').index.values
spike_counts[k] = histo(spks, tbins)
spike_counts = spike_counts.rolling(window = 20, win_type='gaussian', center = True, min_periods=1).mean(std=std_spike_count)
rates_rnd.append(spike_counts)
rates_swr = pd.concat(rates_swr)
rates_rnd = pd.concat(rates_rnd)
####################################################################################################################
# DECODING
####################################################################################################################
tcurves_array = tcurves[neurons].values
#SWR
spike_counts_array = rates_swr.values
proba_angle = np.zeros((spike_counts_array.shape[0], tcurves_array.shape[0]))
part1 = np.exp(-(bin_size/1000)*tcurves_array.sum(1))
part2 = np.histogram(angle, np.linspace(0, 2*np.pi, len(tcurves)+1), weights = np.ones_like(angle)/float(len(angle)))[0]
for j in range(len(proba_angle)):
part3 = np.prod(tcurves_array**spike_counts_array[j], 1)
p = part1 * part2 * part3
proba_angle[j] = p/p.sum() # Normalization process here
proba_angle_swr = proba_angle.copy()
proba_angle_swr = proba_angle_swr.reshape(len(rip_tsd), len(times), len(tcurves))
angswr = [tcurves.index.values[proba_angle_swr[i].argmax(1)] for i in range(len(proba_angle_swr))]
angswr = pd.DataFrame(index = times, data = np.array(angswr).T)
decoded_angle[session] = angswr
nanidx = (rates_swr.sum(1) == 0).values
nanidx = nanidx.reshape(angswr.shape)
swrvel = []
for i in angswr.columns:
a = np.unwrap(angswr[i].values)
b = pd.Series(index = times, data = a)
c = b.rolling(window = 20, win_type='gaussian', center=True, min_periods=1).mean(std=std_angle)
if nan_empty:
c[nanidx[:,i]] = np.nan
swrvel.append(np.abs(np.diff(c.values))/bin_size)
swrvel = np.array(swrvel).T
# swrvel[np.where(nanidx[0:-1])] = np.nan
swrvel = pd.DataFrame(index = times[0:-1]+np.diff(times)/2, data = swrvel)
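        # The block above converts decoded angles into an angular speed:
        # np.unwrap removes 2*pi wrap-around jumps, the gaussian rolling
        # mean smooths the trajectory, and |diff|/bin_size yields rad/ms.
        # Illustrative sketch:
        #     >>> a = np.array([6.2, 0.1, 0.3])     # crosses 2*pi
        #     >>> np.abs(np.diff(np.unwrap(a)))     # ~[0.18, 0.2], no spurious jump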
#RND
spike_counts_array = rates_rnd.values
proba_angle = np.zeros((spike_counts_array.shape[0], tcurves_array.shape[0]))
part1 = np.exp(-(bin_size/1000)*tcurves_array.sum(1))
part2 = np.histogram(angle, np.linspace(0, 2*np.pi, len(tcurves)+1), weights = np.ones_like(angle)/float(len(angle)))[0]
for j in range(len(proba_angle)):
part3 = np.prod(tcurves_array**spike_counts_array[j], 1)
p = part1 * part2 * part3
proba_angle[j] = p/p.sum() # Normalization process here
proba_angle_rnd = proba_angle.copy()
proba_angle_rnd = proba_angle_rnd.reshape(n_rnd, len(times), len(tcurves))
angrnd = [tcurves.index.values[proba_angle_rnd[i].argmax(1)] for i in range(n_rnd)]
angrnd = pd.DataFrame(index = times, data = np.array(angrnd).T)
nanidx = (rates_rnd.sum(1) == 0).values
nanidx = nanidx.reshape(angrnd.shape)
rndvel = []
for i in angrnd.columns:
a = np.unwrap(angrnd[i].values)
b = pd.Series(index = times, data = a)
c = b.rolling(window = 20, win_type='gaussian', center=True, min_periods=1).mean(std=std_angle)
if nan_empty:
c[nanidx[:,i]] = np.nan
rndvel.append(np.abs(np.diff(c.values))/bin_size)
rndvel = np.array(rndvel).T
# rndvel[np.where(nanidx[0:-1])] = np.nan
rndvel = pd.DataFrame(index = times[0:-1]+np.diff(times)/2, data = rndvel)
total = (swrvel.mean(1)-rndvel.mean(1))/rndvel.mean(1)
datatosave[session] = total
count[session] = np.sum(hd_info)
# figure()
# subplot(211)
# plot(swrvel.mean(1).loc[-500:500])
# plot(rndvel.mean(1).loc[-500:500])
# subplot(212)
# plot(total.loc[-500:500])
# show()
# rates_swr = rates_swr.values.reshape(len(rip_tsd), len(times), len(neurons))
# figure()
# for i, t in enumerate(good_ex[0:3]/1000):
# idx = np.where(int(t*1000) == rip_tsd.index.values)[0][0]
# # spikes
# subplot(5,3,i+1)
# for j,n in enumerate(neurons):
# plot(spikes[n].as_units('ms').loc[t-500:t+500].fillna(j), '|', color = 'black')
# xlim(t-500,t+500)
# ylim(0, len(neurons))
# # spikes count
# subplot(5,3,i+1+3)
# tmp = pd.DataFrame(index = times, data = rates_swr[idx])
# imshow(tmp.loc[-500:500].T, origin = 'lower', aspect = 'auto')
# # matrix decoding
# subplot(5,3,i+1+6)
# tmp = pd.DataFrame(index = times, data = proba_angle_swr[idx])
# imshow(tmp.loc[-500:500].T, extent = (times[0], times[-1], 0, 2*np.pi), origin = 'lower', aspect = 'auto')
# plot(angswr.iloc[:,idx])
# # angle
# subplot(5,3,i+1+9)
# plot(angswr.iloc[:,idx])
# ylim(0, 2*np.pi)
# # velocity
# subplot(5,3,i+1+12)
# plot(swrvel.iloc[:,idx])
# show()
# subplot(311)
# imshow(rates_swr.T, aspect = 'auto')
# subplot(312)
# plot(angswr)
# subplot(313)
# plot(swrvel)
# xlim(times[0], times[-1])
# show()
# sys.exit()
datasets = ['Mouse12/Mouse12-120806', 'Mouse12/Mouse12-120807', 'Mouse12/Mouse12-120808', 'Mouse12/Mouse12-120809', 'Mouse12/Mouse12-120810',
'Mouse17/Mouse17-130130', 'Mouse32/Mouse32-140822']
pref_angle = {}
bins = np.linspace(0, 2*np.pi, 13)
figure()
for i,s in enumerate(datasets):
a = scipy.stats.circmean(decoded_angle[s].loc[-50:50], 0, 2*np.pi, 0)
b, c = np.histogram(a, bins)
pref_angle[s] = pd.Series(index = c[0:-1]+np.diff(c)/2, data = b)
subplot(3,5,i+1)
hist(a, bins)
title(s)
pref_angle = pd.DataFrame.from_dict(pref_angle)
show()
sys.exit()
datatosave = pd.DataFrame.from_dict(datatosave)
count = pd.Series(count)
alldata = {'swrvel':datatosave,'count':count}
# cPickle.dump(alldata, open("../figures/figures_articles_v4/figure1/decodage_bayesian.pickle", 'wb'))
figure()
subplot(131)
plot(datatosave[count[count>10].index], linewidth = 1, color = 'grey')
plot(datatosave[count[count>10].index].mean(1), linewidth = 3)
ylim(-0.3, 0.3)
title(">10 neurons")
subplot(132)
plot(datatosave[count[count<10].index], linewidth = 1, color = 'grey')
plot(datatosave[count[count<10].index].mean(1), linewidth = 3)
ylim(-0.3, 0.3)
title("<10 neurons")
subplot(133)
plot(datatosave, linewidth = 1, color = 'grey')
plot(datatosave.mean(1), linewidth = 3)
ylim(-0.3, 0.3)
title("all neurons")
show()
|
gviejo/ThalamusPhysio
|
python/main_make_decoding_swr.py
|
Python
|
gpl-3.0
| 13,745
|
[
"Gaussian",
"NEURON"
] |
2971229f637db31b3bd039b4563f5fa0eb815b2359570202f35135d52f9cdb78
|
''' Plot animation of the Boomerang. '''
import numpy as np
import os
import sys
sys.path.append('..')
import vtk
import boomerang as bm
from config_local import DATA_DIR
from quaternion_integrator.quaternion import Quaternion
from general_application_utils import read_trajectory_from_txt
N_SPHERES = len(bm.M)
TIME_SKIP = 8
WRITE = True
DRAW_COH = False
class vtkTimerCallback():
def __init__(self):
self.timer_count = 0
self.n = 1
def execute(self,obj,event):
        print(self.timer_count)
self.textActor.SetInput('Time: %s' % self.timer_count)
r_vectors = bm.get_boomerang_r_vectors_15(
self.locations[self.n*TIME_SKIP],
Quaternion(self.orientations[self.n*TIME_SKIP]))
for k in range(N_SPHERES):
self.sources[k].SetCenter(r_vectors[k][0], r_vectors[k][1],
r_vectors[k][2])
if DRAW_COH:
coh = bm.calculate_boomerang_coh(
self.locations[self.n*TIME_SKIP],
Quaternion(self.orientations[self.n*TIME_SKIP]))
cod = bm.calculate_boomerang_cod(
self.locations[self.n*TIME_SKIP],
Quaternion(self.orientations[self.n*TIME_SKIP]))
self.coh_source.SetCenter(coh)
self.cod_source.SetCenter(cod)
iren = obj
iren.GetRenderWindow().Render()
if WRITE:
self.w2if.Update()
self.w2if.Modified()
self.lwr.SetFileName(os.path.join(
'.', 'figures',
'frame'+ ('%03d' % self.n)+'.png'))
self.lwr.Write()
self.timer_count += 0.01*TIME_SKIP
self.n += 1
if __name__ == '__main__':
# Data file name where trajectory data is stored.
data_name = sys.argv[1]
#######
data_file_name = os.path.join(DATA_DIR, 'boomerang', data_name)
params, locations, orientations = read_trajectory_from_txt(data_file_name)
initial_r_vectors = bm.get_boomerang_r_vectors_15(
locations[0], Quaternion(orientations[0]))
# Create blobs
blob_sources = []
for k in range(N_SPHERES):
blob_sources.append(vtk.vtkSphereSource())
        blob_sources[k].SetCenter(initial_r_vectors[k][0],
                                  initial_r_vectors[k][1],
                                  initial_r_vectors[k][2])
blob_sources[k].SetRadius(bm.A)
if DRAW_COH:
coh_source = vtk.vtkSphereSource()
coh_point = bm.calculate_boomerang_coh(
locations[0], Quaternion(orientations[0]))
coh_source.SetCenter(coh_point)
coh_source.SetRadius(0.1)
cod_source = vtk.vtkSphereSource()
cod_point = bm.calculate_boomerang_cod(
locations[0], Quaternion(orientations[0]))
        cod_source.SetCenter(cod_point)
cod_source.SetRadius(0.1)
wall_source = vtk.vtkCubeSource()
wall_source.SetCenter(0., 0., -0.125)
wall_source.SetXLength(10.)
wall_source.SetYLength(10.)
wall_source.SetZLength(0.25)
#Create a blob mappers and blob actors
blob_mappers = []
blob_actors = []
for k in range(N_SPHERES):
blob_mappers.append(vtk.vtkPolyDataMapper())
blob_mappers[k].SetInputConnection(blob_sources[k].GetOutputPort())
blob_actors.append(vtk.vtkActor())
blob_actors[k].SetMapper(blob_mappers[k])
blob_actors[k].GetProperty().SetColor(1, 0, 0)
if DRAW_COH:
coh_mapper = vtk.vtkPolyDataMapper()
coh_mapper.SetInputConnection(coh_source.GetOutputPort())
coh_actor = vtk.vtkActor()
coh_actor.SetMapper(coh_mapper)
coh_actor.GetProperty().SetColor(5., 5., 1.)
cod_mapper = vtk.vtkPolyDataMapper()
cod_mapper.SetInputConnection(cod_source.GetOutputPort())
cod_actor = vtk.vtkActor()
cod_actor.SetMapper(cod_mapper)
cod_actor.GetProperty().SetColor(0., 0., 1.)
# Set up wall actor and mapper
wall_mapper = vtk.vtkPolyDataMapper()
wall_mapper.SetInputConnection(wall_source.GetOutputPort())
wall_actor = vtk.vtkActor()
wall_actor.SetMapper(wall_mapper)
wall_actor.GetProperty().SetColor(0.1, 0.95, 0.1)
# Create camera
camera = vtk.vtkCamera()
camera.SetPosition(0., -20., 5.)
camera.SetFocalPoint(0., 0., 1.)
camera.SetViewAngle(37.)
# Setup a renderer, render window, and interactor
renderer = vtk.vtkRenderer()
renderer.SetActiveCamera(camera)
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(1000, 1000)
    renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
#Add the actors to the scene
for k in range(len(bm.M)):
renderer.AddActor(blob_actors[k])
if DRAW_COH:
renderer.AddActor(coh_actor)
renderer.AddActor(cod_actor)
renderer.AddActor(wall_actor)
renderer.SetBackground(0.9, 0.9, 0.9) # Background color off white
#Render and interact
renderWindow.Render()
# Initialize must be called prior to creating timer events.
renderWindowInteractor.Initialize()
# Set up writer for pngs so we can save a movie.
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renderWindow)
w2if.SetMagnification(1.5)
w2if.Update()
w2if.ReadFrontBufferOff()
lwr = vtk.vtkPNGWriter()
    lwr.SetInput(w2if.GetOutput())
# Set up text
textActor = vtk.vtkTextActor()
    textActor.GetTextProperty().SetFontSize(24)
textActor.SetDisplayPosition(400, 120)
renderer.AddActor2D(textActor)
textActor.GetTextProperty().SetColor( 0.0, 0.0, 0.0 )
# Sign up to receive TimerEvent
cb = vtkTimerCallback()
cb.actors = blob_actors
cb.lwr = lwr
cb.w2if = w2if
cb.sources = blob_sources
if DRAW_COH:
cb.coh_source = coh_source
cb.cod_source = cod_source
cb.locations = locations
cb.orientations = orientations
cb.textActor = textActor
renderWindowInteractor.AddObserver('TimerEvent', cb.execute)
    timerId = renderWindowInteractor.CreateRepeatingTimer(300)
#start the interaction and timer
renderWindowInteractor.Start()
|
stochasticHydroTools/RotationalDiffusion
|
boomerang/plot_boomerang_trajectory.py
|
Python
|
gpl-3.0
| 5,789
|
[
"VTK"
] |
bc10eefe2c7aa18546ad9fbd4fc9e303b42cf0cf08b63266716fcd8da1b54da8
|
#!/usr/bin/python
# Copyright 2010 Doug Zongker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implements the player skill estimation algorithm from Herbrich et al.,
"TrueSkill(TM): A Bayesian Skill Rating System".
"""
from __future__ import print_function
__author__ = "Doug Zongker <dougz@isotropic.org>"
import sys
if sys.hexversion < 0x02060000:
print("requires Python 2.6 or higher")
sys.exit(1)
from scipy.stats.distributions import norm as scipy_norm
from math import sqrt
norm = scipy_norm()
pdf = norm.pdf
cdf = norm.cdf
icdf = norm.ppf # inverse CDF
# Update rules for approximate marginals for the win and draw cases,
# respectively.
def Vwin(t, e):
return pdf(t-e) / cdf(t-e)
def Wwin(t, e):
return Vwin(t, e) * (Vwin(t, e) + t - e)
def Vdraw(t, e):
return (pdf(-e-t) - pdf(e-t)) / (cdf(e-t) - cdf(-e-t))
def Wdraw(t, e):
return Vdraw(t, e) ** 2 + ((e-t) * pdf(e-t) + (e+t) * pdf(e+t)) / (cdf(e-t) - cdf(-e-t))
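# The four functions above are the correction terms v and w from Herbrich et
# al.: v shifts the mean of the team-difference Gaussian after conditioning
# on the observed outcome, and w (between 0 and 1) shrinks its variance.
# A quick sketch with illustrative values:
#     >>> t, e = 0.5, 0.1
#     >>> Vwin(t, e) > 0            # winner's estimate is pushed upward
#     True
#     >>> 0 < Wwin(t, e) < 1        # uncertainty shrinks but stays positive
#     True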
class Gaussian(object):
"""
Object representing a gaussian distribution. Create as:
Gaussian(mu=..., sigma=...)
or
Gaussian(pi=..., tau=...)
or
Gaussian() # gives 0 mean, infinite sigma
"""
def __init__(self, mu=None, sigma=None, pi=None, tau=None):
if pi is not None:
self.pi = pi
self.tau = tau
elif mu is not None:
self.pi = sigma ** -2
self.tau = self.pi * mu
else:
self.pi = 0
self.tau = 0
def __repr__(self):
return "N(pi={0.pi},tau={0.tau})".format(self)
def __str__(self):
if self.pi == 0.0:
return "N(mu=0,sigma=inf)"
else:
sigma = sqrt(1/self.pi)
mu = self.tau / self.pi
return "N(mu={0:.3f},sigma={1:.3f})".format(mu, sigma)
def MuSigma(self):
""" Return the value of this object as a (mu, sigma) tuple. """
if self.pi == 0.0:
return 0, float("inf")
else:
return self.tau / self.pi, sqrt(1/self.pi)
def __mul__(self, other):
return Gaussian(pi=self.pi+other.pi, tau=self.tau+other.tau)
    def __div__(self, other):
        return Gaussian(pi=self.pi-other.pi, tau=self.tau-other.tau)

    # '/' dispatches to __truediv__ on Python 3; alias it so Gaussian
    # division keeps working there as well.
    __truediv__ = __div__
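# Sketch of the natural-parameter arithmetic above: multiplying Gaussians
# adds precisions (pi) and precision-weighted means (tau); dividing subtracts
# them, which is what makes the message-passing updates below cheap.
#     >>> g = Gaussian(mu=25.0, sigma=25.0/3) * Gaussian(mu=30.0, sigma=5.0)
#     >>> g.MuSigma()               # roughly (28.68, 4.29): pulled toward the sharper factor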
class Variable(object):
""" A variable node in the factor graph. """
def __init__(self):
self.value = Gaussian()
self.factors = {}
def AttachFactor(self, factor):
self.factors[factor] = Gaussian()
def UpdateMessage(self, factor, message):
old_message = self.factors[factor]
self.value = self.value / old_message * message
self.factors[factor] = message
def UpdateValue(self, factor, value):
old_message = self.factors[factor]
self.factors[factor] = value * old_message / self.value
self.value = value
def GetMessage(self, factor):
return self.factors[factor]
class Factor(object):
""" Base class for a factor node in the factor graph. """
def __init__(self, variables):
self.variables = variables
for v in variables:
v.AttachFactor(self)
# The following Factor classes implement the five update equations
# from Table 1 of the Herbrich et al. paper.
class PriorFactor(Factor):
""" Connects to a single variable, pushing a fixed (Gaussian) value
to that variable. """
def __init__(self, variable, param):
super(PriorFactor, self).__init__([variable])
self.param = param
def Start(self):
self.variables[0].UpdateValue(self, self.param)
class LikelihoodFactor(Factor):
""" Connects two variables, the value of one being the mean of the
message sent to the other. """
def __init__(self, mean_variable, value_variable, variance):
super(LikelihoodFactor, self).__init__([mean_variable, value_variable])
self.mean = mean_variable
self.value = value_variable
self.variance = variance
def UpdateValue(self):
""" Update the value after a change in the mean (going "down" in
        the TrueSkill factor graph). """
y = self.mean.value
fy = self.mean.GetMessage(self)
a = 1.0 / (1.0 + self.variance * (y.pi - fy.pi))
self.value.UpdateMessage(self, Gaussian(pi=a*(y.pi - fy.pi),
tau=a*(y.tau - fy.tau)))
def UpdateMean(self):
""" Update the mean after a change in the value (going "up" in
        the TrueSkill factor graph). """
# Note this is the same as UpdateValue, with self.mean and
# self.value interchanged.
x = self.value.value
fx = self.value.GetMessage(self)
a = 1.0 / (1.0 + self.variance * (x.pi - fx.pi))
self.mean.UpdateMessage(self, Gaussian(pi=a*(x.pi - fx.pi),
tau=a*(x.tau - fx.tau)))
class SumFactor(Factor):
""" A factor that connects a sum variable with 1 or more terms,
which are summed after being multiplied by fixed (real)
coefficients. """
def __init__(self, sum_variable, terms_variables, coeffs):
assert len(terms_variables) == len(coeffs)
self.sum = sum_variable
self.terms = terms_variables
self.coeffs = coeffs
super(SumFactor, self).__init__([sum_variable] + terms_variables)
def _InternalUpdate(self, var, y, fy, a):
new_pi = 1.0 / (sum(a[j]**2 / (y[j].pi - fy[j].pi) for j in range(len(a))))
new_tau = new_pi * sum(a[j] *
(y[j].tau - fy[j].tau) / (y[j].pi - fy[j].pi)
for j in range(len(a)))
var.UpdateMessage(self, Gaussian(pi=new_pi, tau=new_tau))
def UpdateSum(self):
""" Update the sum value ("down" in the factor graph). """
y = [t.value for t in self.terms]
fy = [t.GetMessage(self) for t in self.terms]
a = self.coeffs
self._InternalUpdate(self.sum, y, fy, a)
def UpdateTerm(self, index):
""" Update one of the term values ("up" in the factor graph). """
# Swap the coefficients around to make the term we want to update
# be the 'sum' of the other terms and the factor's sum, e.g.,
# change:
#
# x = y_1 + y_2 + y_3
#
# to
#
# y_2 = x - y_1 - y_3
#
# then use the same update equation as for UpdateSum.
b = self.coeffs
a = [-b[i] / b[index] for i in range(len(b)) if i != index]
a.insert(index, 1.0 / b[index])
v = self.terms[:]
v[index] = self.sum
y = [i.value for i in v]
fy = [i.GetMessage(self) for i in v]
self._InternalUpdate(self.terms[index], y, fy, a)
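# Added illustration (sketch): for a team-difference factor
# d = (+1)*t1 + (-1)*t2, UpdateTerm(1) rewrites the relation as
# t2 = (+1)*t1 + (-1)*d, i.e. coefficients a = [1.0, -1.0] over the
# swapped variable list [t1, d], and then reuses _InternalUpdate as-is.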
class TruncateFactor(Factor):
""" A factor for (approximately) truncating the team difference
distribution based on a win or a draw (the choice of which is
determined by the functions you pass as V and W). """
def __init__(self, variable, V, W, epsilon):
super(TruncateFactor, self).__init__([variable])
self.var = variable
self.V = V
self.W = W
self.epsilon = epsilon
def Update(self):
x = self.var.value
fx = self.var.GetMessage(self)
c = x.pi - fx.pi
d = x.tau - fx.tau
sqrt_c = sqrt(c)
args = (d / sqrt_c, self.epsilon * sqrt_c)
V = self.V(*args)
W = self.W(*args)
new_val = Gaussian(pi=c / (1.0 - W), tau=(d + sqrt_c * V) / (1.0 - W))
self.var.UpdateValue(self, new_val)
def DrawProbability(epsilon, beta, total_players=2):
""" Compute the draw probability given the draw margin (epsilon). """
return 2 * cdf(epsilon / (sqrt(total_players) * beta)) - 1
def DrawMargin(p, beta, total_players=2):
""" Compute the draw margin (epsilon) given the draw probability. """
return icdf((p+1.0)/2) * sqrt(total_players) * beta
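# Added check (sketch, not in the original module): DrawMargin inverts
# DrawProbability for a fixed beta; the tolerance is loose in case icdf
# is a numerical approximation.
def _example_draw_margin_roundtrip(beta=25.0 / 6.0):
    eps = DrawMargin(0.10, beta)
    assert abs(DrawProbability(eps, beta) - 0.10) < 1e-6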
INITIAL_MU = 25.0
INITIAL_SIGMA = INITIAL_MU / 3.0
def SetParameters(beta=None, epsilon=None, draw_probability=None,
gamma=None):
"""
Sets three global parameters used in the TrueSkill algorithm.
beta is a measure of how random the game is. You can think of it as
the difference in skill (mean) needed for the better player to have
an ~80% chance of winning. A high value means the game is more
random (I need to be *much* better than you to consistently overcome
the randomness of the game and beat you 80% of the time); a low
value is less random (a slight edge in skill is enough to win
consistently). The default value of beta is half of INITIAL_SIGMA
(the value suggested by the Herbrich et al. paper).
epsilon is a measure of how common draws are. Instead of specifying
epsilon directly you can pass draw_probability instead (a number
from 0 to 1, saying what fraction of games end in draws), and
epsilon will be determined from that. The default epsilon
corresponds to a draw probability of 0.1 (10%). (You should pass a
value for either epsilon or draw_probability, not both.)
gamma is a small amount by which a player's uncertainty (sigma) is
increased prior to the start of each game. This allows us to
account for skills that vary over time; the effect of old games
on the estimate will slowly disappear unless reinforced by evidence
from new games.
"""
global BETA, EPSILON, GAMMA
if beta is None:
BETA = INITIAL_SIGMA / 2.0
else:
BETA = beta
if epsilon is None:
if draw_probability is None:
draw_probability = 0.10
EPSILON = DrawMargin(draw_probability, BETA)
else:
EPSILON = epsilon
if gamma is None:
GAMMA = INITIAL_SIGMA / 100.0
else:
GAMMA = gamma
SetParameters()
def AdjustPlayers(players):
"""
Adjust the skills of a list of players.
'players' is a list of player objects, for all the players who
participated in a single game. A 'player object' is any object with
a "skill" attribute (a (mu, sigma) tuple) and a "rank" attribute.
Lower ranks are better; the lowest rank is the overall winner of the
game. Equal ranks mean that the two players drew.
This function updates all the "skill" attributes of the player
objects to reflect the outcome of the game. The input list is not
altered.
"""
players = players[:]
# Sort players by rank, the factor graph will connect adjacent team
# performance variables.
players.sort(key=lambda p: p.rank)
# Create all the variable nodes in the graph. "Teams" are each a
# single player; there's a one-to-one correspondence between players
# and teams. (It would be straightforward to make multiplayer
# teams, but it's not needed for my current purposes.)
ss = [Variable() for p in players]
ps = [Variable() for p in players]
ts = [Variable() for p in players]
ds = [Variable() for p in players[:-1]]
# Create each layer of factor nodes. At the top we have priors
# initialized to the player's current skill estimate.
skill = [PriorFactor(s, Gaussian(mu=pl.skill[0],
sigma=pl.skill[1] + GAMMA))
for (s, pl) in zip(ss, players)]
skill_to_perf = [LikelihoodFactor(s, p, BETA**2)
for (s, p) in zip(ss, ps)]
perf_to_team = [SumFactor(t, [p], [1])
for (p, t) in zip(ps, ts)]
team_diff = [SumFactor(d, [t1, t2], [+1, -1])
for (d, t1, t2) in zip(ds, ts[:-1], ts[1:])]
# At the bottom we connect adjacent teams with a 'win' or 'draw'
# factor, as determined by the rank values.
trunc = [TruncateFactor(d,
Vdraw if pl1.rank == pl2.rank else Vwin,
Wdraw if pl1.rank == pl2.rank else Wwin,
EPSILON)
for (d, pl1, pl2) in zip(ds, players[:-1], players[1:])]
# Start evaluating the graph by pushing messages 'down' from the
# priors.
for f in skill:
f.Start()
for f in skill_to_perf:
f.UpdateValue()
for f in perf_to_team:
f.UpdateSum()
# Because the truncation factors are approximate, we iterate,
# adjusting the team performance (t) and team difference (d)
# variables until they converge. In practice this seems to happen
# very quickly, so I just do a fixed number of iterations.
#
# This order of evaluation is given by the numbered arrows in Figure
# 1 of the Herbrich paper.
for i in range(5):
for f in team_diff:
f.UpdateSum() # arrows (1) and (4)
for f in trunc:
f.Update() # arrows (2) and (5)
for f in team_diff:
f.UpdateTerm(0) # arrows (3) and (6)
f.UpdateTerm(1)
# Now we push messages back up the graph, from the teams back to the
# player skills.
for f in perf_to_team:
f.UpdateTerm(0)
for f in skill_to_perf:
f.UpdateMean()
# Finally, the players' new skills are the new values of the s
# variables.
for s, pl in zip(ss, players):
pl.skill = s.value.MuSigma()
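# Added usage sketch (hypothetical Player class, not part of the original
# module): any object with 'skill' and 'rank' attributes will do.
def _example_adjust_two_players():
    class Player(object):
        def __init__(self, rank):
            self.skill = (INITIAL_MU, INITIAL_SIGMA)
            self.rank = rank
    winner, loser = Player(1), Player(2)
    AdjustPlayers([winner, loser])
    # The winner's mu rises and the loser's falls; both sigmas shrink.
    return winner.skill, loser.skill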
__all__ = ["AdjustPlayers", "SetParameters", "INITIAL_MU", "INITIAL_SIGMA"]
|
nhaehnle/aiant
|
test/trueskill.py
|
Python
|
apache-2.0
| 13,012
|
[
"Gaussian"
] |
8dc84005802b3ba14ac915f42997411966e001ac9139875591c066af5b1efa65
|
"""
Test basic molecular features.
"""
import numpy as np
import unittest
from rdkit import Chem
from vs_utils.features.basic import MolecularWeight, SimpleDescriptors
class TestMolecularWeight(unittest.TestCase):
"""
Test MolecularWeight.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
self.engine = MolecularWeight()
def testMW(self):
"""
Test MW.
"""
assert np.allclose(self.engine([self.mol]), 180, atol=0.1)
class TestSimpleDescriptors(unittest.TestCase):
"""
Test SimpleDescriptors.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
self.engine = SimpleDescriptors()
def testSimpleDescriptors(self):
"""
Test simple descriptors.
"""
descriptors = self.engine([self.mol])
assert np.allclose(
descriptors[0, self.engine.descriptors.index('ExactMolWt')], 180,
atol=0.1)
|
rbharath/vs-utils
|
vs_utils/features/tests/test_basic.py
|
Python
|
gpl-3.0
| 1,034
|
[
"RDKit"
] |
14eb179c993fc70d240b6c6fa3b53cf8b38c39d13428166bd41f5c8d07a00c44
|
# coding: utf-8
from __future__ import print_function, division, absolute_import
from cutadapt.colorspace import encode, decode
from cutadapt.scripts.cutadapt import main
from .utils import run, datapath
# If there are any unknown characters in the test sequence,
# round tripping will only work if all characters after the
# first unknown character are also unknown:
# encode("TNGN") == "T444", but
# decode("T444") == "TNNN".
sequences = [
"",
"C",
"ACGGTC",
"TN",
"TN.",
"TNN.N",
"CCGGCAGCATTCATTACGACAACGTGGCACCGTGTTTTCTCGGTGGTA",
"TGCAGTTGATGATCGAAGAAAACGACATCATCAGCCAGCAAGTGC",
"CAGGGTTTGATGAGTGGCTGTGGGTGCTGGCGTATCCGGG"
]
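# Added check (sketch): the round-tripping caveat described above, as an
# explicit test using the exact example from the comment.
def test_roundtrip_unknown_tail():
    assert encode("TNGN") == "T444"
    assert decode("T444") == "TNNN"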
def test_encode():
assert encode("AA") == "A0"
assert encode("AC") == "A1"
assert encode("AG") == "A2"
assert encode("AT") == "A3"
assert encode("CA") == "C1"
assert encode("CC") == "C0"
assert encode("CG") == "C3"
assert encode("CT") == "C2"
assert encode("GA") == "G2"
assert encode("GC") == "G3"
assert encode("GG") == "G0"
assert encode("GT") == "G1"
assert encode("TA") == "T3"
assert encode("TC") == "T2"
assert encode("TG") == "T1"
assert encode("TT") == "T0"
assert encode("TN") == "T4"
assert encode("NT") == "N4"
assert encode("NN") == "N4"
assert encode("ACGGTC") == "A13012"
assert encode("TTT.N") == "T0044"
assert encode("TTNT.N") == "T04444"
def test_decode():
for s in sequences:
expected = s.replace('.', 'N')
encoded = encode(s)
assert decode(encoded) == expected
assert decode('A.') == 'AN'
assert decode('C.') == 'CN'
assert decode('G.') == 'GN'
assert decode('T.') == 'TN'
def test_qualtrim_csfastaqual():
'''-q with csfasta/qual files'''
run("-c -q 10", "solidqual.fastq", "solid.csfasta", 'solid.qual')
def test_E3M():
'''Read the E3M dataset'''
# not really colorspace, but a fasta/qual file pair
main(['-o', '/dev/null', datapath("E3M.fasta"), datapath("E3M.qual")])
def test_bwa():
'''MAQ-/BWA-compatible output'''
run("-c -e 0.12 -a 330201030313112312 -x 552: --maq", "solidmaq.fastq", "solid.csfasta", 'solid.qual')
def test_bfast():
'''BFAST-compatible output'''
run("-c -e 0.12 -a 330201030313112312 -x abc: --strip-f3", "solidbfast.fastq", "solid.csfasta", 'solid.qual')
def test_trim_095():
'''some reads properly trimmed since cutadapt 0.9.5'''
run("-c -e 0.122 -a 330201030313112312", "solid.fasta", "solid.fasta")
def test_solid():
run("-c -e 0.122 -a 330201030313112312", "solid.fastq", "solid.fastq")
def test_solid_basespace_adapter():
'''colorspace adapter given in basespace'''
run("-c -e 0.122 -a CGCCTTGGCCGTACAGCAG", "solid.fastq", "solid.fastq")
def test_solid5p():
'''test 5' colorspace adapter'''
# this is not a real adapter, just a random string
# in colorspace: C0302201212322332333
run("-c -e 0.1 --trim-primer -g CCGGAGGTCAGCTCGCTATA", "solid5p.fasta", "solid5p.fasta")
def test_solid5p_prefix_notrim():
'''test anchored 5' colorspace adapter, no primer trimming'''
run("-c -e 0.1 -g ^CCGGAGGTCAGCTCGCTATA", "solid5p-anchored.notrim.fasta", "solid5p.fasta")
def test_solid5p_prefix():
'''test anchored 5' colorspace adapter'''
run("-c -e 0.1 --trim-primer -g ^CCGGAGGTCAGCTCGCTATA", "solid5p-anchored.fasta", "solid5p.fasta")
def test_solid5p_fastq():
'''test 5' colorspace adapter'''
# this is not a real adapter, just a random string
# in colorspace: C0302201212322332333
run("-c -e 0.1 --trim-primer -g CCGGAGGTCAGCTCGCTATA", "solid5p.fastq", "solid5p.fastq")
def test_solid5p_prefix_notrim_fastq():
'''test anchored 5' colorspace adapter, no primer trimming'''
run("-c -e 0.1 -g ^CCGGAGGTCAGCTCGCTATA", "solid5p-anchored.notrim.fastq", "solid5p.fastq")
def test_solid5p_prefix_fastq():
'''test anchored 5' colorspace adapter'''
run("-c -e 0.1 --trim-primer -g ^CCGGAGGTCAGCTCGCTATA", "solid5p-anchored.fastq", "solid5p.fastq")
def test_sra_fastq():
'''test SRA-formatted colorspace FASTQ'''
run("-c -e 0.1 --format sra-fastq -a CGCCTTGGCCGTACAGCAG", "sra.fastq", "sra.fastq")
def test_no_zero_cap():
run("--no-zero-cap -c -e 0.122 -a CGCCTTGGCCGTACAGCAG", "solid-no-zerocap.fastq", "solid.fastq")
|
Chris7/cutadapt
|
tests/testcolorspace.py
|
Python
|
mit
| 4,096
|
[
"BWA"
] |
41f26e3523f76ef168c546fa33908291e085bf09e43ee68a17acd15421438a5d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from retools.limiter import Limiter as ReToolsLimiter
from octopus.limiter.in_memory.per_domain import Limiter as InMemoryPerDomainLimiter
class Limiter(InMemoryPerDomainLimiter):
def __init__(self, *domains, **kw):
limiter_miss_timeout_ms = None
if 'limiter_miss_timeout_ms' in kw:
limiter_miss_timeout_ms = kw['limiter_miss_timeout_ms']
# Skips InMemoryPerDomainLimiter constructor
super(InMemoryPerDomainLimiter, self).__init__(limiter_miss_timeout_ms=limiter_miss_timeout_ms)
if 'redis' not in kw:
raise RuntimeError('You must specify a connection to redis in order to use Redis Limiter.')
self.redis = kw['redis']
self.expiration_in_seconds = float(kw.get('expiration_in_seconds', 10))
self.update_domain_definitions(*domains)
def update_domain_definitions(self, *domains):
self.domains = domains
self.limiters = {}
for domain in self.domains:
for key, limit in domain.items():
self.limiters[key] = ReToolsLimiter(
limit=limit,
prefix='limit-for-%s' % key,
expiration_in_seconds=self.expiration_in_seconds,
redis=self.redis
)
def acquire(self, url):
domain = self.get_domain_from_url(url)
if domain is None:
logging.info('Tried to acquire lock to a domain that was not specified in the limiter (%s).' % url)
return True
could_lock = self.limiters[domain].acquire_limit(url)
if not could_lock:
logging.info('Tried to acquire lock for %s but could not.' % url)
return could_lock
def release(self, url):
domain = self.get_domain_from_url(url)
if domain is None:
logging.info('Tried to release lock to a domain that was not specified in the limiter (%s).' % url)
return True
self.limiters[domain].release_limit(url)
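# Added usage sketch (hypothetical domains and limits, not part of the
# original module): each positional argument maps a domain to its
# concurrent-request limit; a redis connection is a required keyword.
def _example_usage():
    import redis
    limiter = Limiter(
        {'http://g1.globo.com': 10},
        {'http://globoesporte.globo.com': 5},
        redis=redis.StrictRedis(host='localhost', port=6379, db=0),
        expiration_in_seconds=10,
    )
    url = 'http://g1.globo.com/news'
    if limiter.acquire(url):
        try:
            pass  # fetch the page here
        finally:
            limiter.release(url)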
|
heynemann/octopus
|
octopus/limiter/redis/per_domain.py
|
Python
|
mit
| 2,072
|
[
"Octopus"
] |
75021185aa1d5cc6ee283f18eaf1d7e1fe13dde2f7dc28b9ac786a6e86bc4888
|
'''
Created on 5 Sep 2013
@author: Brian
===============================================================================
INETCOMMS has classes for communications over the internet
Supported features are:
CLASS FTPLINK:
Uploading data and video files to an FTP server
Downloading new setup files from an FTP server
CLASS UDPECHO
CLASS TCPECHO
TCPecho forwards VT requests to the attached VT system and returns replies
UDPecho forwards UDP messages from a remote address to the VT system on port 47473
and forwards UDP packets from the VT system to the remote address
They allow VT7viewer to be used to setup the VT as though it were directly connected
The site IP and port should be set to:
IP address of the Python device (e.g. RPi) and port 47481
CLASS VTmailer
emails a text message, with an optional file attachment, via SMTP
CLASS formMailer
sends an html form by the POST method to a known html server, with php.
The php page will store the form element values in a mySQL table
CLASS Qserver
Responds to POST requests and returns an HTML page which shows current queue
and occupancy values
'''
import socket
import ftplib
import time
import threading
import os
import cgi
# EMAILING
import smtplib
# multi-part forms
import itertools
import mimetools
import mimetypes
from cStringIO import StringIO
import urllib
import urllib2
import vtsetter
import watchdogUtils
# email
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email import Encoders
# html server
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
'''
==========================================================================================
CLASS FTPLINK
is used to upload and download files from the videoturnstile website
==========================================================================================
'''
class FTPLink():
myLog = None
listDropboxFiles = {}
downloadFiles = ["sites.xml","wmManager.xml","Queue.ini","sensorCheck.xml","PiVT_readLogger.py"]
startpath = "/home/pi/data/"
sHost = "ftpproxy2.one.com"
sUN = "davidleecollins.co.uk"
sPWD = "Halibuf123"
sRootDir = "/VT"
ftp = None
curDir = None # the current directory on the ftp server
VEreply = 0 # bitmask of expected VT addresses (one bit per address)
bIsRunning = True
bNeedsReboot = False
managerDoc = None
myWatchdog = None
myComms = None # comms link to the VT system (assumed to be set externally before downloadList pushes new settings)
def getNeedsReboot(self):
return self.bNeedsReboot
def getIsRunning(self):
return self.bIsRunning
def setVEreply(self,vereply):
self.VEreply = int(vereply)
def FTPopenLink(self):
try:
ipAddr = socket.gethostbyname(self.sHost)
self.myLog.appendToLog("Connecting to "+self.sHost+ " at "+ipAddr)
self.ftp = ftplib.FTP(ipAddr)
self.ftp.login(self.sUN, self.sPWD)
if not(self.rootDir()):
self.ftp = None
except:
self.myLog.appendToLog("No internet connection")
self.ftp=None
def FTPclose(self):
if (self.ftp != None):
self.myLog.appendToLog("Closing FTP connection")
try:
self.ftp.quit()
except:
pass
self.ftp = None
def FTPterminate(self):
if (self.ftp != None):
self.ftp.close()
def rootDir(self):
try:
self.ftp.cwd(self.sRootDir)
self.curDir = self.sRootDir
return True
except:
self.myLog.appendToLog("FTP command cwd("+self.sRootDir+") failed")
return False
def uploadFile(self, localSpec, specfile):
''' build the directory to the file spec '''
if not(self.rootDir()):
return False
''' reset the watchdog timer '''
if (self.myWatchdog != None):
self.myWatchdog.reset()
sList = specfile.split('/')
# goto the last but one element in the list
n = len(sList)
if (n > 1):
i = 0
while (i < n-1):
sDir = sList[i]
if ((not("." in sDir)) and (len(sDir)> 0)):
self.changeDir(sDir)
i += 1
try:
self.myLog.appendToLog("Transferring: "+localSpec+" to: "+specfile)
''' reset the watchdog timer '''
if (self.myWatchdog != None):
self.myWatchdog.reset()
f = open(localSpec,"rb")
a = "STOR " + sList[n-1]
self.ftp.storbinary (a, f)
''' reset the watchdog timer '''
if (self.myWatchdog != None):
self.myWatchdog.reset()
time.sleep(2)
self.myLog.appendToLog("Transfer complete")
f.close()
''' reset the watchdog timer '''
if (self.myWatchdog != None):
self.myWatchdog.reset()
return True
except:
self.myLog.appendToLog("Failed to transfer "+localSpec)
return False
def downloadFile(self, filename, specfile):
''' build the directory to the file spec '''
if not(self.rootDir()):
return False
''' reset the watchdog timer '''
if (self.myWatchdog != None):
self.myWatchdog.reset()
sList = specfile.split('/')
# goto the last but one element in the list
n = len(sList)
if (n > 1):
i = 0
while (i < n-1):
sDir = sList[i]
if ((not("." in sDir)) and (len(sDir)> 0)):
self.changeDir(sDir)
i += 1
''' does the file exist on the server '''
sName = sList[n-1]
sDirList = self.ftp.nlst()
if not(sName in sDirList):
self.myLog.appendToLog("File "+sName+" not found")
return False
try:
self.myLog.appendToLog("Downloading: "+specfile)
f = open(filename,'wb')
self.ftp.retrbinary('RETR '+sName,f.write)
time.sleep(2)
f.close()
self.ftp.delete(sName)
return True
except:
self.myLog.appendToLog("failed to download "+specfile)
return False
def addToList(self, localSpec, dropboxSpec):
self.listDropboxFiles[localSpec] = dropboxSpec
def changeDir(self,sDir):
if (sDir == ""):
return
try:
self.ftp.cwd(self.curDir+"/"+sDir)
except:
self.ftp.mkd(sDir)
self.ftp.cwd(self.curDir+"/"+sDir)
self.curDir+="/"+sDir
#appendToLog"current FTP server directory: "+self.curDir
def uploadList(self, liFiles):
self.listDropboxFiles = liFiles
#self.client = dropbox.client.DropboxClient(self.access_token)
if (self.ftp == None):
return
self.myLog.appendToLog("uploading .wl files")
n = len(self.listDropboxFiles)
if (n == 0):
return
for localSpec in self.listDropboxFiles.keys():
if (('.wl' in localSpec) or ('.ew7' in localSpec) or ('.txt' in localSpec)):
basename, extension = localSpec.split(".")
if ((extension == "wl") or (extension == "ew7") or (extension == "txt")):
bDone = self.uploadFile(localSpec,self.listDropboxFiles[localSpec])
if (bDone):
''' delete the dictionary entry '''
del self.listDropboxFiles[localSpec]
''' delete the local file TODO if required'''
self.myLog.appendToLog("uploading .vvd and .vvs files")
n = len(self.listDropboxFiles)
if (n == 0):
return
for localSpec in self.listDropboxFiles.keys():
if ('.vv' in localSpec):
basename, extension = localSpec.split(".")
if extension == "vvs" or extension == "vvd":
bDone = self.uploadFile(localSpec,self.listDropboxFiles[localSpec])
if (bDone):
''' delete the dictionary entry '''
del self.listDropboxFiles[localSpec]
''' delete the local file '''
''' TODO '''
def getFileList(self):
return self.listDropboxFiles
def downloadList(self,myManager, mySites):
''' While we are connected try to download new setup files
These come from /VT/customer/downloads/prefix/
File names are:
sites.xml
wmManager.xml
Queue.ini
sensorCheck.xml
PiVT_readLogger.py
'''
if (self.ftp == None):
self.myLog.appendToLog('downloadList - No connection to FTP server')
return
sPrefix = mySites.getPrefix()
sCustomer = myManager.getCustomer()
dropboxDir ="/downloads/"+sCustomer+"/"+sPrefix+"/"
localDir = myManager.getSetupsDir()
self.bIsRunning = True # because we must be running to have got here
self.bNeedsReboot = False
try:
for s in self.downloadFiles:
if (self.downloadFile(localDir+s, dropboxDir+s)):
if (s == "wmManager.xml"):
self.bIsRunning = False # restart program
self.bNeedsReboot = True
if (s == "sites.xml"):
self.bIsRunning = False
self.bNeedsReboot = False
if (s == "Queue.ini"):
self.bIsRunning = False
self.bNeedsReboot = False
if (s == "sensorCheck.xml"):
self.bIsRunning = False
self.bNeedsReboot = False
if (s == "PiVT_readLogger.py"):
self.bIsRunning = False
self.bNeedsReboot = True
''' and now try to get new settings file for each of the attached VTs
The file name will be:
VTS_Pprefix_L0_VTx.xml where x is the VT address
'''
''' TODO - test this'''
myVT = vtsetter.setVT()
sBase = dropboxDir+"VTS_P"+sPrefix+"_L0_VT"
nVE = self.VEreply # we have previously read the expected VT list (in readBasicData)
nVal = 256*128
iAddr = 15
while (nVal >= 1):
if (nVE >= nVal):
''' This address should exist - so try to download a new settings file '''
sFTPfile = sBase+str(iAddr)+".xml"
if (self.downloadFile(localDir+"newVT.xml", sFTPfile)):
myVT.getVTsettings(localDir+"newVT.xml")
self.myLog.appendToLog("New settings received for VT: "+str(iAddr))
myVT.sendSettingsToVT(self.myComms, iAddr)
nVE = nVE - nVal
nVal = nVal / 2
iAddr = iAddr - 1
except:
self.myLog.appendToLog("Error downloading files")
def __init__(self, loggingInstance, wmManagerDoc, wd):
self.myLog = loggingInstance
self.managerDoc = wmManagerDoc
self.myWatchdog = wd
self.sHost = self.managerDoc.getFTPhost()
self.sUN = self.managerDoc.getFTPuser()
self.sPWD = self.managerDoc.getFTPpassword()
self.sRootDir = self.managerDoc.getFTProotdir()
'''
CLASSES UDPecho and TCPecho provide the two echo servers
to allow VT command to be received from a PC running VT7viewer, passed on
to the VT system, and the replies passed back
'''
class UDPecho(threading.Thread):
sIn = None
sClientAddr = ""
TCP_IP = ""
bNeeded = True
def setClientAddr(self, sAddr):
self.sClientAddr = sAddr
def run(self):
HOST=''
buf = 1024
nVTUDPport = 47473
nClientPort = 47473
'''
we will
loop forever, restarting the socket as a listener
so if one remote pc disconnects we will be ready for another connection
Once a connection has been established, we loop forever receiving commands and forwarding
them to the VT logger, and then returning the reply
'''
try:
self.sIn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sIn.bind(('',nVTUDPport))
except:
return
while self.bNeeded:
stat = self.sIn.recvfrom(buf)
data, raddr = stat
remaddr, remport = raddr
if data:
#print ("udp packet from "+remaddr)
#print ("Addresses: sClientAddr "+self.sClientAddr+" TCP_IP "+self.TCP_IP)
if (remaddr == self.sClientAddr):
nClientPort = remport
sForward = (self.TCP_IP, nVTUDPport)
else:
sForward = (self.sClientAddr,nClientPort)
#print "bytes recvd: "+str(len(data)), data
if not(sForward == None):
if not(sForward[0]==''):
#print ("forwarding to: "+sForward[0])
stat = self.sIn.sendto(data,sForward)
#print "bytes forwarded: "+str(stat)
if not(self.sIn == None):
self.sIn.shutdown(socket.SHUT_RDWR)
self.sIn.close()
self.sIn = None
def close(self):
self.bNeeded = False
def __init__(self, sVTsystemAddr):
threading.Thread.__init__(self)
self.TCP_IP = sVTsystemAddr
class TCPecho(threading.Thread):
myLog = None
VTPort = 47481
VTmonitor = None
thisUDP = None
bIsRunning = True
mySelf = None
def setComms(self, commsLink):
self.VTmonitor = commsLink
self.myLog.appendToLog("TCP echo server started on port: "+str(self.VTPort))
def close(self):
self.bIsRunning = False
def run(self):
nCommsStart = 0
HOST=''
'''
we will loop forever, restarting the socket as a listener
so if one remote pc disconnects we will be ready for another connection
Once a connection has been established, we loop forever receiving commands and forwarding
them to the VT logger, and then returning the reply
'''
'''
Open a udp echo server to send udp packets from the VT onto the connected host
'''
thisUDP = UDPecho(self.VTmonitor.getCommsIP())
thisUDP.start()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
s.bind((HOST, self.VTPort))
except:
return
while self.bIsRunning:
s.listen(0)
conn, addr = s.accept()
self.sClientAddr, remport = addr
self.myLog.appendToLog("TCP echo server connection opened by "+self.sClientAddr)
thisUDP.setClientAddr(self.sClientAddr)
nCommsStart = time.time()
'''
create a comms link to the VT system
'''
bInUse = False # default, so the loop below is skipped if the comms link is busy
if not(self.VTmonitor.getIsBusy()):
self.VTmonitor.setIsBusy(True)
bInUse = True
try:
'''
Get data message from the connected host and send it to the VT
'''
while bInUse:
data = conn.recv(1024)
if data:
#print "recvd message: "+data
''' does the message contain the stream command ?
if so we will forward it as a UDP message
'''
''' now forward the rest through VTcomms '''
reply = self.VTmonitor.VTforward(data)
#print "reply from VT: "+reply
#print "reply bytes: "+str(len(reply)) + reply
n = conn.send(reply)
#print "TCP reply length: "+str(n)
nCommsStart = time.time()
if ((time.time() - nCommsStart) > 60):
self.myLog.appendToLog("TCPecho timeout - releasing comms")
self.VTmonitor.setIsBusy(False)
bInUse = False
except socket.error:
self.myLog.appendToLog("TCP echo error")
self.VTmonitor.setIsBusy(False)
bInUse = False
conn.close()
self.VTmonitor.setIsBusy(False)
thisUDP.close()
#s.shutdown(socket.SHUT_RDWR)
s.close()
#appendToLog("TCP echo comms released")
def __init__(self, loggingInstance, commsLink):
threading.Thread.__init__(self)
self.myLog = loggingInstance
self.setComms(commsLink)
'''
======================================================================================
CLASS VTMAILER
This emails a message in text
and a list of files in attach
to the videoturnstile technical support using gmail
======================================================================================
'''
class VTmailer():
managerDoc = None
def mail(self, to, subject, text, attach):
gmail_user = "grahamad.collins@gmail.com"
gmail_pwd = "willow17"
msg = MIMEMultipart()
if not(self.managerDoc == None ):
gmail_user = self.managerDoc.getMailFrom()
gmail_pwd = self.managerDoc.getMailPassword()
msg['From'] = gmail_user
msg['To'] = to
msg['Subject'] = subject
msg.attach(MIMEText(text))
if (attach != ""):
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attach,'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"'% os.path.basename(attach))
msg.attach(part)
try:
mailServer = smtplib.SMTP("smtp.gmail.com",587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmail_user,gmail_pwd)
mailServer.sendmail(gmail_user,to,msg.as_string())
mailServer.close()
except:
#print "Cannot contact mail server"
return
def __init__(self, wmManagerDoc):
self.managerDoc = wmManagerDoc
'''
======================================================================================
CLASS FORMMAILER
builds and sends an html form by the POST method
======================================================================================
'''
class formMailer(object):
''' Destination for submitting the form '''
sPostAddress = "http://www.davidleecollins.co.uk/POSTfile.php"
'''Accumulate the data to be used when posting a form.'''
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = mimetools.choose_boundary()
return
def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""Add a simple field to the form data."""
self.form_fields.append((name, value))
return
def add_file(self, fieldname, filename, mimetype=None):
"""Add a file to be uploaded."""
try:
f = open(filename)
except:
#print "could not open file: "+filename
return
body = f.read()
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((fieldname, filename, mimetype, body))
return
def __str__(self):
"""Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
#print ("Fields:")
#print self.form_fields
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[ part_boundary,
'Content-Disposition: form-data; name="%s"' % name,
'',
value,
]
for name, value in self.form_fields
)
#print "parts:"
#print parts
# Add the files to upload
'''
parts.extend(
[ part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \
(field_name, filename),
'Content-Type: %s' % content_type,
'',
body,
]
for field_name, filename, content_type, body in self.files
)
'''
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
s = '\r\n'.join(flattened)
return s
def addFieldList(self,dictFields):
for sKey in dictFields.keys():
self.add_field(sKey,dictFields[sKey])
def addFile(self, sFileSpec):
self.add_file("file", sFileSpec,"text/plain")
def sendForm(self):
# Build the request
request = urllib2.Request(self.sPostAddress)
request.add_header('User-agent', 'PiVoT (http://www.windmill.co.uk)')
body = str(self)
request.add_header('Content-type', self.get_content_type())
request.add_header('Content-length', len(body))
request.add_data(body)
#print
#print 'OUTGOING DATA:'
#print request.get_data()
#print
#print 'SERVER RESPONSE:'
try:
s = urllib2.urlopen(request).read()
request = None
return True
except:
#print "Error sending form"
request = None
return False
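# Added usage sketch (hypothetical field values, not part of the original
# module): accumulate fields, then POST the form to sPostAddress.
def _exampleFormPost():
    form = formMailer()
    form.addFieldList({'site': 'demo', 'queue': '42'})
    return form.sendForm()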
class QHandler(BaseHTTPRequestHandler):
myQlogger = None
def do_GET(self):
try:
sVTmessage = self.myQlogger.getHTMLpage()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(sVTmessage)
return
except IOError:
self.send_error(404,'File not found: %s' % self.path)
def do_POST(self):
try:
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'multipart/form-data':
query = cgi.parse_multipart(self.rfile, pdict)
self.send_response(301)
self.end_headers()
upfilecontent = query.get('upfile')
#print "filecontent", upfilecontent[0]
self.wfile.write(",HTML>POST OK.<BR><BR>");
self.wfile.write(upfilecontent[0]);
except:
pass
# Note: no custom __init__ -- HTTPServer instantiates the handler class
# itself, so Qserver injects the logger via the myQlogger class attribute.
class Qserver(threading.Thread):
server = None
Qlogger = None
def run(self):
try:
QHandler.myQlogger = self.Qlogger # the handler must be passed as a class, not an instance
self.server = HTTPServer(('', 80), QHandler)
#print 'started httpserver...'
except:
#print 'Unable to create server'
return
try:
self.server.serve_forever()
except KeyboardInterrupt:
#print '^C received, closing down server'
bRunning = 0
self.server.socket.close()
def stop(self):
self.server.socket.close()
def __init__(self,thisLogger):
threading.Thread.__init__(self)
self.Qlogger = thisLogger
|
dcollins31/RSPiVT_EchoServer
|
inetcomms.py
|
Python
|
gpl-2.0
| 25,027
|
[
"Brian"
] |
062a6d7f6b360215bd6b06328aa0101a99c004a014cd6c1ec1ac0bec6295a9a2
|
# This function returns the TSV file connected to a given FASTQ file
def getTSV(wildcards):
localFastq =''
if wildcards.fastq[-3:] in ('_R1', '-R1', '_R2', '-R2'):
localFastq = wildcards.fastq[0:-3]
else:
localFastq = wildcards.fastq
return FASTQDIR + wildcards.sample + '/PAIREDEND/' + localFastq + '.tsv'
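# Added example (hypothetical names): for sample 'S1' and fastq 'lane1_R1',
# the '_R1' suffix is stripped and this returns
# FASTQDIR + 'S1/PAIREDEND/lane1.tsv'.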
# This function extracts the information to create the read-group identifier
# given to the read mappers
def createReadGroup(wildcards):
out = [] # list containing flowcellID, lane, library, platform in that order
localFastq =''
if wildcards.fastq[-3:] in ('_R1', '-R1', '_R2', '-R2'):
localFastq = wildcards.fastq[0:-3]
else:
localFastq = wildcards.fastq
tsv = open(FASTQDIR+ wildcards.sample + '/PAIREDEND/' + localFastq + '.tsv')
flowcellID = ''
lane = ''
library = ''
platform = ''
for line in tsv:
if line.strip().startswith('RUN_NAME_FOLDER'):
if flowcellID == '':
flowcellID = line.strip().split('\t')[1]
elif line.strip().startswith('LANE_NUMBER'):
if lane == '':
lane = line.strip().split('\t')[1]
elif line.strip().startswith('SAMPLE_CODE'):
if library == '':
library = line.strip().split('\t')[1]
elif line.strip().startswith('SAMPLE_TYPE'):
if platform == '':
platform = line.strip().split('\t')[1]
tsv.close()
out.append(flowcellID)
out.append(lane)
out.append(library)
out.append(platform)
return out
# This function creates the read-group entry for bowtie2
def createReadGroupBowtie2(wildcards):
values = createReadGroup(wildcards)
return '--rg-id ' + wildcards.sample + '.' + values[0] + '.' + values[1] + ' --rg LB:' + values[2] + ' --rg PL:' + values[3] + ' --rg PU:' + values[0] + '.' + values[1] + '.' + values[2] + ' --rg SM:' + wildcards.sample
# This function creates the read-group entry for yara
def createReadGroupYara(wildcards):
values = createReadGroup(wildcards)
return '\'@RG\\tID:' + wildcards.sample + '.' + values[0] + '.' + values[1] + '\\tLB:' + values[2] + '\\tPL:' + values[3] + '\\tPU:' + values[0] + '.' + values[1] + '.' + values[2] + '\\tSM:' + wildcards.sample + '\''
# This function creates the read-group entry for bwa
def createReadGroupBwa(wildcards):
values = createReadGroup(wildcards)
return '\'@RG\\tID:' + wildcards.sample + '.' + values[0] + '.' + values[1] + '\\tLB:' + values[2] + '\\tPL:' + values[3] + '\\tPU:' + values[0] + '.' + values[1] + '.' + values[2] + '\\tSM:' + wildcards.sample + '\''
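# Added illustration (hypothetical values): for sample 'S1' with flowcell
# 'FC1', lane '3', library 'LIB1' and platform 'ILLUMINA',
# createReadGroupBwa renders:
# '@RG\tID:S1.FC1.3\tLB:LIB1\tPL:ILLUMINA\tPU:FC1.3.LIB1\tSM:S1'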
# This rule aligns unpaired reads using bowtie2
if not 'BOWTIEIN' in globals():
BOWTIEIN = FASTQDIR
if not 'BOWTIEOUT' in globals():
BOWTIEOUT = OUTDIR + 'bowtie2_out/'
rule bowtie2_single:
input:
fastq = BOWTIEIN + '{sample}/PAIREDEND/ORPHAN/{fastq}.fastq.gz',
index1 = config['resources'][ORGANISM]['bowtie2Index'] + '.1.bt2',
index2 = config['resources'][ORGANISM]['bowtie2Index'] + '.2.bt2',
index3 = config['resources'][ORGANISM]['bowtie2Index'] + '.3.bt2',
index4 = config['resources'][ORGANISM]['bowtie2Index'] + '.4.bt2',
index5 = config['resources'][ORGANISM]['bowtie2Index'] + '.rev.1.bt2',
index6 = config['resources'][ORGANISM]['bowtie2Index'] + '.rev.2.bt2',
tsv = getTSV,
output:
bam=temp(BOWTIEOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam'),
params:
sensitivity = config['tools']['bowtie2']['single']['sensitivity'],
k = config['tools']['bowtie2']['single']['k'],
phred = config['tools']['bowtie2']['single']['phred'],
lsfoutfile = BOWTIEOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.lsfout.log',
lsferrfile = BOWTIEOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.lsferr.log',
scratch = config['tools']['bowtie2']['scratch'],
mem = config['tools']['bowtie2']['mem'],
time = config['tools']['bowtie2']['time'],
index = config['resources'][ORGANISM]['bowtie2Index'],
params = config['tools']['bowtie2']['params'],
rg = createReadGroupBowtie2
benchmark:
BOWTIEOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.benchmark'
threads:
config['tools']['bowtie2']['threads']
log:
BOWTIEOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.log'
shell:
('{config[tools][bowtie2][call]} ' +
'-x {params.index} ' +
'-U {input.fastq} ' +
'-k {params.k} ' +
'{params.phred} ' +
'-p {threads} ' +
'{params.sensitivity} ' +
'{params.rg} ' +
'{params.params} ' +
'2> {log} | ' +
'{config[tools][samtools][call]} view -bhS - >{output.bam}')
# This rule aligns paired reads using bowtie2
rule bowtie2_paired:
input:
fastqR1 = BOWTIEIN + '{sample}/PAIREDEND/{fastq}_R1.fastq.gz',
fastqR2 = BOWTIEIN + '{sample}/PAIREDEND/{fastq}_R2.fastq.gz',
index1 = config['resources'][ORGANISM]['bowtie2Index'] + '.1.bt2',
index2 = config['resources'][ORGANISM]['bowtie2Index'] + '.2.bt2',
index3 = config['resources'][ORGANISM]['bowtie2Index'] + '.3.bt2',
index4 = config['resources'][ORGANISM]['bowtie2Index'] + '.4.bt2',
index5 = config['resources'][ORGANISM]['bowtie2Index'] + '.rev.1.bt2',
index6 = config['resources'][ORGANISM]['bowtie2Index'] + '.rev.2.bt2',
tsv = getTSV,
output:
bam=temp(BOWTIEOUT + '{sample}/PAIREDEND/{fastq}.bam'),
params:
sensitivity = config['tools']['bowtie2']['single']['sensitivity'],
k = config['tools']['bowtie2']['single']['k'],
phred = config['tools']['bowtie2']['single']['phred'],
lsfoutfile = BOWTIEOUT + '{sample}/PAIREDEND/{fastq}.bam.lsfout.log',
lsferrfile = BOWTIEOUT + '{sample}/PAIREDEND/{fastq}.bam.lsferr.log',
scratch = config['tools']['bowtie2']['scratch'],
mem = config['tools']['bowtie2']['mem'],
time = config['tools']['bowtie2']['time'],
index = config['resources'][ORGANISM]['bowtie2Index'],
params = config['tools']['bowtie2']['params'],
rg = createReadGroupBowtie2
benchmark:
BOWTIEOUT + '{sample}/PAIREDEND/{fastq}.bam.benchmark'
threads:
config['tools']['bowtie2']['threads']
log:
BOWTIEOUT + '{sample}/PAIREDEND/{fastq}.bam.log'
shell:
('{config[tools][bowtie2][call]} ' +
'-x {params.index} ' +
'-1 {input.fastqR1} ' +
'-2 {input.fastqR2} ' +
'-k {params.k} ' +
'{params.phred} ' +
'-p {threads} ' +
'{params.sensitivity} ' +
'{params.rg} ' +
'{params.params} ' +
'2> {log} | ' +
'{config[tools][samtools][call]} view -bhS - >{output.bam}')
# This rule aligns unpaired reads using bwa-mem
if not 'BWAIN' in globals():
BWAIN = FASTQDIR
if not 'BWAOUT' in globals():
BWAOUT = OUTDIR + 'bwa/'
rule bwa_mem_single:
input:
fastq = BWAIN + '{sample}/PAIREDEND/ORPHAN/{fastq}.fastq.gz',
index1 = config['resources'][ORGANISM]['bwaIndex'] + '.amb',
index2 = config['resources'][ORGANISM]['bwaIndex'] + '.ann',
index3 = config['resources'][ORGANISM]['bwaIndex'] + '.bwt',
index4 = config['resources'][ORGANISM]['bwaIndex'] + '.pac',
index5 = config['resources'][ORGANISM]['bwaIndex'] + '.sa',
tsv = getTSV
output:
bam=temp(BWAOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam')
params:
lsfoutfile = BWAOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.lsfout.log',
lsferrfile = BWAOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.lsferr.log',
scratch = config['tools']['bwa']['mem']['scratch'],
mem = config['tools']['bwa']['mem']['memory'],
time = config['tools']['bwa']['mem']['time'],
index = config['resources'][ORGANISM]['bwaIndex'],
params = config['tools']['bwa']['mem']['params'],
rg = createReadGroupBwa
benchmark:
BWAOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.benchmark'
threads:
config['tools']['bwa']['mem']['threads']
log:
BWAOUT + '{sample}/PAIREDEND/ORPHAN/{fastq}.bam.log'
shell:
('{config[tools][bwa][mem][call]} ' +
'{params.params} ' +
'-R {params.rg} ' +
'-t {threads} ' +
'{params.index} ' +
'{input.fastq} ' +
'2>{log} | {config[tools][samtools][call]} view -bhS - > {output.bam}')
# This rule aligns paired reads using bwa-mem
rule bwa_mem_paired:
input:
fastqR1 = BWAIN + '{sample}/PAIREDEND/{fastq}_R1.fastq.gz',
fastqR2 = BWAIN + '{sample}/PAIREDEND/{fastq}_R2.fastq.gz',
index1 = config['resources'][ORGANISM]['bwaIndex'] + '.amb',
index2 = config['resources'][ORGANISM]['bwaIndex'] + '.ann',
index3 = config['resources'][ORGANISM]['bwaIndex'] + '.bwt',
index4 = config['resources'][ORGANISM]['bwaIndex'] + '.pac',
index5 = config['resources'][ORGANISM]['bwaIndex'] + '.sa',
tsv = getTSV
output:
bam=temp(BWAOUT + '{sample}/PAIREDEND/{fastq}.bam')
params:
lsfoutfile = BWAOUT + '{sample}/PAIREDEND/{fastq}.bam.lsfout.log',
lsferrfile = BWAOUT + '{sample}/PAIREDEND/{fastq}.bam.lsferr.log',
scratch = config['tools']['bwa']['mem']['scratch'],
mem = config['tools']['bwa']['mem']['memory'],
time = config['tools']['bwa']['mem']['time'],
index = config['resources'][ORGANISM]['bwaIndex'],
params = config['tools']['bwa']['mem']['params'],
rg = createReadGroupBwa
benchmark:
BWAOUT + '{sample}/PAIREDEND/{fastq}.bam.benchmark'
threads:
config['tools']['bwa']['mem']['threads']
log:
BWAOUT + '{sample}/PAIREDEND/{fastq}.bam.log'
shell:
('{config[tools][bwa][mem][call]} ' +
'{params.params} ' +
'-R {params.rg} ' +
'-t {threads} ' +
'{params.index} ' +
'{input.fastqR1} ' +
'{input.fastqR2} ' +
'2>{log}| {config[tools][samtools][call]} view -bhS - > {output.bam}')
if not 'BWAALNIN' in globals():
BWAALNIN = FASTQDIR
if not 'BWAALNOUT' in globals():
BWAALNOUT = OUTDIR + 'bwa_aln/'
rule bwa_aln:
input:
fastq = BWAALNIN + '{sample}/PAIREDEND/{fastq}.fastq.gz',
index1 = config['resources'][ORGANISM]['bwaIndex'] + '.amb',
index2 = config['resources'][ORGANISM]['bwaIndex'] + '.ann',
index3 = config['resources'][ORGANISM]['bwaIndex'] + '.bwt',
index4 = config['resources'][ORGANISM]['bwaIndex'] + '.pac',
index5 = config['resources'][ORGANISM]['bwaIndex'] + '.sa',
output:
sai = temp(BWAALNOUT + '{sample}/PAIREDEND/{fastq}.sai')
params:
lsfoutfile = BWAALNOUT + '{sample}/PAIREDEND/{fastq}.sai.lsfout.log',
lsferrfile = BWAALNOUT + '{sample}/PAIREDEND/{fastq}.sai.lsferr.log',
scratch = config['tools']['bwa']['aln']['scratch'],
mem = config['tools']['bwa']['aln']['memory'],
time = config['tools']['bwa']['aln']['time'],
index = config['resources'][ORGANISM]['bwaIndex'],
params = config['tools']['bwa']['aln']['params']
benchmark:
BWAALNOUT + '{sample}/PAIREDEND/{fastq}.sai.benchmark'
threads:
config['tools']['bwa']['aln']['threads']
log:
BWAALNOUT + '{sample}/PAIREDEND/{fastq}.sai.log'
shell:
('{config[tools][bwa][aln][call]} ' +
'{params.params} ' +
'-t {threads} ' +
'{params.index} ' +
'{input.fastq} ' +
'2>{log} > {output.sai}')
rule bwa_sampe:
input:
sai1 = BWAALNOUT + '{sample}/PAIREDEND/{fastq}_R1.sai',
fastq_R1 = BWAALNIN + '{sample}/PAIREDEND/{fastq}_R1.fastq.gz',
sai2 = BWAALNOUT + '{sample}/PAIREDEND/{fastq}_R2.sai',
fastq_R2 = BWAALNIN + '{sample}/PAIREDEND/{fastq}_R2.fastq.gz',
tsv = getTSV
output:
bam=temp(BWAALNOUT + '{sample}/PAIREDEND/{fastq}.bam')
params:
lsfoutfile = BWAALNOUT + '{sample}/PAIREDEND/{fastq}.bam.lsfout.log',
lsferrfile = BWAALNOUT + '{sample}/PAIREDEND/{fastq}.bam.lsferr.log',
scratch = config['tools']['bwa']['sampe']['scratch'],
mem = config['tools']['bwa']['sampe']['mem'],
time = config['tools']['bwa']['sampe']['time'],
index = config['resources'][ORGANISM]['bwaIndex'],
params = config['tools']['bwa']['sampe']['params'],
rg = createReadGroupBwa
benchmark:
BWAALNOUT + '{sample}/PAIREDEND/{fastq}.bam.benchmark'
threads:
config['tools']['bwa']['sampe']['threads']
log:
BWAALNOUT + '{sample}/PAIREDEND/{fastq}.bam.log'
shell:
('{config[tools][bwa][sampe][call]} ' +
'-r {params.rg} ' +
'{params.params} ' +
'{params.index} ' +
'{input.sai1} ' +
'{input.sai2} ' +
'{input.fastq_R1} ' +
'{input.fastq_R2} ' +
'2>{log}| {config[tools][samtools][call]} view -bhS - > {output.bam}')
# This rule aligns unpaired reads using yara
if not 'YARAIN' in globals():
YARAIN = OUTDIR + '.yara_in'
if not 'YARAOUT' in globals():
YARAOUT = OUTDIR + '.yara_out'
rule yara_single:
input:
fastq = YARAIN + '{sample}/PAIREDEND/{fastq}_UNPAIRED.fastq.gz',
index1 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.drp',
index2 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.drs',
index3 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.drv',
index4 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.pst',
index5 = config['resources'][ORGANISM]['yaraIndex'] + '.rid.concat',
index6 = config['resources'][ORGANISM]['yaraIndex'] + '.rid.limits',
index7 = config['resources'][ORGANISM]['yaraIndex'] + '.sa.ind',
index8 = config['resources'][ORGANISM]['yaraIndex'] + '.sa.len',
index9 = config['resources'][ORGANISM]['yaraIndex'] + '.sa.val',
index10 = config['resources'][ORGANISM]['yaraIndex'] + '.txt.concat',
index11 = config['resources'][ORGANISM]['yaraIndex'] + '.txt.limits',
index12 = config['resources'][ORGANISM]['yaraIndex'] + '.txt.size',
tsv = getTSV
output:
bam=temp(YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam'),
params:
lsfoutfile = YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.lsfout.log',
lsferrfile = YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.lsferr.log',
scratch = config['tools']['yara']['scratch'],
mem = config['tools']['yara']['mem'],
time = config['tools']['yara']['time'],
index = config['resources'][ORGANISM]['yaraIndex'],
params = config['tools']['yara']['params'],
rg = createReadGroupYara
benchmark:
YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.benchmark'
threads:
config['tools']['yara']['threads']
log:
YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.log'
shell:
('{config[tools][yara][call]} ' +
'{params.params} ' +
'-rg {params.rg} ' +
'-o {output.bam} ' +
'-e {config[tools][yara][paired][error-rate]} ' +
'-s {config[tools][yara][paired][strata-rate]} ' +
'-t {threads} ' +
'{input.fastq} ' +
'2> {log}')
# This rule aligns paired reads using yara
rule yara_paired:
input:
fastqR1 = YARAIN + '{sample}/PAIREDEND/{fastq}_R1_PAIRED.fastq.gz',
fastqR2 = YARAIN + '{sample}/PAIREDEND/{fastq}_R2_PAIRED.fastq.gz',
index1 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.drp',
index2 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.drs',
index3 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.drv',
index4 = config['resources'][ORGANISM]['yaraIndex'] + '.lf.pst',
index5 = config['resources'][ORGANISM]['yaraIndex'] + '.rid.concat',
index6 = config['resources'][ORGANISM]['yaraIndex'] + '.rid.limits',
index7 = config['resources'][ORGANISM]['yaraIndex'] + '.sa.ind',
index8 = config['resources'][ORGANISM]['yaraIndex'] + '.sa.len',
index9 = config['resources'][ORGANISM]['yaraIndex'] + '.sa.val',
index10 = config['resources'][ORGANISM]['yaraIndex'] + '.txt.concat',
index11 = config['resources'][ORGANISM]['yaraIndex'] + '.txt.limits',
index12 = config['resources'][ORGANISM]['yaraIndex'] + '.txt.size',
tsv = getTSV
output:
bam=temp(YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam'),
params:
lsfoutfile = YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.lsfout.log',
lsferrfile = YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.lsferr.log',
scratch = config['tools']['yara']['scratch'],
mem = config['tools']['yara']['mem'],
time = config['tools']['yara']['time'],
index = config['resources'][ORGANISM]['yaraIndex'],
params = config['tools']['yara']['params'],
rg = createReadGroupYara
benchmark:
YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.benchmark'
threads:
config['tools']['yara']['threads']
log:
YARAOUT + '{sample}/PAIREDEND/{fastq}_PAIRED.bam.log'
shell:
('{config[tools][yara][call]} ' +
'{params.params} ' +
'-rg {params.rg} ' +
'-o {output.bam} ' +
'-e {config[tools][yara][paired][error-rate]} ' +
'-s {config[tools][yara][paired][strata-rate]} ' +
'-t {threads} ' +
'{input.fastqR1} ' +
'{input.fastqR2} ' +
'2> {log}')
# This rule aligns paired reads using soap
if not 'SOAPIN' in globals():
SOAPIN = OUTDIR + '.yara_in'
if not 'SOAPOUT' in globals():
SOAPOUT = OUTDIR + '.soap_out'
rule soap_paired:
input:
fastqR1 = SOAPIN + '{sample}/PAIREDEND/{fastq}_R1.fastq',
fastqR2 = SOAPIN + '{sample}/PAIREDEND/{fastq}_R2.fastq',
index = config['resources'][ORGANISM]['soapIndex'] + '.bwt'
output:
txt=temp(SOAPOUT + '{sample}/PAIREDEND/{fastq}.txt'),
txtSE=temp(SOAPOUT + '{sample}/PAIREDEND/{fastq}_SE.txt')
params:
lsfoutfile = SOAPOUT + '{sample}/PAIREDEND/{fastq}.txt.lsfout.log',
lsferrfile = SOAPOUT + '{sample}/PAIREDEND/{fastq}.txt.lsferr.log',
scratch = config['tools']['soap']['scratch'],
mem = config['tools']['soap']['memory'],
time = config['tools']['soap']['time'],
params = config['tools']['soap']['params'],
index = config['resources'][ORGANISM]['soapIndex']
benchmark:
SOAPOUT + '{sample}/PAIREDEND/{fastq}.txt.benchmark'
threads:
config['tools']['soap']['threads']
log:
SOAPOUT + '{sample}/PAIREDEND/{fastq}.txt.log'
shell:
('{config[tools][soap][call]} ' +
'-a {input.fastqR1} ' +
'-b {input.fastqR2} ' +
'-D {params.index} ' +
'-o {output.txt} ' +
'-2 {output.txtSE} ' +
'-p {threads} ' +
'{params.params} ')
# This rule converts SOAP alignment output to BAM using soap2sam and samtools
rule soap2bam:
input:
txt = SOAPOUT + '{sample}/PAIREDEND/{fastq}.txt',
fai = config['resources'][ORGANISM]['referenceFai']
output:
bam=temp(SOAPOUT + '{sample}/PAIREDEND/{fastq}.bam')
params:
lsfoutfile = SOAPOUT + '{sample}/PAIREDEND/{fastq}.bam.lsfout.log',
lsferrfile = SOAPOUT + '{sample}/PAIREDEND/{fastq}.bam.lsferr.log',
scratch = config['tools']['soap2sam']['scratch'],
mem = config['tools']['soap2sam']['memory'],
time = config['tools']['soap2sam']['time']
benchmark:
SOAPOUT + '{sample}/PAIREDEND/{fastq}.bam.benchmark'
threads:
config['tools']['soap2sam']['threads']
log:
SOAPOUT + '{sample}/PAIREDEND/{fastq}.bam.log'
shell:
('{config[tools][soap2sam][call]} ' +
'-p ' +
'{input.txt} '
'| {config[tools][samtools][call]} view -bt {input.fai} - >{output.bam}')
|
cbg-ethz/NGS-pipe
|
snake/common/align/align_snake.py
|
Python
|
apache-2.0
| 20,320
|
[
"BWA"
] |
f5816867709b902d1df463d8491ba29f09961393e714a0924920c01e2a2b803d
|
import csv
import heapq
import numpy as np
from ase.tasks.io import read_json
ann_fontsize = 'small'
label_fontsize = 12
def most_common_value(values, precision=None):
# find the most common value
if precision is not None:
vs = [round(v, precision) for v in values]
else:
vs = values[:]
if len(vs) > 0:
nlargest = heapq.nlargest(1, [vs.count(e) for e in vs])[0]
index = [vs.count(e) for e in vs].index(nlargest)
v = vs[index]
else:
v = None
return v
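# Added check (sketch): most_common_value([1.04, 1.01, 2.0], precision=1)
# rounds the values to [1.0, 1.0, 2.0] and returns the most common, 1.0.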
def get_differences(reference, values, precision=None):
# diffrences with respect to the reference value within precision
values = values[:]
for n, v in enumerate(values):
if v is not None:
de = v - reference
if precision is not None:
if abs(de) < 1. / precision:
# value within precision
values[n] = 0.0
else:
values[n] = round(de, precision)
else:
values[n] = de
return values
def get_key_summary_list(key, name, runs, data, precision=3, relative=False):
# if relative, then first entry is a common value, other entries relative
# if not relative, then first entry is average value, other entries verbatim
nruns = len(runs)
d = data.copy()
l = [name]
values = [d[r][key] for r in runs if r in d and key in d[r] and d[r][key] is not None]
if relative:
# find the most common value
e = most_common_value(values, precision)
else:
e = np.mean(values)
if e is None:
l.extend(['None' for i in range(nruns + 1)])
else:
# print the found common value with the precision
l.append(("%." + "%df" % (precision + 0)) % e)
es = [d[r].get(key, None) for r in runs if r in d]
if relative:
des = get_differences(e, es)
# and the corresponding differences
for n, de in enumerate(des):
if de is not None:
if abs(de) < 1. / precision:
# value within precision
des[n] = '-'
else:
des[n] = ("%." + "%df" % precision) % de
else:
des[n] = 'None'
l.extend(des)
else:
des = es[:]
for n, de in enumerate(es):
if de is not None:
des[n] = str(de)
else:
des[n] = 'None'
l.extend(des)
return l
def get_key_stats(data, key, nnlargest):
keys = []
values = []
stats = {'absaverage': None,
'average': None,
'std': None,
'max': None,
'N': None,
'nlargest': []}
for k, v in data.iteritems():
if key in v:
if v.get(key) is not None:
keys.append(k)
values.append(v[key])
if len(keys) > 0:
stats['absaverage'] = np.average(abs(np.array(values)))
stats['average'] = np.average(np.array(values))
stats['std'] = np.std(np.array(values))
stats['max'] = np.max(np.array(values))
stats['N'] = len(values)
nlargest = heapq.nlargest(nnlargest, np.abs(np.array(values)))
# already found elements must be removed in order to avoid repetition
k = keys[:]
v = values[:]
nl = []
for i in nlargest:
ind = v.index(i)
nl.append((k[ind], i))
v.pop(ind)
k.pop(ind)
stats['nlargest'] = nl
return stats
def plot_single(xdata, ydata, std,
title,
xlabel, ylabel,
label, color, alpha,
miny, maxy,
num=1,
):
import matplotlib
import pylab
import matplotlib.font_manager
# all goes to figure num
pylab.figure(num=num, figsize=(9.5, 9))
pylab.gca().set_position([0.10, 0.20, 0.85, 0.60])
# let the plot have fixed y-axis scale
ywindow = maxy - miny
pylab.bar(xdata, ydata, 0.9, yerr=std,
label=label, color=color, alpha=alpha)
pylab.gca().set_ylim(miny, maxy)
t = pylab.title(title)
# http://old.nabble.com/More-space-between-title-and-secondary-x-axis-td31722298.html
t.set_y(1.05)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
prop = matplotlib.font_manager.FontProperties(size=12)
leg = pylab.legend(loc='upper right', fancybox=True, prop=prop)
leg.get_frame().set_alpha(0.5)
def plot(runs, data,
labels, # labels coresponding to runs
key, # the main quantity to plot
failkey, # the key that informs about failure/success in run
nnlargest,
plot_label,
plot_xlabel,
plot_ylabel,
miny, maxy,
tag=None,
tunit='min'):
# horror function
d = data.copy()
# collect stats for each run
stats = {'average': [],
'std': [],
'sdom': [],
'nlargest': [],
'time': [],
'converged': []}
for n, r in enumerate(runs):
s = get_key_stats(d[r], key, nnlargest)
assert s['average'] is not None, r + ': no key ' + "'" + key + "'" + ' in results?'
stats['average'].append(s['average'])
stats['std'].append(s['std'])
stats['sdom'].append(s['std']/np.sqrt(s['N']))
stats['nlargest'].append(s['nlargest'])
# total run time
t = [d[r][k]['time'] for k in d[r].keys() if 'time' in d[r][k]]
if tunit == 'sec':
stats['time'].append(sum(t)) # sec
elif tunit == 'min':
stats['time'].append(sum(t) / 60) # min
elif tunit == 'h':
stats['time'].append(sum(t) / 3600) # hours
# number of converged systems
c = [d[r][k][failkey] for k in d[r].keys() if failkey in d[r][k]]
stats['converged'].append(len([s for s in c if s is not None]))
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pylab, ticker
num=1
scale = [i for i in range(len(runs))]
# plot skeleton
plot_single(
scale, stats['average'], stats['sdom'],
plot_label,
plot_xlabel,
plot_ylabel,
'Average (avg)',
'blue',
0.5,
miny, maxy,
num=1,
)
zero = [0.0 for v in scale]
pylab.plot(scale, zero, 'k-', label='_nolegend_')
ay1=pylab.gca()
ay1.xaxis.set_ticks([n + 0.5 for n in scale])
ay1.xaxis.set_ticklabels(labels)
ay1.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
for label in ay1.get_xticklabels() + ay1.get_yticklabels():
label.set_fontsize(label_fontsize)
# rotate labels http://old.nabble.com/Rotate-x-axes-%28xticks%29-text.-td3141258.html
for label in ay1.get_xticklabels():
label.set_rotation(75)
# plot stats
for n, r in enumerate(runs):
label = ''
for n1, v in enumerate(['average', 'std', 'time']):
l = {
'average': '\navg\n',
'absaverage': '\nabs\n',
'std': '\nstd\n',
'time': '\nt [%s]\n' % tunit,
}[v]
value = {
'average': stats['average'][n],
#'absaverage': stats['absaverage'][n],
'std': stats['std'][n],
'time': stats['time'][n],
}[v]
label += l + ' ' + str(round(value, 1)) + '\n'
label += '\nconv.\n' + ' ' + str(int(stats['converged'][n])) + '\n'
pylab.annotate(label,
xy=(n + 0.0, 0.0),
xytext=(n + 0.2, miny / 2 - 0.5),
arrowprops=None,
horizontalalignment='left',
verticalalignment='center',
fontsize=ann_fontsize,
)
# plot compounds with largest errors
for n, l in enumerate(stats['nlargest']):
l.reverse()
for n1, (c, e) in enumerate(l):
name = c + '\n'
label = name + ' ' + str(int(e))
xytext=(n + 0.3, maxy / 2 + maxy / 10 * n1)
if e > maxy:
# labels exceeding y-scale
xy = xytext
else:
xy=(n + 0.05, e)
pylab.annotate(label,
xy=xy,
xytext=xytext,
arrowprops=dict(width=0.05,
headwidth=5.0,
facecolor='black',
shrink=1.00),
horizontalalignment='left',
verticalalignment='center',
fontsize=ann_fontsize,
)
# axis limits after the last plot!
pylab.ylim(miny, maxy)
kname = key.replace(' ', '_')
if tag is not None:
plotname = '%s_%s.png' % (tag, kname)
else:
plotname = '%s.png' % kname
pylab.savefig(plotname, bbox_inches='tight')
class AnalyseOptimizersTask:
def __init__(self, taskname, runs,
labels=None, tag=None, steps=100, precision=3, tunit='min'):
"""Analyse optimizers runs. """
self.taskname = taskname
self.runs = runs
if labels is None: # run labels for plotting
self.labels = self.runs.split(',')
else:
self.labels = labels
self.tag = tag
self.steps = steps # only for purpose of limit plot y-axis
self.precision = precision
assert tunit in ['sec', 'min', 'h']
self.tunit = tunit
self.key_summary = 'relaxed energy'
self.key_plot = 'optimizer force calls'
self.plot_label = 'Summary of optimizers'
self.plot_xlabel = 'optimizer'
self.plot_ylabel = self.key_plot
def read_run(self, taskname, tag, run):
if tag is not None:
tag += '_%s' % run
else:
tag = run
return read_json(taskname + '-' + tag + '.json')
def get_data(self, taskname, tag, runs):
d = {}
for r in runs:
d[r] = self.read_run(taskname, tag, r)
return d
def analyse(self):
runs = self.runs.split(',')
# dict of systems for each run
datarun = self.get_data(self.taskname, self.tag, runs)
# the number of systems (based on results from json)
nsystems = max([len(datarun[n]) for n in datarun.keys()])
# dict of runs for each system
datasys = {}
for k, v in datarun.iteritems():
for k1, v1 in v.iteritems():
if k1 not in datasys:
datasys[k1] = {k: v1}
else:
datasys[k1].update({k: v1})
# csv summary of self.key_plot
key_name = self.key_plot.replace(' ', '_')
row = ['formula', self.key_plot]
row.extend([r for r in range(len(runs))])
rows = [row]
for name, data in datasys.items():
if not data:
continue
row = get_key_summary_list(self.key_plot,
name,
runs,
data,
precision=self.precision,
relative=False)
row = [r.replace('None', 'N/A') for r in row]
# only failed or non-common runs
for k in row[2:]:
                if k != '-':  # failed ('N/A') or differing entries
rows.append(row)
break
if len(rows) > 0: # always create csv file
if self.tag is not None:
csvwriter = csv.writer(
open('%s_%s.csv' % (self.tag, key_name), 'wb'))
else:
csvwriter = csv.writer(open('%s.csv' % key_name, 'wb'))
for r in rows:
csvwriter.writerow(r)
# csv summary of self.key_summary
summary_name = self.key_summary.replace(' ', '_')
row = ['formula', self.key_summary]
row.extend([r for r in range(len(runs))])
rows = [row]
for name, data in datasys.items():
if not data:
continue
row = get_key_summary_list(self.key_summary,
name,
runs,
data,
precision=self.precision,
relative=True)
row = [r.replace('None', 'N/A') for r in row]
# only failed or non-common runs
for k in row[2:]:
                if k != '-':  # failed ('N/A') or differing entries
rows.append(row)
break
if len(rows) > 0: # always create csv file
if self.tag is not None:
csvwriter = csv.writer(
open('%s_%s.csv' % (self.tag, summary_name), 'wb'))
else:
csvwriter = csv.writer(open('%s.csv' % summary_name, 'wb'))
for r in rows:
csvwriter.writerow(r)
# plot
maxy = self.steps - 5
miny = - ((maxy + 5) / 2 + 5)
# plot only runs with data
plotruns = runs[:]
labels = self.labels[:]
for n, r in enumerate(runs):
nd = [1 for k in datarun[r] if datarun[r][k]]
if len(nd) == 0:
print 'skipped plotting empty data', r
# no data for this run
i = plotruns.index(r)
plotruns.pop(i)
labels.pop(i)
continue
plot(plotruns, datarun,
labels,
self.key_plot, # the main quantity to plot
self.key_summary,
4,
self.plot_label + ': ' + str(nsystems) + ' systems',
self.plot_xlabel,
self.plot_ylabel,
miny, maxy,
tag=self.tag,
tunit=self.tunit)
class AnalyseSCFTask(AnalyseOptimizersTask):
def __init__(self, taskname, runs,
labels=None, tag=None, steps=100, precision=3, tunit='min'):
"""Analyse SCF runs. """
AnalyseOptimizersTask.__init__(self, taskname, runs,
labels, tag, steps, precision, tunit)
self.key_summary = 'energy'
self.key_plot = 'calculator steps'
self.plot_label = 'Summary of runs'
self.plot_xlabel = 'run'
self.plot_ylabel = self.key_plot
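# Illustrative usage sketch (added; not part of the original module). The task
# name, the run names and the '<taskname>-<run>.json' result files read by
# get_data() are hypothetical assumptions:
#
# task = AnalyseSCFTask('h2o', runs='lbfgs,fire,bfgs', steps=100, tunit='min')
# task.analyse()  # writes per-key csv summaries and a summary plot (png)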
|
grhawk/ASE
|
tools/ase/test/tasks/analyse.py
|
Python
|
gpl-2.0
| 14,783
|
[
"ASE"
] |
7071be4074ed9896d4cffd1a57ec6536c43ce44263ef6deaa13f5c41526b4df5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
FIRtro 2.0 Módulo de gestión de presets usado por server_process.
Uso informativo en línea de comando:
presets.py [list | preset*] (*: detalla los que coincidan)
"""
# v1.1:
# - Permite indicar "dirac_pulse" en vías full range (en ese caso no existe un pcm real).
# v1.2
# - Gestiona PEQ (EQ paramétrico basado en Ecasound).
# v1.3:
# - Se incorpora la atenuacion y el delay para cada vía.
# v1.3b
# - Se simplifica el archivo presets.ini con el nombre descriptivo del preset en la propia sección INI.
# - Se implementa línea de comandos para búsquedas.
# v1.3c
# - Se deja de usar nc localhost para hablar con Brutefir, ahora usamos el cliente brutefir_cli.py
import brutefir_cli
import subprocess
import os
from sys import path as sys_path, argv as sys_argv, exit as sys_exit
import socket
import ConfigParser
# to connect only the Brutefir outputs of the WAYS IN USE to the sound card
import jack
# The "read_brutefir_process.py" module provides the list of coefficients
# loaded in Brutefir and the filters with the coefficients assigned to them,
# and also provides the mapping of Brutefir outputs in Jack
import read_brutefir_process as brutefir
# For this to work we refresh the lists:
# brutefir.outputs, brutefir.coeffs, brutefir.filters_at_start and brutefir.filters_running
brutefir.lee_config()
brutefir.lee_running_config()
# FIRtro modules:
HOME = os.path.expanduser("~")
sys_path.append(HOME + "/bin")
from basepaths import loudspeaker_folder, config_folder
from getconfig import loudspeaker
from getstatus import drc_eq as drc_status
status = ConfigParser.ConfigParser()
status.read(config_folder + "/status")
# loudspeaker/fs folder
altavoz_folder = loudspeaker_folder + loudspeaker
# definition file of our presets for this loudspeaker:
presets = ConfigParser.ConfigParser()
presets.read(altavoz_folder + "/presets.ini")
# collector of notices to print
avisos = []
def configura_preset(presetID, filter_type):
""" Esta función se encarga de configurar en el proceso brutefir el coeff de los xover
y subwoofer si procede, como se haya definido en el archivo de presets de usuario.
Y devuelve los siguientes valores a server_process para que se actualize en el sistema:
- la descripción del preset
- el numero de drc requerido
- la configuración de PEQ
- el ajuste de balance
"""
global avisos
if presets.has_section(presetID):
avisos += [ "(presets) Ha seleccionado preset: " + presetID ]
viasActivas = []
for opcion in presets.options(presetID):
valor = presets.get(presetID, opcion)
if opcion in ["drc"]:
drc_num = configura_drc_coeff(valor)
if opcion in ["fr", "lo", "mi", "hi", "sw"]:
                via = opcion # just for readability
atten, delay, pcm_name = valor.split()
configura_via_coeff(via, pcm_name, filter_type)
configura_via_atten(via, atten)
configura_via_delay(via, delay)
viasActivas.append(via)
avisos += [ "(presets) Se configura la via " + via + " " + filter_type + ":\t" \
+ atten + "\t" + delay + "\t" + pcm_name ]
if opcion in ["balance"]:
balance = ajusta_balance(valor)
if opcion in ["peq"]:
peq = ajusta_peq(valor)
avisos += ["(presets) Enjoy!"]
        # Connect to the sound card only the ways defined in the preset:
conecta_tarjeta(viasActivas)
else:
presetID = "ERROR PRESET NO VALIDO"
drc_num = drc_status
balance = "0"
peq = None
for aviso in avisos:
print aviso
avisos = []
return presetID, drc_num, balance, peq
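# Illustrative sketch (added; not in the original file): a hypothetical
# presets.ini section that configura_preset() would consume. Each way option
# ("fr", "lo", "mi", "hi", "sw") holds "atten delay pcm_name"; "drc",
# "balance" and "peq" are optional. All values below are made up:
#
# [my_preset]
# drc = RRreq
# lo = 0.0 0.0 lp-way
# hi = -1.5 0.15 hp-way
# balance = 0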
def busca_preset(presetID):
if presets.has_section(presetID):
return presetID
else:
return None
def ajusta_balance(balance):
    # adjusting the balance requires talking to FIRtro so it shows up in the web UI and in status
    # in other words, this function does nothing :-)
return balance
def ajusta_peq(peq):
    # adjusting the PEQ requires talking to FIRtro so it shows up in the web UI and in status
    # in other words, this function does nothing :-)
return peq
def configura_drc_coeff(fName):
""" funcion auxiliar para cargar el drc del preset seleccionado
"""
global avisos
    # The DRC dedicates one pcm to each channel
drc_coeffs = []
for channel in "L", "R":
filtro_pcm = channel + " " + fName + ".pcm"
for coeff in brutefir.coeffs:
# e.g.: [6, 'c_drc2_L', 'L RRreq FR_60Hz1st.pcm']
if filtro_pcm == coeff[2]:
drc_coeffs.append(coeff[1])
    # here we talk to FIRtro instead of directly to Brutefir
    # in order to respect the DRC management integrated in FIRtro.
drc_nums = []
for drc_coeff in drc_coeffs:
# e.g.: 'c_drc2_L' --> '2'
drc_nums.append(drc_coeff[5])
    try:
        if drc_nums[0] == drc_nums[1]:
            avisos += ["(presets) configuring drc num." + drc_nums[0] + " with filter: " + fName]
            # return the drc to be configured so that server_process.py takes care of it
            return drc_nums[0]
    except:
        avisos += ["(presets) something went wrong locating the drc"]
        return "0"
def configura_via_coeff(via, fName, filter_type):
""" funcion auxiliar para cargar en Brutefir (orden CLI: cfc) el filtro de xover del preset seleccionado
"""
#global avisos
if "dirac" in fName:
filtro_pcm = "dirac pulse"
else:
filtro_pcm = filter_type + "-" + fName + ".pcm"
    for coeff in brutefir.coeffs[2:]:    # skip the first few coeffs, which belong to the EQ
        if filtro_pcm == coeff[2]:       # coeff[2] is the pcm name
            bCoeff = coeff[1]            # coeff[1] is the coeff name in Brutefir
for bFilter in vias_como_esta(via):
        # send the command to Brutefir:
        tmp = 'cfc "' + bFilter + '" "' + bCoeff + '"; quit;'
        #print ">>>>>>> command to Brutefir:", tmp # DEBUG
brutefir_cli.bfcli(tmp)
def configura_via_atten(via, atten):
""" funcion auxiliar para configurar la atten de una vía en Brutefir (orden CLI: cfoa)
"""
#global avisos
for bFilter in vias_como_esta(via):
atten = str(float(atten))
        # send the command to Brutefir:
        tmp = 'cfoa "' + bFilter + '" "' + bFilter[2:] + '" ' + atten + '; quit;'
        #print ">>>>>>> command to Brutefir:", tmp # DEBUG
brutefir_cli.bfcli(tmp)
def configura_via_delay(via, delay):
""" funcion auxiliar para configurar el retardo de una vía en Brutefir (orden CLI: cod)
"""
#global avisos
for bFilter in vias_como_esta(via):
bOutput = bFilter[2:]
delaySamples = str(int(float(delay)/1000*44100))
        # send the command to Brutefir:
        tmp = 'cod "' + bOutput + '" ' + delaySamples + '; quit;'
        #print ">>>>>>> command to Brutefir:", tmp # DEBUG
brutefir_cli.bfcli(tmp)
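# Worked example (added for clarity): with the 44100 Hz rate hard-coded above,
# a 5 ms delay becomes int(5.0/1000*44100) = int(220.5) = 220 samples, so
# configura_via_delay("lo", "5") ends up sending:  cod "<output>" 220; quit;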
# Auxiliary sub-function to iterate over the filtering stages
# of the given type (fr, hi, mi, lo or sw):
def vias_como_esta(tipoVia):
""" funcion auxiliar que proporciona las vias de subwoofer definidas en brutefir
"""
viasComoEsta = []
for filtroRunning in brutefir.filters_running:
if tipoVia in filtroRunning[0]:
viasComoEsta.append(filtroRunning[0])
return viasComoEsta
def conecta_tarjeta(viasActivas):
jack.attach("tmp")
    # the brutefir.outputs list contains the mapping of ways
for output in brutefir.outputs:
brutefirPort = "brutefir:" + output.split("/")[1].replace('"', '')
jackPort = output.split("/")[0].replace('"', '')
        # now check whether this output belongs to an active way
salidaActiva = False
for viaActiva in viasActivas:
if viaActiva in output:
salidaActiva = True
if salidaActiva:
jack.connect(brutefirPort, jackPort)
else:
jack.disconnect(brutefirPort, jackPort)
jack.detach()
# NOTE: do not simplify, used in server_process to pass it to the web UI via json
def lista_de_presets():
""" devuelve la lista de presets
"""
return presets.sections()
# For command-line use
def printa_presets(x=''):
""" muestra los presets definidos en el archivo presets.ini del altavoz
"""
if x in ("all", "list"):
print "\n--- Presets disponibles:"
for preset in presets.sections():
print preset
else:
for preset in lista_de_presets():
if x in preset:
print "\n[" + preset + "]"
print "via:".ljust(12), " ".ljust(4), "atten delay filtro"
for option in presets.options(preset):
print option.ljust(12), "=".ljust(4), presets.get(preset, option)
print
if __name__ == '__main__':
if sys_argv[1:]:
printa_presets(sys_argv[1].lower())
else:
print __doc__
sys_exit(0)
|
rripio/FIRtro
|
bin/presets.py
|
Python
|
gpl-3.0
| 9,550
|
[
"DIRAC"
] |
d7791637cea38a66c74b054cdafbbc80f5fc110144fe8dd187abb1d7bbf25f49
|
#!/usr/bin/python
from scipy.stats import cauchy
import random
import math
import csv
import numpy as np
import netCDF4 as nc
import argparse
import lvDiagram
'''
parser = argparse.ArgumentParser()
parser.add_argument("numberRegions", type=int,
help="Number of HII Regions to Populate in Model")
args = parser.parse_args()
numRegions = args.numberRegions # Prompt User for number of Hii regions
'''
def defaults():
ff=nc.Dataset('/Users/Marvin/Research/data/larson_radius_hypercube.ncdf') # Import data cube from Tremblin et. al. 2014
region = 1 # Start count of regions from 1 to NumRegions
HiiList = [] # Initialize list to store Hii data
# The following definitions determine which structures will
# be present in the galaxy and what their relative proportion is.
# See Hughes et al. ApJ April 2013 for relative proportion in M51
diffuse = True
bar = True
ThreekpcArm = True
ring = True
spiral = True
diffusePercent = 25
barPercent = 10
ThreekpcArmPercent = 10
ringPercent = 10
spiralPercent = 100 - (diffusePercent + barPercent +
ThreekpcArmPercent + ringPercent)
numRegions = 10000
# Determine Structure of Galaxy
extentOfBar = 4.4 # Length of bar in kiloparsecs.
# See Benjamin et al. ApJ Sept 2005.
    cutoff = 3.41 # 4.1 # Looking to (cutoff)x the bar length.
# Max value ~6.86 due to model limitation (Tremblin, below)
galRange = extentOfBar*cutoff
sunPos = 8.4 # Distance of Sun from GC
sunHeight = 0.02 # Distance Sun is above galactic plane (kpc) CITE SOURCE!
circRot = 220 # Solar circular rotation speed. Carroll & Ostlie (24.36)
v0 = 0 # Initial velocity of source. Only relevant to 3kpc arm.
galRot = 44.0*math.pi/180.0 # Rotates entire galaxy by (x) degrees.
# See Benjamin et al. ApJ Sept 2005.
random.seed( 1 ) # Seed random number generator. (ARBITRARY)
numSpirals = 4 # Determines Number of Spiral arms
pitchAngle = 11*math.pi/180 # Determines curvature of arms
# 7.3 --> See Wu et al. A&A April 2014 for pitch angle estimate in Sagitarrius arm
warpParam = math.pi/2 # Determines degree of galactic warp
# DEFINE/CONVERT TO AS ANGLE?
warpHeight = 0.08 # BY INSPECTION
maxSpiralRevolutions = 1 # Range for number of spiral revs. (ARBITRARY)
maxCluster = 10 # Maximum number of regions in a given cluster (ARBITRARY)
avgCluster = 4 # Most commonly found number of regions in cluster (ARBITRARY)
    clusterRange = 20/1000.0 # Sets clustered regions to be within (x) pc of each other (float: 20/1000 is 0 in Python 2)
# See Motte et al. ApJ 2002
sigma = 0.3 # Sets FWHM of spiral arms to (x) kpc
# 0.2 See Wu et al. A&A April 2014 for deviation
# from spiral estimate in Sagitarrius arm.
zmax = .15 # Sets max height in z as +/- (x) kpc
    gamma = 0 # 0.01365 # Sets spread of Cauchy-Lorentz Z-distribution of regions
alpha = 0 # Sets HII region drop off as r^-alpha(after bar)
# Determine Mass of Individual Regions
# In Units of Stellar Mass. Sets lower bound for ionizing star
lowerMass = 16
upperMass = 90
barAngle=0 # THIS IS NOT USED IN THE CODE. IT DEFINES THE SUN'S STARTING POSITION
# Output parameters
(galRad,xRot,yRot,z,mass,lum,age,radius)=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
(diffLum,barLum,ThreekpcLum,ringLum,sprLum,totLum)=(0.0,0.0,0.0,0.0,0.0,0.0)
(diffCount,barCount,ThreekpcCount,ringCount,sprCount,totCount)=(0,0,0,0,0,0)
def Simulate(numRegions=numRegions, alpha=alpha, pitchAngle=pitchAngle, numSpirals=numSpirals, extentOfBar=extentOfBar, \
barAngle=barAngle, scaleHeight=gamma, warpHeight=warpHeight):
'''
To display the default values for all parameters, call >> help(defaults)
'''
while region <= numRegions :
v0 = 0
i = 1
# Reset i each time to force a region to be populated
# if all requirements are met.
selectionParam = random.random()
# Determines if Hii region is kept or thrown away.
# Forces population of regions to follow linear trend
# to end of bar and power law drop-off after bar.
numCluster = 1
numClusterTot = random.randrange(1,maxCluster,1)
whereIsRegion = random.randrange(1, diffusePercent + barPercent
+ ringPercent + ThreekpcArmPercent
+ spiralPercent, 1)
# Determines location of one individual region.
# HII Region will be randomly populated in Galaxy, but will not be
# be placed in central region (within bar radius).
if (whereIsRegion <= diffusePercent) and (diffuse == True) :
while i != 0 : # This loop forces an Hii region to be populated diffusely
x = random.gauss(0,galRange/2) # Sets diffuse population to have
# FWHM of galRange/2
y = random.gauss(0,galRange/2)
theta = math.atan(x/y)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam)
# Models galactic warp with Tan(x)*Cos(y)
zPos = random.uniform(-zmax,zmax)
z = galWarp + zPos
# Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (abs(x) > extentOfBar + random.gauss(0,sigma)) \
and (galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
elif (abs(x) < extentOfBar + random.gauss(0,sigma)) \
and (extentOfBar < galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in Bar
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
while i != 0 : # This loop forces an Hii region to be populated in bar
x = random.uniform(-extentOfBar,extentOfBar) # Returns random number between (-extentOfBar,extentOfBar)
y = random.gauss(0,sigma) # Sets thickness of bar to (sigma) kpc
theta = math.atan(x/y)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam)
# Models galactic warp with Tan(x)*Cos(y)
zPos = random.uniform(-zmax,zmax)
z = galWarp + zPos
# Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in 3 Kiloparsec Arm
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
yRingInt = extentOfBar/2
        ySign = random.choice([-1, 1]) # random sign; randrange(-1,1) never returns +1
while i != 0 : # This loop forces an Hii region to be populated in ring
xCart = random.uniform(-extentOfBar,extentOfBar)
yCart = math.copysign(yRingInt*pow(1-pow(xCart,2)/pow(extentOfBar,2),.5),ySign) # Produces ring structure
x = xCart + random.gauss(0, sigma) # Gaussian distribution around ring
y = yCart + random.gauss(0, sigma)
theta = math.atan(x/y)
zPos = random.uniform(-zmax,zmax)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam) # Models galactic warp with Tan(x)*Cos(y)
z = galWarp + zPos # EDIT TO Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
v0 = 56 # Expansion of 3kpc arm
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in Ring
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (ring == True) :
yRingInt = extentOfBar
        ySign = random.choice([-1, 1]) # random sign; randrange(-1,1) never returns +1
while i != 0 : # This loop forces an Hii region to be populated in ring
xCart = random.uniform(-extentOfBar,extentOfBar)
yCart = math.copysign(yRingInt*pow(1-pow(xCart,2)/pow(extentOfBar,2),.5),ySign) # Produces ring structure
x = xCart + random.gauss(0, sigma) # Gaussian distribution around ring
y = yCart + random.gauss(0, sigma)
theta = math.atan(x/y)
zPos = random.uniform(-zmax,zmax)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam) # Models galactic warp with Tan(x)*Cos(y)
z = galWarp + zPos
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Populate in One of Spirals
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent + spiralPercent)) \
and (spiral == True):
while i != 0 : # This loop forces an Hii region to be populated in arms
whichArm = random.randint(0,numSpirals-1)
theta = random.uniform(0,2*np.pi*maxSpiralRevolutions)
r = extentOfBar*math.exp(pitchAngle*(1 - .3*math.floor(whichArm/2))*theta) # (.2 ARBITRARY)
xCart = r*math.cos(theta + np.pi*math.fmod(whichArm,2))
yCart = r*math.sin(theta + np.pi*math.fmod(whichArm,2))
x = xCart + random.gauss(0,sigma) # Gaussian distribution around spiral
y = yCart + random.gauss(0,sigma)
galWarp = warpHeight*math.tan(x/galRange*warpParam)*math.cos(y/galRange*warpParam) # Models galactic warp with Tan(x)*Cos(y)
zPos = random.uniform(-zmax,zmax)
z = galWarp + zPos
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center in kpc
i += 1
if (galRad < galRange) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)) :
region += numClusterTot # Increase region count
i = 0 # Escape Loop
# Determine individual region parameters and write to list
while (i == 0) and (numCluster <= numClusterTot) :
# Set Time Distribution
timeParam = random.randint(0,99)
            age = timeParam*.127 # Age in Myr (12.7 Myr limit) in Tremblin model
# Set Host Star Mass Distribution
massParam = random.random() # Used in forcing powerlaw fit
while massParam != 0 :
mass = random.randrange(lowerMass,upperMass)
IMF = pow(lowerMass,2.35)*pow(mass,-2.35)
lifetime = pow(lowerMass,.935)*pow(mass,-.935)
numHiiRegions = IMF*lifetime
if numHiiRegions > massParam : # Makes power law fit
massParam = 0 # Escape loop
# Set Host Star Flux Distribution
fluxMin = math.log10(pow(lowerMass,1.94))
fluxMax = math.log10(pow(upperMass,1.94))
fluxParam = int(round((math.log10(pow(mass,1.94))-fluxMin)/(fluxMax-fluxMin)*16,0)) # Use this line to access all values of Lum from 10^47 - 10^51
# fluxParam = int(round((math.log10(pow(mass,1.94))-fluxMin)/(fluxMax-fluxMin)*12,0)+4)
lum = fluxParam
# Set Electron Temperature Distribution
# Relationship taken from Balser et.al. 2011, put in range accepted by Tremblin model
TeParam = int(round((5756 + 303*random.uniform(-1,1)) + galRad*(299 + 31*random.uniform(-1,1)),-3)/1000 - 5)
# Set Neutral Hydrogen Density Distribution
densityParam = random.randint(0,10)
# From Distributions, Determine HII Region Radius
# Using Pascal Tremblin's hypercube data
radius = ff.variables['radius'][timeParam,fluxParam,TeParam,densityParam]
# Rotate galaxy
xRot = x*math.cos(galRot) - y*math.sin(galRot)
yRot = x*math.sin(galRot) + y*math.cos(galRot)
# Set velocity of source
omega = circRot/galRad # Assume flat rotation curve.
omega0 = circRot/sunPos
dist = pow(pow(xRot,2)+pow(yRot-sunPos,2),0.5)
l = math.copysign(math.acos((pow(dist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*dist))*180/math.pi,xRot)
b = math.atan((z-sunHeight)/dist)
vR = (omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta)
# This section allows the user to test various parameters for easy
# output to terminal (e.g. luminosity of various features, counts
# of regions in spiral versus bar, etc.)
if (whereIsRegion <= diffusePercent) \
and (diffuse == True) :
diffLum = diffLum + lum
diffCount += 1
regNum = 1
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
barLum = barLum + lum
barCount += 1
regNum = 2
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
ThreekpcLum = ThreekpcLum + lum
ThreekpcCount += 1
regNum = 3
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (ring == True) :
ringLum = ringLum + lum
ringCount += 1
regNum = 4
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + ringPercent + spiralPercent)) \
and (spiral == True):
sprLum = sprLum + lum
sprCount += 1
if whichArm == 0 :
regNum = 5
elif whichArm == 1 :
regNum = 6
elif whichArm == 2 :
regNum = 7
elif whichArm == 3 :
regNum = 8
totLum = totLum + lum
# Append information to CSV file
HiiList.append([galRad,xRot,yRot,z,mass,lum,age,radius,l,vR,regNum,b])
numCluster += 1
with open("3DHiiRegions.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(HiiList)
print "Diffuse Luminosity : " + str(diffLum*100/totLum) + "% (" + str(diffCount) + " Regions)"
print "Bar Luminosity : " + str(barLum*100/totLum) + "% (" + str(barCount) + " Regions)"
print "3 kpc Arm Luminosity : " + str(ThreekpcLum*100/totLum) + "% ("+ str(ThreekpcCount) + " Regions)"
print "Ring Luminosity : " + str(ringLum*100/totLum) + "% ("+ str(ringCount) + " Regions)"
print "Spiral Luminosity : " + str(sprLum*100/totLum) + "% (" + str(sprCount) + " Regions)"
print "Total Luminosity : " + str((barLum+ThreekpcLum+ringLum+sprLum+diffLum)*100/totLum) + "% (" + str(barCount+ThreekpcCount+ringCount+sprCount+diffCount) + " Regions)"
def main(numRegions=numRegions, alpha=alpha, pitchAngle=pitchAngle, numSpirals=numSpirals, extentOfBar=extentOfBar, \
barAngle=barAngle, scaleHeight=gamma, warpHeight=warpHeight):
defaults()
# This will not work as expected. It will use all values from the defaults() function
Simulate(numRegions=numRegions, alpha=alpha, pitchAngle=pitchAngle, numSpirals=numSpirals, extentOfBar=extentOfBar, \
barAngle=barAngle, scaleHeight=gamma, warpHeight=warpHeight)
# UPDATE PLOTS
lvDiagram.lvDiagram()
main()
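# Standalone sketch (added for illustration; not in the original file) of the
# logarithmic-spiral sampling used in the spiral branch above, simplified by
# dropping the per-arm pitch correction factor. The defaults mirror values in
# defaults() but are otherwise arbitrary:
#
# def sample_spiral_point(extentOfBar=4.4, pitchAngle=11*math.pi/180,
#                         sigma=0.3, numSpirals=4):
#     whichArm = random.randint(0, numSpirals - 1)
#     theta = random.uniform(0, 2*math.pi)        # winding angle, one revolution
#     r = extentOfBar*math.exp(pitchAngle*theta)  # logarithmic spiral
#     x = r*math.cos(theta + math.pi*(whichArm % 2)) + random.gauss(0, sigma)
#     y = r*math.sin(theta + math.pi*(whichArm % 2)) + random.gauss(0, sigma)
#     return x, y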
|
WillArmentrout/galSims
|
simulate/Simulate_Function.py
|
Python
|
gpl-2.0
| 17,243
|
[
"Galaxy",
"Gaussian"
] |
64e33f04e5c41130d2175f553ef1662a9a7b90e16148e44a0b43405268797689
|
"""semi-views for the `group_messaging` application
These are not really views - rather context generator
functions, to be used separately, when needed.
For example, some other application can call these
in order to render messages within the page.
Notice that the :mod:`urls` module decorates all these functions
and turns them into complete views
"""
import copy
import datetime
from coffin.template.loader import get_template
from django.contrib.auth.models import User
from django.forms import IntegerField
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseForbidden
from django.utils import simplejson
from group_messaging.models import Message
from group_messaging.models import SenderList
from group_messaging.models import LastVisitTime
from group_messaging.models import get_personal_group_by_user_id
from group_messaging.models import get_personal_groups_for_users
class InboxView(object):
"""custom class-based view
to be used for pjax use and for generation
of content in the traditional way, where
the only the :method:`get_context` would be used.
"""
template_name = None #used only for the "GET" method
http_method_names = ('GET', 'POST')
def render_to_response(self, context, template_name=None):
"""like a django's shortcut, except will use
template_name from self, if `template_name` is not given.
Also, response is packaged as json with an html fragment
for the pjax consumption
"""
if template_name is None:
template_name = self.template_name
template = get_template(template_name)
html = template.render(context)
json = simplejson.dumps({'html': html, 'success': True})
return HttpResponse(json, mimetype='application/json')
def get(self, request, *args, **kwargs):
"""view function for the "GET" method"""
context = self.get_context(request, *args, **kwargs)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
"""view function for the "POST" method"""
pass
def dispatch(self, request, *args, **kwargs):
"""checks that the current request method is allowed
and calls the corresponding view function"""
if request.method not in self.http_method_names:
return HttpResponseNotAllowed()
view_func = getattr(self, request.method.lower())
return view_func(request, *args, **kwargs)
def get_context(self, request, *args, **kwargs):
"""Returns the context dictionary for the "get"
method only"""
return {}
def as_view(self):
"""returns the view function - for the urls.py"""
def view_function(request, *args, **kwargs):
"""the actual view function"""
if request.user.is_authenticated() and request.is_ajax():
view_method = getattr(self, request.method.lower())
return view_method(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return view_function
class NewThread(InboxView):
"""view for creation of new thread"""
    http_method_names = ('POST',) # name must match what InboxView.dispatch checks
def post(self, request):
"""creates a new thread on behalf of the user
response is blank, because on the client side we just
need to go back to the thread listing view whose
        content should be cached in the client
"""
usernames = request.POST['to_usernames']
usernames = map(lambda v: v.strip(), usernames.split(','))
users = User.objects.filter(username__in=usernames)
missing = copy.copy(usernames)
for user in users:
if user.username in missing:
missing.remove(user.username)
result = dict()
if missing:
result['success'] = False
result['missing_users'] = missing
else:
recipients = get_personal_groups_for_users(users)
message = Message.objects.create_thread(
sender=request.user,
recipients=recipients,
text=request.POST['text']
)
result['success'] = True
result['message_id'] = message.id
return HttpResponse(simplejson.dumps(result), mimetype='application/json')
class PostReply(InboxView):
"""view to create a new response"""
    http_method_names = ('POST',) # name must match what InboxView.dispatch checks
def post(self, request):
parent_id = IntegerField().clean(request.POST['parent_id'])
parent = Message.objects.get(id=parent_id)
message = Message.objects.create_response(
sender=request.user,
text=request.POST['text'],
parent=parent
)
last_visit = LastVisitTime.objects.get(
message=message.root,
user=request.user
)
last_visit.at = datetime.datetime.now()
last_visit.save()
return self.render_to_response(
{'post': message, 'user': request.user},
template_name='group_messaging/stored_message.html'
)
class ThreadsList(InboxView):
"""shows list of threads for a given user"""
template_name = 'group_messaging/threads_list.html'
    http_method_names = ('GET',) # name must match what InboxView.dispatch checks
def get_context(self, request):
"""returns thread list data"""
#get threads and the last visit time
threads = Message.objects.get_threads_for_user(request.user)
sender_id = IntegerField().clean(request.GET.get('sender_id', '-1'))
if sender_id != -1:
threads = threads.filter(sender__id=sender_id)
#for each thread we need to know if there is something
#unread for the user - to mark "new" threads as bold
threads_data = dict()
for thread in threads:
thread_data = dict()
#determine status
thread_data['status'] = 'new'
#determine the senders info
senders_names = thread.senders_info.split(',')
if request.user.username in senders_names:
senders_names.remove(request.user.username)
thread_data['senders_info'] = ', '.join(senders_names)
thread_data['thread'] = thread
threads_data[thread.id] = thread_data
last_visit_times = LastVisitTime.objects.filter(
user=request.user,
message__in=threads
)
for last_visit in last_visit_times:
thread_data = threads_data[last_visit.message_id]
if thread_data['thread'].last_active_at <= last_visit.at:
thread_data['status'] = 'seen'
#after we have all the data - update the last visit time
last_visit_times.update(at=datetime.datetime.now())
return {'threads': threads, 'threads_data': threads_data}
class SendersList(InboxView):
"""shows list of senders for a user"""
template_name = 'group_messaging/senders_list.html'
http_method_names = ('GET',)
def get_context(self, request):
"""get data about senders for the user"""
senders = SenderList.objects.get_senders_for_user(request.user)
senders = senders.values('id', 'username')
return {'senders': senders}
class ThreadDetails(InboxView):
"""shows entire thread in the unfolded form"""
template_name = 'group_messaging/thread_details.html'
http_method_names = ('GET',)
def get_context(self, request, thread_id=None):
"""shows individual thread"""
#todo: assert that current thread is the root
root = Message.objects.get(id=thread_id)
responses = Message.objects.filter(root__id=thread_id)
last_visit, created = LastVisitTime.objects.get_or_create(
message=root,
user=request.user
)
if created is False:
last_visit.at = datetime.datetime.now()
last_visit.save()
return {'root_message': root, 'responses': responses, 'request': request}
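# Illustrative sketch (added; not part of this module) of how the :mod:`urls`
# module mentioned in the header docstring might wire these classes up. The
# URL patterns and names are hypothetical:
#
# from django.conf.urls.defaults import patterns, url
# import group_messaging.views as views
#
# urlpatterns = patterns('',
#     url(r'^threads/$', views.ThreadsList().as_view(), name='get_threads'),
#     url(r'^senders/$', views.SendersList().as_view(), name='get_senders'),
#     url(r'^threads/(?P<thread_id>\d+)/$',
#         views.ThreadDetails().as_view(), name='thread_details'),
#     url(r'^threads/create/$', views.NewThread().as_view(), name='create_thread'),
# )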
|
maxwward/SCOPEBak
|
group_messaging/views.py
|
Python
|
gpl-3.0
| 8,583
|
[
"VisIt"
] |
ba8e6ec063370cd7294658993b908d99edbb5663642155d361bca4c80b3a56bb
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Vtkh(Package):
"""VTK-h is a toolkit of scientific visualization algorithms for emerging
processor architectures. VTK-h brings together several projects like VTK-m
and DIY2 to provide a toolkit with hybrid parallel capabilities."""
homepage = "https://github.com/Alpine-DAV/vtk-h"
url = "https://github.com/Alpine-DAV/vtk-h"
version('master',
git='https://github.com/Alpine-DAV/vtk-h.git',
branch='master',
submodules=True)
maintainers = ['cyrush']
variant("mpi", default=True, description="build mpi support")
variant("tbb", default=True, description="build tbb support")
variant("cuda", default=False, description="build cuda support")
depends_on("cmake")
depends_on("mpi", when="+mpi")
depends_on("tbb", when="+tbb")
depends_on("cuda", when="+cuda")
depends_on("vtkm@master")
depends_on("vtkm@master+tbb", when="+tbb")
depends_on("vtkm@master+cuda", when="+cuda")
def install(self, spec, prefix):
with working_dir('spack-build', create=True):
cmake_args = ["../src",
"-DVTKM_DIR={0}".format(spec["vtkm"].prefix),
"-DENABLE_TESTS=OFF",
"-DBUILD_TESTING=OFF"]
# mpi support
if "+mpi" in spec:
mpicc = spec['mpi'].mpicc
mpicxx = spec['mpi'].mpicxx
cmake_args.extend(["-DMPI_C_COMPILER={0}".format(mpicc),
"-DMPI_CXX_COMPILER={0}".format(mpicxx)])
# tbb support
if "+tbb" in spec:
cmake_args.append("-DTBB_DIR={0}".format(spec["tbb"].prefix))
# cuda support
if "+cuda" in spec:
cmake_args.append("-DENABLE_CUDA=ON")
# this fix is necessary if compiling platform has cuda, but
# no devices (this common for front end nodes on hpc clusters)
# we choose kepler as a lowest common denominator
cmake_args.append("-DVTKm_CUDA_Architecture=kepler")
# use release, instead of release with debug symbols b/c vtkh libs
# can overwhelm compilers with too many symbols
            for arg in std_cmake_args:
                if arg.count("CMAKE_BUILD_TYPE") == 0:
                    cmake_args.append(arg)
            cmake_args.append("-DCMAKE_BUILD_TYPE=Release")
cmake(*cmake_args)
if "+cuda" in spec:
# avoid issues with make -j and FindCuda deps
            # likely an ordering issue that needs to be resolved
# in vtk-h
make(parallel=False)
else:
make()
make("install")
|
skosukhin/spack
|
var/spack/repos/builtin/packages/vtkh/package.py
|
Python
|
lgpl-2.1
| 4,051
|
[
"VTK"
] |
a94952e9456274901154448af3b98074cde170a69c0197f5335b1c7ca0649fb8
|
#!/usr/bin/env python
##~---------------------------------------------------------------------------##
## _______ _______ _______ _ _ ##
## | _ || || || | _ | | ##
## | |_| || || _ || || || | ##
## | || || | | || | ##
## | || _|| |_| || | ##
## | _ || |_ | || _ | ##
## |__| |__||_______||_______||__| |__| ##
## www.amazingcow.com ##
## File : cowtodo.py ##
## Project : COWTODO ##
## Date : Aug 06, 2015 ##
## License : GPLv3 ##
## Author : n2omatt <n2omatt@amaizingcow.com ##
## Copyright : AmazingCow - 2015 - 2018 ##
## ##
## Description : ##
## ##
##---------------------------------------------------------------------------~##
#COWTODO: #01 - Check if the output is a terminal, if not does not put color.
#COWTODO: #02 - Add an option to specify more tags if needed.
#COWTODO: #03 - Better error checking.
#COWTODO: #04 - Refactor the issues output methods.
#COWTODO: #05 - Output to a sql statements, xml, json etc.
#COWTODO: #06 - Make a setup file.
#COWTODO: #09 - Normalize the output messages (Color and style)
#COWTODO: #10 - Change the termcolor to cowtermcolor.
## Imports ##
import getopt;
import os
import os.path;
import pdb;
import re;
import sys;
import time;
################################################################################
## Don't let the standard import error to users - Instead show a ##
## 'nice' error screen describing the error and how to fix it. ##
################################################################################
def __import_error_message_print(pkg_name, pkg_url):
print """Sorry,
cowtodo depends on {0} package
Visit ({1}) to get it.
Or checkout the README.md to learn other ways to install {0}.""".format(
pkg_name,
pkg_url
);
exit(1);
## cowtermcolor ##
try:
from cowtermcolor import *;
except ImportError, e:
__import_error_message_print(
"cowtermcolor",
"https://github.com/AmazingCow-Libs/cowtermcolor_py"
);
################################################################################
## Color ##
################################################################################
ColorPath = Color(MAGENTA);
ColorError = Color(RED);
ColorInfo = Color(BLUE);
ColorProcess = Color(YELLOW);
ColorFile = Color(CYAN);
ColorIssue = Color(GREEN);
ColorNumber = Color(BLUE);
ColorTag = Color(RED);
################################################################################
## Constants ##
################################################################################
class Constants:
#App
APP_NAME = "cowtodo";
APP_VERSION = "0.4.0";
APP_AUTHOR = "N2OMatt <n2omatt@amazingcow.com>"
APP_COPYRIGHT = "\n".join(("Copyright (c) 2015, 2016 - Amazing Cow",
"This is a free software (GPLv3) - Share/Hack it",
"Check opensource.amazingcow.com for more :)"));
#FLags
FLAG_HELP = "h", "help";
FLAG_VERSION = "v", "version";
FLAG_SHORT = "s", "short";
FLAG_LONG = "l", "long";
FLAG_VERBOSE = "V", "verbose";
FLAG_NO_COLORS = "", "no-colors";
FLAG_EXCLUDE = "e", "exclude";
FLAG_EXCLUDE_EXT = "E", "exclude-ext";
FLAG_ADD_EXCLUDE_DIR = "", "add-exclude-dir";
FLAG_REMOVE_EXCLUDE_DIR = "", "remove-exclude-dir";
FLAG_LIST_EXCLUDE_DIR = "", "list-exclude-dir";
FLAGS_SHORT = "hvslVe:E:";
FLAGS_LONG = ["help",
"version",
"short",
"long",
"verbose",
"no-colors",
"exclude-ext=",
"exclude=",
"add-exclude-dir=",
"remove-exclude-dir=",
"list-exclude-dir"];
#Exclude Dir RC Paths.
RC_DIR_PATH = os.path.expanduser("~/.cowtodorc");
RC_FILE_PATH = os.path.join(RC_DIR_PATH,"cowtodorc.txt");
################################################################################
## Globals ##
################################################################################
class Globals:
extensions = [ ".h" , ".c", #C
".cpp" , ".cc", #C++
".cs", #C#
".html", ".htm", #HTML
".js" , ".jsx", #Javascript
".md", #Markdown
".m" , ".mm", #ObjC
".php" , #PHP
".py" , #Python
".sh" #Shell Script.
];
tag_names = [ "COWTODO",
"COWFIX",
"COWHACK",
"COWNOTE", ];
this_file_name = "cowtodo.py";
tag_entries = {
"COWTODO" : [],
"COWHACK" : [],
"COWFIX" : [],
"COWNOTE" : [],
};
verbose = False;
exclude_dirs = [];
paths_to_add_in_exclude_rc = [];
paths_to_remove_in_exclude_rc = [];
################################################################################
## Helper ##
################################################################################
class Helper:
@staticmethod
def print_help(exit_code = -1):
msg = "Usage:" +"""
cowtodo [-hv] [-sl] [-e <path>] <search_path>
cowtodo [--list-exclude-dir]
cowtodo [--add-exclude-dir|remove-exclude-dir] <path>
Options:
*-h --help : Show this screen.
*-v --version : Show app version and copyright.
-s --short : Output the short listing.
-l --long : Output the long listing. (Default)
-V --verbose : Verbose mode, helps to see what it's doing.
--no-colors : Make the output uncolored.
-e --exclude <path> : Exclude the path from scan.
*--list-exclude-dir : List all exclude path in ({rcpath}).
--add-exclude-dir <path> : Add exclude path to ({rcpath}).
--remove-exclude-dir <path> : Remove exclude path from ({rcpath}).
Notes:
If <search_path> is blank the current dir is assumed.
Multiple --exclude <path> can be used.
Multiple --add-exclude-dir <path> can be used.
Multiple --remove-exclude-dir <path> can be used.
Options marked with * are exclusive, i.e. the cowtodo will run that
and exit successfully after the operation.
""";
msg = msg.format(rcpath=Constants.RC_FILE_PATH);
print msg;
if(exit_code != -1):
Helper.exit(exit_code);
@staticmethod
def print_version(exit_code = -1):
print "{} - {} - {}".format(Constants.APP_NAME,
Constants.APP_VERSION,
Constants.APP_AUTHOR);
print Constants.APP_COPYRIGHT;
print;
if(exit_code != -1):
Helper.exit(exit_code);
@staticmethod
def print_output(*args):
print "".join(map(str,args)),
@staticmethod
def print_verbose(*args):
if(Globals.verbose):
print " ".join(map(str,args));
@staticmethod
def print_error(*args):
print ColorError("[ERROR]"),
print " ".join(map(str, args));
@staticmethod
def print_fatal(*args):
print ColorError("[FATAL]"),
print " ".join(map(str, args));
Helper.exit(1);
@staticmethod
def expand_normalize_path(path):
if(len(path) == 0):
Helper.print_fatal("Invalid empty path.");
path = os.path.expanduser(path);
path = os.path.abspath(path);
path = os.path.normpath(path);
return path;
@staticmethod
def clean_str(s, tag):
s = s.replace("\n", "");
#Check if the comment end with a backslash.
has_end_backslash = False;
s2 = s.strip("\\");
if(s2 != s):
has_end_backslash = True;
s = s2;
#Remove comments.
s = s.strip(" # " ); # PYTHON comments
s = s.strip(" /*/ "); # C comments
s = s.strip(" / "); # C++ comments
s = s.lstrip(" <!-- ").rstrip(" -->");
#Remove the spacing.
s = s.strip(" ");
#Remove the current tag.
s = s.replace(tag, "");
s = s.strip(" : ");
#If ends with backslash add a new line.
if(has_end_backslash):
s += "\n";
#Clean up lines that are empty...
if(s == "\n"):
s = "";
return s;
@staticmethod
def exit(code):
exit(code);
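# Worked example (added for clarity) of Helper.clean_str above: for a C++
# style comment line the leaders and the tag itself are stripped away,
#     Helper.clean_str("// COWTODO: fix this\n", "COWTODO")  # -> "fix this"
# while a line ending in a backslash keeps a trailing newline, so parse()
# can append the continuation line below it.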
################################################################################
## Exclude Dirs RC ##
################################################################################
class ExcludeDirRC:
@staticmethod
def get_excluded_dirs():
ExcludeDirRC.check_dir_and_file();
lines = open(Constants.RC_FILE_PATH).readlines();
return map(lambda x: x.replace("\n", ""), lines);
@staticmethod
def check_dir_and_file():
#Check if the rc folder exists.
if(not os.path.isdir(Constants.RC_DIR_PATH)):
msg = "Missing folder at: ({})\n Creating one now.";
msg = msg.format(ColorPath(Constants.RC_DIR_PATH));
Helper.print_error(msg);
try:
os.mkdir(Constants.RC_DIR_PATH);
except Exception, e:
Helper.print_fatal(e);
#Check if the database exists.
if(not os.path.isfile(Constants.RC_FILE_PATH)):
msg = "Missing database file at ({})\n Your dirs are gone - Creating one now.";
msg = msg.format(ColorPath(Constants.RC_FILE_PATH));
Helper.print_error(msg);
cmd = "touch {}".format(Constants.RC_FILE_PATH);
ret = os.system(cmd);
if(ret != 0):
Helper.print_fatal("cmd: ({}) failed.".format(ColorInfo(cmd)));
@staticmethod
def verify_paths():
ExcludeDirRC.check_dir_and_file();
#Get all paths and check if them refer to a directory.
invalid_paths = [];
line = 1;
for path in ExcludeDirRC.get_excluded_dirs():
fullpath = Helper.expand_normalize_path(path);
if(os.path.isdir(fullpath) == False):
invalid_paths.append({"line" : line, "path" : path});
line += 1;
#If any of them is invalid, show a fatal error log.
if(len(invalid_paths) != 0):
rc_path = ColorPath(Constants.RC_FILE_PATH);
msg = "Invalid Paths in ({}):\n".format(rc_path);
for d in invalid_paths:
msg += " {} - line:({})\n".format(ColorError(d["path"]),
ColorInfo(d["line"]));
msg += "\nFix it... then cowtodo can run."
Helper.print_fatal(msg);
@staticmethod
def print_list():
ExcludeDirRC.check_dir_and_file();
print "Excluded dirs - (Will be ignored in all cowtodo calls):";
lines = ExcludeDirRC.get_excluded_dirs();
if(len(lines) == 0):
print "Empty...";
return;
for line in lines:
print " ", ColorPath(line.replace("\n", ""));
@staticmethod
def add_path(path):
ExcludeDirRC.check_dir_and_file();
msg = "Adding path in exclude dirs rc: ({})".format(ColorPath(path));
Helper.print_verbose(msg);
fullpath = Helper.expand_normalize_path(path);
#Check if path is valid.
if(not os.path.isdir(fullpath)):
Helper.print_fatal("Invalid path:({})".format(ColorPath(path)));
#Check if we already have this path included.
#If included log and return.
grep_cmd = "grep -q \"{}\" \"{}\"".format(fullpath, Constants.RC_FILE_PATH);
ret = os.system(grep_cmd);
if(ret == 0):
msg = "Path ({}) is already added...".format(ColorPath(path));
Helper.print_verbose(msg);
return;
#Path is not included add it.
echo_cmd = "echo \"{}\" >> \"{}\"".format(fullpath, Constants.RC_FILE_PATH);
os.system(echo_cmd);
@staticmethod
def remove_path(path):
ExcludeDirRC.check_dir_and_file();
msg = "Removing path in exclude dirs rc: ({})".format(ColorPath(path));
Helper.print_verbose(msg);
fullpath = Helper.expand_normalize_path(path);
#Check if we already have this path included.
#If not included log and fail.
grep_cmd = "grep -q \"{}\" \"{}\"".format(fullpath, Constants.RC_FILE_PATH);
ret = os.system(grep_cmd);
if(ret != 0):
msg = "Path ({}) is not added...".format(ColorPath(path));
Helper.print_fatal(msg);
#Path included, so grep the inverse to a temp file
#move the temp over the original one.
cmd = "grep -v \"{search}\" \"{original}\" > \"{temp}.temp\"";
grep_inv_cmd = cmd.format(search = fullpath,
original = Constants.RC_FILE_PATH,
temp = Constants.RC_FILE_PATH);
os.system(grep_inv_cmd);
#Now move the temp over the original
mv_cmd = "mv {temp}.temp {original}".format(temp = Constants.RC_FILE_PATH,
original = Constants.RC_FILE_PATH);
os.system(mv_cmd);
################################################################################
## Tag Entry ##
################################################################################
class TagEntry:
def __init__(self, filename):
self.filename = filename;
self.data = {};
def add(self, tag_type, line_no, line_str):
#Check if we already added something in this tag.
if(tag_type not in self.data):
self.data[tag_type] = [];
self.data[tag_type].append([line_no, line_str]);
################################################################################
## Scan/Parse Functions ##
################################################################################
def scan(start_path):
#Check if start path is valid.
start_path = Helper.expand_normalize_path(start_path);
if(not os.path.isdir(start_path)):
Helper.print_fatal("{} ({})".format(
"start path is not valid directory - ",
ColorPath(start_path)));
## Scan the directories.
for root, dirs, files in os.walk(start_path, topdown=True):
#Check if current root path is in our exclude list of if it is hidden.
is_in_exclude_list = Helper.expand_normalize_path(root) in Globals.exclude_dirs;
is_hidden = os.path.split(root)[1][0] == "." and len(root) != 1;
if(is_in_exclude_list or is_hidden):
#Log if verbose.
Helper.print_verbose("{} ({})".format(ColorInfo("Ignoring:"),
ColorPath(root)));
#Remove the os.walk dirs.
dirs[:] = [];
#Remove the path from the exclude paths since we're already hit it.
#So it will be perform better because we gonna search a lot of dirs
#without test a path that makes no sense anymore.
if(is_in_exclude_list):
Globals.exclude_dirs.remove(os.path.abspath(root));
continue; #Skip the rest of for block.
Helper.print_verbose("{} ({})".format(ColorProcess("Scanning:"),
ColorPath(root)));
#For each file check if it matches with our file extensions.
for file in files:
#We found this file. Ignore it :)
if(file == Globals.this_file_name):
continue;
#Get the filename and its extension.
filename, fileext = os.path.splitext(file);
#Check if the fileext is in our extensions list.
for ext in Globals.extensions:
if(fileext == ext):
Helper.print_verbose(" {} ({})".format(ColorProcess("Parsing"),
ColorFile(file)));
#Parse the file to get all tags.
parse(os.path.join(root, file));
def parse(filename):
tag_entry = TagEntry(filename);
#Open the file and get the lines.
lines = open(filename).readlines();
#For all lines.
for line_no in xrange(0, len(lines)):
line = lines[line_no];
#Iterate for all of our tags in this line.
for tag_name in Globals.tag_names:
search_str = ".*%s.*" %(tag_name); #Build a regex.
#Check if any tag was found.
if(re.search(search_str, line) is not None):
clean_line = ""; #Start with a blank line.
#Keep consuming the lines while them
#end with the \ (backslash) char.
#Or the end of file is reached.
while(True):
clean_line += Helper.clean_str(line, tag_name);
#End of file OR
#empty (Just a new line) OR
## line not ending with \ backslash
if(line_no >= len(lines) or len(line) < 2 or line[-2] != "\\"):
break;
line_no += 1;
line = lines[line_no];
                    #Oops... This comment is empty, but we cannot discard it because
                    #its author may have put it there as a marker in the code.
if(len(clean_line) == 0):
clean_line += "__EMPTY [{}] COMMENT__".format(tag_name);
tag_entry.add(tag_name, line_no, clean_line);
#We just take in account on type of tag for each time.
break;
for tag_name in tag_entry.data.keys():
Globals.tag_entries[tag_name].append(tag_entry);
################################################################################
## Output Functions ##
################################################################################
def output_long():
#Output the messages for all tag names.
for tag_name in Globals.tag_names:
#Get the list of entries for this tag.
tag_entry_list = Globals.tag_entries[tag_name];
tag_entry_list_len = len(tag_entry_list);
if(tag_entry_list_len == 0): #Have nothing to show.
continue;
#Print the Tag name and count of files with it.
out = "{} - Files({})";
out = out.format(ColorTag(tag_name), ColorNumber(tag_entry_list_len));
print out;
#For each entry for this tag.
for entry in tag_entry_list:
entry_data = entry.data[tag_name];
entry_data_len = len(entry_data);
#Print the file name and the count of issues.
out = "{} - Issues({}):";
out = out.format(ColorFile(entry.filename),
ColorNumber(entry_data_len));
print out;
digits_on_greater_entry_no = len(str(entry_data[-1][0]));
for entry_info in entry_data:
entry_file = entry.filename;
entry_no = entry_info[0];
entry_issue = entry_info[1];
spacing_chars = " " * 8;
#Left padding the entry number with zeros.
number_of_zeros = (digits_on_greater_entry_no - len(str(entry_no)));
entry_no = ("0" * number_of_zeros) + str(entry_no);
#Padding them entry issue to make it aligned.
padding = " " * (len(entry_no) + 3);
entry_issue = entry_issue.replace("\n", "\n" + spacing_chars + padding)
#Put colors.
colored_entry_no = ColorNumber(entry_no);
colored_entry_issue = ColorIssue(entry_issue);
out = "{}({}) {}";
out = out.format(spacing_chars,
colored_entry_no,
colored_entry_issue);
print out;
def output_short():
#Output the messages for all tag names.
for tag_name in Globals.tag_names:
#Get the list of entries for this tag.
tag_entry_list = Globals.tag_entries[tag_name];
tag_entry_list_len = len(tag_entry_list);
if(tag_entry_list_len == 0): #Have nothing to show.
continue;
#Print the Tag name and count of files with it.
out = "{} - Files({})";
out = out.format(ColorTag(tag_name), ColorNumber(tag_entry_list_len));
print out;
#For each entry for this tag.
for entry in tag_entry_list:
entry_data = entry.data[tag_name];
entry_data_len = len(entry_data);
digits_on_greater_entry_no = len(str(entry_data[-1][0]));
for entry_info in entry_data:
entry_file = entry.filename;
entry_no = entry_info[0];
entry_issue = entry_info[1];
#Left padding the entry number with zeros.
number_of_zeros = (digits_on_greater_entry_no - len(str(entry_no)));
entry_no = ("0" * number_of_zeros) + str(entry_no);
#Padding the entry issue to make them aligned
entry_issue_offset = " " * (len(entry_file) + len(entry_no) + 3);
entry_issue = entry_issue.replace("\n", "\n" + entry_issue_offset);
#Print the line of issue and its message.
colored_entry_file = ColorFile(entry_file);
colored_entry_no = ColorNumber(entry_no);
colored_entry_issue = ColorIssue(entry_issue);
out = "{}({}) {}";
out = out.format(colored_entry_file,
colored_entry_no,
colored_entry_issue);
print out;
################################################################################
## Script Initialization ##
################################################################################
def main():
try:
#Get the command line options.
options = getopt.gnu_getopt(sys.argv[1:],
Constants.FLAGS_SHORT,
Constants.FLAGS_LONG);
except Exception, e:
Helper.print_fatal(e);
#Options switches.
long_requested = False;
short_requested = False;
#Parse the options.
for option in options[0]:
key, value = option;
key = key.lstrip("-");
#Help / Version / List exclude dirs - Exclusives.
if(key in Constants.FLAG_HELP):
Helper.print_help(0);
elif(key in Constants.FLAG_VERSION):
Helper.print_version(0);
elif(key in Constants.FLAG_LIST_EXCLUDE_DIR):
ExcludeDirRC.print_list();
exit(0);
#Long / Short output - Optionals.
elif(key in Constants.FLAG_LONG ): long_requested = True;
elif(key in Constants.FLAG_SHORT ): short_requested = True;
#Verbose / No Colors - Optionals.
elif(key in Constants.FLAG_VERBOSE):
Globals.verbose = True;
elif(key in Constants.FLAG_NO_COLORS):
ColorMode.mode = ColorMode.NEVER;
#Exclude dir - Optional
elif(key in Constants.FLAG_EXCLUDE):
path = Helper.expand_normalize_path(value);
if(not os.path.isdir(path)):
msg = "Path to exclude is invalid ({})";
Helper.print_fatal(msg.format(ColorPath(path)));
Globals.exclude_dirs.append(path);
#Exclude ext - Optional
elif(key in Constants.FLAG_EXCLUDE_EXT):
if(value[0] != "."):
value = "." + value;
if(Globals.extensions.count(value) == 0):
msg = "Extension not recognized: ({})".format(ColorInfo(value));
Helper.print_fatal(msg);
Globals.extensions.remove(value);
#Add / Remove the dir to exclude rc - Optionals
#Error checking about paths will be done when adding the paths.
elif(key in Constants.FLAG_ADD_EXCLUDE_DIR):
Globals.paths_to_add_in_exclude_rc.append(value);
elif(key in Constants.FLAG_REMOVE_EXCLUDE_DIR):
Globals.paths_to_remove_in_exclude_rc.append(value);
#Add/Remove all paths to/from rc before start the run.
for path in Globals.paths_to_add_in_exclude_rc:
ExcludeDirRC.add_path(path);
for path in Globals.paths_to_remove_in_exclude_rc:
ExcludeDirRC.remove_path(path);
#Add all the Excluded paths in rc to the list of excluded paths.
Globals.exclude_dirs += ExcludeDirRC.get_excluded_dirs();
#Set the output function based upon the flag.
output_func = output_long;
if(short_requested ): output_func = output_short;
if(long_requested ): output_func = output_long;
#Check if the path is present.
path = ".";
if(len(options[1]) > 0):
path = options[1][0];
#Do a scan and present the results.
scan(path);
Helper.print_verbose("\n\n");
output_func();
if(__name__ == "__main__"):
main();
|
AmazingCow/COWTODO
|
cowtodo.py
|
Python
|
gpl-3.0
| 26,961
|
[
"VisIt"
] |
dbcffa75366987053337fe0d4425778cff6ff83b14fba7e2ef58c7a4fc6d9432
|
#
# Angel Farguell, CU Denver
#
from __future__ import absolute_import
import logging
import re
import glob
import json
import subprocess
import os.path as osp
from datetime import datetime, timedelta
import pytz
import numpy as np
import netCDF4 as nc
from utils import Dict, split_path, utc_to_utcf, ensure_dir
from ingest.sat_source import SatSource
def parse_filename(file_name):
"""
Parse filename from AWS dataset name
:param file_name: AWS file name to parse
:return info: dictionary with information from file name
"""
info = {}
pattern = 'FDC([CF]{1})-M([0-9]{1})_G([0-9]{2})_s([0-9]{14})_e([0-9]{14})'
tfrmt = '%Y%j%H%M%S'
match = re.search(pattern,file_name)
    if match is not None and len(match.groups()) == 5:
info['domain'] = match.group(1)
info['mode'] = match.group(2)
info['satellite'] = match.group(3)
info['start_date'] = datetime.strptime(match.group(4)[0:13],tfrmt).replace(tzinfo=pytz.UTC)
info['end_date'] = datetime.strptime(match.group(5)[0:13],tfrmt).replace(tzinfo=pytz.UTC)
return info
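# Illustrative example (hypothetical object name, following the GOES ABI fire
# product naming convention assumed by the regex above):
#   parse_filename('OR_ABI-L2-FDCF-M6_G16_s20203001200000_e20203001209599_c...')
# would yield domain 'F', mode '6', satellite '16', and start/end datetimes
# parsed from the first 13 digits of the 14-digit timestamps (the trailing
# tenths of a second are dropped by the [0:13] slice).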
def parse_projinfo(file_name):
"""
Parse projection information from NetCDF AWS dataset file
:param file_name: AWS file name to parse
:return info: dictionary with information from file name
"""
d = nc.Dataset(file_name)
gip = d['goes_imager_projection']
x = d['x']
y = d['y']
return {'shgt': gip.getncattr('perspective_point_height'), 'requ': gip.getncattr('semi_major_axis'),
'rpol': gip.getncattr('semi_minor_axis'), 'lon0': gip.getncattr('longitude_of_projection_origin'),
'xscl': x.getncattr('scale_factor'), 'xoff': x.getncattr('add_offset'), 'xdim': x.shape[0],
'yscl': y.getncattr('scale_factor'), 'yoff': y.getncattr('add_offset'), 'ydim': y.shape[0]}
def aws_request(cmd, max_retries=5):
"""
Request command in AWS storage using AWS CLI retrying if an error occurs
:param cmd: list with command and flags to run using subprocess
:param max_retries: max retries to request to AWS CLI
:return r: output from check_output method of subprocess
"""
for tries in range(max_retries):
try:
logging.debug('aws_request - {}'.format(' '.join(cmd)))
r = subprocess.check_output(cmd)
return r
except:
if tries == max_retries-1:
logging.error('error when requesting "{}", no more retries left'.format(' '.join(cmd)))
else:
logging.warning('error when requesting "{}", retry {}/{}...'.format(' '.join(cmd),tries+1,max_retries))
return None
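# Illustrative usage (hypothetical bucket): listing a public GOES bucket with
#   aws_request(['aws', 's3', 'ls', 'noaa-goes16/ABI-L2-FDCF/', '--no-sign-request'])
# returns the raw bytes printed by the AWS CLI, or None once max_retries
# attempts have failed.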
def aws_search(awspaths, time=(datetime(2000,1,1),datetime.now())):
"""
Search data in AWS storage using AWS CLI
:param awspaths: list of paths to search using AWS CLI
:param time: time interval in datetime format to consider the data from
:return result: metadata of the satellite data found
"""
ls_cmd = 'aws s3 ls {} --recursive --no-sign-request'
result = {}
for awspath in awspaths:
cmd = ls_cmd.format(awspath).split()
r = aws_request(cmd)
        if r is not None:
for line in r.decode().split('\n'):
if len(line):
_,_,file_size,file_name = list(map(str.strip,line.split()))
info = parse_filename(file_name)
if info['start_date'] <= time[1] and info['start_date'] >= time[0]:
base = split_path(awspath)[0]
url = osp.join('s3://',base,file_name)
file_basename = osp.basename(file_name)
product_id = 'A{:04d}{:03d}_{:02d}{:02d}'.format(info['start_date'].year,
info['start_date'].timetuple().tm_yday,
info['start_date'].hour,
info['start_date'].minute)
result.update({product_id: {'file_name': file_basename, 'file_remote_path': file_name,
'time_start': utc_to_utcf(info['start_date']),
'time_end': utc_to_utcf(info['end_date']),
'updated': datetime.now(), 'url': url, 'file_size': int(file_size),
'domain': info['domain'], 'satellite': 'G{}'.format(info['satellite']),
'mode': info['mode'],
'product_id': product_id}})
return result
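# Note (illustrative): the product_id above encodes year, day of year, hour and
# minute of the granule start time; e.g. a granule starting 2020-10-26 12:00 UTC
# (day 300 of 2020) gets product_id 'A2020300_1200'.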
def download_url(url, sat_path):
"""
Download URL from AWS storage using AWS CLI
    :param url: URL of the file in AWS storage
    :param sat_path: local path to download the file to
"""
cmd = 'aws s3 cp {} {} --no-sign-request'.format(url, sat_path).split()
r = aws_request(cmd)
def create_grid(proj_info, out_path):
"""
Create AWS grid
:param proj_info: projection information dictionary to create the grid coordinates
:param out_path: output path to write the grid coordinates
"""
shgt = proj_info['shgt']
requ = proj_info['requ']
rpol = proj_info['rpol']
lon0 = proj_info['lon0']
xscl = proj_info['xscl']
xoff = proj_info['xoff']
xdim = proj_info['xdim']
yscl = proj_info['yscl']
yoff = proj_info['yoff']
ydim = proj_info['ydim']
dtor = 0.0174533
pi = 3.1415926536
rrat = (requ*requ)/(rpol*rpol)
bigh = requ + shgt
i = np.reshape(np.arange(xdim),(1,xdim))
j = np.reshape(np.arange(ydim),(ydim,1))
x = i*xscl + xoff
y = j*yscl + yoff
a = np.sin(x)**2 + np.cos(x)**2*(np.cos(y)**2 + rrat*np.sin(y)**2)
b = -2*bigh*np.cos(x)*np.cos(y)
c = bigh**2-requ**2
with np.errstate(invalid='ignore'):
rs = (-b - np.sqrt(b*b - 4.*a*c))/(2.*a)
sx = rs*np.cos(x)*np.cos(y)
sy = -rs*np.sin(x)
sz = rs*np.cos(x)*np.sin(y)
lats = (np.arctan(rrat*sz/np.sqrt((bigh-sx)**2 + sy**2)))/dtor
lons = (lon0*dtor - np.arctan(sy/(bigh-sx)))/dtor
nc_file = nc.Dataset(out_path, 'w')
xx = nc_file.createDimension("x", xdim)
yy = nc_file.createDimension("y", ydim)
longitude = nc_file.createVariable("lon", "f4", ("y","x"))
latitude = nc_file.createVariable("lat", "f4", ("y","x"))
longitude[:] = lons
latitude[:] = lats
nc_file.close()
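# Sanity check (illustrative, not part of the original module): at the
# sub-satellite point x = y = 0 we get a = 1, b = -2*bigh and c = bigh**2-requ**2,
# so rs = bigh - requ, sy = sz = 0, and the formulas reduce to lat = 0 and
# lon = lon0, as expected for the geostationary projection.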
def process_grid(ingest_dir, meta):
"""
Process AWS grid
:param ingest_dir: path to ingest directory for the satellite product
:param meta: metadata to create the grid for
"""
current_proj_path = osp.join(ingest_dir,'{}_projinfo.json'.format(meta['file_name'].split('.')[0]))
current_grid_path = osp.join(ingest_dir,'{}_grid.nc'.format(meta['file_name'].split('.')[0]))
current_proj = parse_projinfo(meta['local_path'])
archived_proj_paths = glob.glob(osp.join(ingest_dir, '*_projinfo.json'))
if len(archived_proj_paths):
for archived_proj_path in archived_proj_paths:
if osp.exists(archived_proj_path):
archived_proj = json.load(open(archived_proj_path, 'r'))
archived_grid_path = eval(archived_proj)['grid_path']
current_proj.update({'grid_path': archived_grid_path})
if archived_proj == str(current_proj):
archived_proj = eval(archived_proj)
if not osp.exists(archived_grid_path):
create_grid(archived_proj, archived_grid_path)
return {'proj_path': archived_proj_path, 'grid_path': archived_grid_path}
current_proj.update({'grid_path': current_grid_path})
json.dump(str(current_proj), open(current_proj_path, 'w'))
create_grid(current_proj, current_grid_path)
return {'proj_path': current_proj_path, 'grid_path': current_grid_path}
class SatSourceAWSError(Exception):
"""
Raised when a SatSourceAWS cannot retrieve satellite data.
"""
pass
class SatSourceAWS(SatSource):
"""
    Satellite source for AWS storage that implements common functionality, for example
- local satellite validation (file size check)
- Satellite retrieval with retries (smart check whether server implements http-range)
"""
def __init__(self, js):
super(SatSourceAWS, self).__init__(js)
def search_api_sat(self, sname, bounds, time, collection=None):
"""
API search of the different satellite granules and return metadata dictionary
:param sname: short name satellite product, ex: ''
:param bounds: bounding box as (lon_min,lon_max,lat_min,lat_max)
:param time: time interval (init_time_iso,final_time_iso)
:param collection: id of the collection to specify
:return metas: a dictionary with all the metadata for the API search
"""
# complete_days
stime,etime = time
n_complete_days = int((etime.replace(hour=0,minute=0,second=0,microsecond=0)-stime.replace(hour=0,minute=0,second=0,microsecond=0)).days)+1
complete_days = [(stime + timedelta(days=x)).strftime('%Y/%j/') for x in range(n_complete_days)]
        awspaths = [osp.join(self.base_url.format(self.platform), self.product.format(self.sector), day) for day in complete_days]
metas = aws_search(awspaths,time)
logging.info('SatSourceAWS.search_api_sat - {} metas found'.format(len(metas)))
return metas
def get_metas_sat(self, bounds, time):
"""
        Get all the metadata for all the necessary products
        :param bounds: bounding box as (lon_min,lon_max,lat_min,lat_max)
        :param time: time interval (init_time_datetime,final_time_datetime)
:return metas: dictionary with the metadata of all the products
"""
return Dict({'fire': self.search_api_sat(None,None,time)})
def group_metas(self, metas):
"""
Group all the satellite metas before downloading to minimize number of files downloaded
:param metas: satellite metadatas from API search
        :return metas: grouped and cleaned metadata dictionary
"""
return metas
def _download_url(self, url, sat_path, token, min_size):
"""
Download a satellite file from a satellite service
:param url: the URL of the file
:param sat_path: local path to download the file
:param token: key to use for the download or None if not
"""
download_url(url, sat_path)
def retrieve_metas(self, metas):
"""
Retrieve satellite data from CMR API metadata
        :return metas: dictionary with all the satellite data to retrieve
        :return manifest: dictionary with all the satellite data retrieved
"""
logging.info('retrieve_metas - downloading {} products'.format(self.id))
manifest = Dict({})
for p_id,meta in metas['fire'].items():
urls = [meta['url']]
fire_meta = self.download_sat(urls)
fire_meta.update(meta)
local_path = fire_meta['local_path']
remote_size = int(fire_meta['file_size'])
local_size = osp.getsize(local_path)
if local_size != remote_size:
                logging.error('retrieve_metas - local file with size {} different than remote size {}'.format(local_size,remote_size))
continue
info_path = local_path + '.size'
open(ensure_dir(info_path), 'w').write(str(local_size))
geo_meta = process_grid(self.ingest_dir,fire_meta)
manifest.update({p_id: {
'time_start_iso' : fire_meta['time_start'],
'time_end_iso' : fire_meta['time_end'],
'geo_url' : fire_meta['url'],
'geo_local_path' : geo_meta['grid_path'],
'geo_description' : self.info,
'fire_url' : fire_meta['url'],
'fire_local_path' : fire_meta['local_path'],
'fire_description' : self.info
}})
return manifest
def read_sat(self, files, metas, bounds):
"""
Read all the satellite files from a specific satellite service
        :param files: dictionary with geolocation (03), fire (14), and optionally reflectance (09) file paths for the satellite service
:param bounds: spatial bounds tuple (lonmin,lonmax,latmin,latmax)
:return ret: dictionary with Latitude, Longitude and fire mask arrays read
"""
pass
def read_files_sat(self, file, bounds):
"""
Read a specific satellite files from a specific satellite service
        :param file: dictionary with geolocation (03), fire (14), and optionally reflectance (09) file paths for the satellite service
:param bounds: spatial bounds tuple (lonmin,lonmax,latmin,latmax)
:return ret: dictionary with Latitude, Longitude and fire mask arrays read
"""
pass
# instance variables
base_url='noaa-{}'
product='ABI-L2-FDC{}'
sector='F' # full disk
|
openwfm/wrfxpy
|
src/ingest/sat_source_aws.py
|
Python
|
mit
| 12,986
|
[
"NetCDF"
] |
2fe7b1f4c93fca58ed53e16d96da7d0df988718c1b19fdee8c2a7b3b49471752
|
import datetime
import glob
import os
import random
import subprocess
import uuid
import pyproj
import xarray
from clover.geometry.bbox import BBox
from clover.netcdf.describe import describe
from clover.render.renderers.stretched import StretchedRenderer
from clover.render.renderers.unique import UniqueValuesRenderer
from clover.utilities.color import Color
from django.conf import settings
from django.db import Error
from ncdjango.models import Service, Variable
from landscapesim import models
from landscapesim.common.query import ssim_query
NC_ROOT = getattr(settings, 'NC_SERVICE_DATA_ROOT', None)
CLOVER_PATH = getattr(settings, 'CLOVER_PATH', None)
CREATE_SERVICE_ERROR_MSG = "Error creating ncdjango service for {} in scenario {}, skipping..."
CREATE_RENDERER_ERROR_MSG = "Error creating renderer for {vname}. Did you set ID values for your {vname} definitions?"
# Some variables don't quite line up, but this is by design.
# NAME_HASH maps service name to variable named ids used by that service
NAME_HASH = {'strata': 'stratum',
'secondary_strata': 'secondary_stratum',
'stateclasses': 'stateclass',
'age': 'age',
'transition_groups': 'transition_type'} # transition type ids are used in tg rasters
# CTYPE_HASH maps service name to raster class type
CTYPE_HASH = {'strata': 'str',
'aatp': 'tgap',
'stateclasses': 'sc',
'state_attributes': 'sa',
'transition_attributes': 'ta',
'age': 'age',
'transition_groups': 'tg',
'avg_annual_transition_probability': 'tgap'}
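# Illustrative example (hypothetical file name): an output raster named
# 'It0001-Ts0005-tg-93.tif' splits into iteration 'It0001', timestep 'Ts0005',
# ctype 'tg' (transition_groups per CTYPE_HASH above) and ssim id 93.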
# Sometimes we need to talk to the ssim dbs directly =)
SSIM_TABLE = {'transition_groups': 'STSim_TransitionGroup',
'state_attributes': 'STSim_StateAttributeType',
'transition_attributes': 'STSim_TransitionAttributeType',
'avg_annual_transition_probability': 'STSim_TransitionGroup'} # must match ids, since they're duplicated
# We need to match up the ssim name values to our table
INNER_TABLE = {'transition_groups': models.TransitionGroup,
'state_attributes': models.StateAttributeType,
'transition_attributes': models.TransitionAttributeType,
'avg_annual_transition_probability': models.TransitionGroup}
# Sanity check for creating ncdjango services correctly
def has_nc_root(func):
def wrapper(*args, **kwargs):
if NC_ROOT:
return func(*args, **kwargs)
else:
            print('NC_SERVICE_DATA_ROOT not set. Did you install and set up ncdjango properly?')
return wrapper
class ServiceGenerator:
""" A class for creating ncdjango services from GeoTIFF outputs. """
def __init__(self, scenario):
self.scenario = scenario
@staticmethod
def generate_unique_renderer(values, randomize_colors=False):
if randomize_colors:
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
return UniqueValuesRenderer([(v[0], Color(r, g, b)) for v in values], labels=[v[1] for v in values])
else:
return UniqueValuesRenderer([(v[1], Color(*[int(c) for c in v[0].split(',')[1:]])) for v in values],
labels=[v[2] for v in values])
@staticmethod
def generate_stretched_renderer(info):
v_min = None
v_max = None
for var in info['variables'].keys():
if var not in ['x', 'y', 'lat', 'lon', 'latitude', 'longitude']:
variable = info['variables'][var]
minimum = variable['min']
maximum = variable['max']
v_min = minimum if not v_min or minimum < v_min else v_min
                v_max = maximum if not v_max or maximum > v_max else v_max
v_mid = (v_max - v_min) / 2 + v_min
return StretchedRenderer([
(v_min, Color(240, 59, 32)),
(v_mid, Color(254, 178, 76)),
(v_max, Color(255, 237, 160))
])
def generate_service(self, filename_or_pattern, variable_name, unique=True, has_colormap=True,
model_set=None, model_id_lookup=None,):
"""
Creates an ncdjango service from a geotiff (stack).
:param filename_or_pattern: A local filename or glob pattern.
:param variable_name: The variable name of the data to assign to the ncdjango service.
:param unique: Indicate whether to use a UniqueValuesRenderer or a StretchedRenderer
:param has_colormap: Indicate whether the service has a colormap or not.
:param model_set: The Queryset to filter over.
:param model_id_lookup: A unique identifier used for specific datasets.
:return: One (1) ncdjango service.
"""
has_time = '*' in filename_or_pattern # pattern if true, filename otherwise
# Construct relative path for new netcdf file, relative to NC_ROOT. Used as 'data_path' in ncdjango.Service
nc_rel_path = os.path.join(
self.scenario.project.library.name, self.scenario.project.name, 'Scenario-' + str(self.scenario.sid)
)
if has_time:
nc_rel_path = os.path.join(nc_rel_path, 'output', variable_name + '.nc')
else:
nc_rel_path = os.path.join(nc_rel_path, filename_or_pattern.replace('tif', 'nc'))
# Absolute path where we want the new netcdf to live.
nc_full_path = os.path.join(NC_ROOT, nc_rel_path)
if not os.path.exists(os.path.dirname(nc_full_path)):
os.makedirs(os.path.dirname(nc_full_path))
variable_names = []
# No patterns, so create a simple input raster
if not has_time:
self.convert_to_netcdf(
os.path.join(self.scenario.input_directory, filename_or_pattern), nc_full_path, variable_name
)
# Time series output pattern, convert to timeseries netcdf
else:
ctype = filename_or_pattern[:-4].split('-')[2:][0]
assert ctype == CTYPE_HASH[variable_name] # sanity check
# collect all information to make valid patterns
iterations = []
timesteps = []
ssim_ids = []
glob_pattern = glob.glob(os.path.join(self.scenario.output_directory, filename_or_pattern))
glob_pattern.sort()
for f in glob_pattern:
it, ts, *ctype_id = f[:-4].split(os.sep)[-1].split('-')
if it not in iterations:
iterations.append(it) # E.g. ['It0001','It0002', ...]
                if ts not in timesteps: # E.g. ['Ts0000','Ts0001', ...]
timesteps.append(ts)
if len(ctype_id) > 1:
ssim_id = int(ctype_id[1])
if ssim_id not in ssim_ids:
ssim_ids.append(ssim_id) # E.g. ['tg', '93'], ['ta', '234'], etc.
assert ctype == ctype_id[0] # pattern matching is way off (not sure this would even happen)
# ssim_ids are internal, have to match with our system
# service variables are <variable_name>-<inner_id>-<iteration>
if len(ssim_ids):
filename_patterns = []
ssim_result = ssim_query('select * from ' + SSIM_TABLE[variable_name], self.scenario.project.library)
# Create valid hash map
ssim_hash = {}
for ssim_id in ssim_ids:
inner_id = None
for row in ssim_result:
                        # match primary key id and project id, and only create filename_patterns if a match is found
if ssim_id == row[0] and str(self.scenario.project.pid) == str(row[1]):
name = row[2]
inner_id = INNER_TABLE[variable_name].objects.filter(
name__exact=name, project=self.scenario.project
).first().id
break
if inner_id:
ssim_hash[ssim_id] = inner_id
# Now build proper filename_patterns
for it in iterations:
for ssim_id in ssim_ids:
filename_patterns.append(os.path.join(
self.scenario.output_directory, '{}-Ts*-{}-{}.tif'.format(it, ctype, ssim_id)
))
for pattern in filename_patterns:
pattern_id = ssim_hash[int(pattern.split(os.sep)[-1].split('-')[-1][:-4])]
iteration_num = int(pattern.split(os.sep)[-1].split('-')[0][2:])
iteration_var_name = '{variable_name}-{id}-{iteration}'.format(
variable_name=variable_name,
id=pattern_id,
iteration=iteration_num
)
variable_names.append(iteration_var_name)
iteration_nc_file = os.path.join(self.scenario.output_directory,
iteration_var_name + '.nc')
self.convert_to_netcdf(pattern, iteration_nc_file, iteration_var_name)
merge_nc_pattern = os.path.join(self.scenario.output_directory, variable_name + '-*-*.nc')
# no ids
# service variables are <variable_name>-<iteration>
else:
filename_patterns = [os.path.join(self.scenario.output_directory, '{}-Ts*-{}.tif'.format(it, ctype))
for it in iterations]
merge_nc_pattern = os.path.join(self.scenario.output_directory, variable_name + '-*.nc')
for pattern in filename_patterns:
iteration_num = int(pattern.split(os.sep)[-1].split('-')[0][2:])
iteration_var_name = '{variable_name}-{iteration}'.format(variable_name=variable_name,
iteration=iteration_num)
variable_names.append(iteration_var_name)
iteration_nc_file = os.path.join(self.scenario.output_directory,
iteration_var_name + '.nc')
self.convert_to_netcdf(pattern, iteration_nc_file, iteration_var_name)
self.merge_netcdf(merge_nc_pattern, nc_full_path)
info = describe(nc_full_path)
grid = info['variables'][variable_names[0] if len(variable_names) else variable_name]['spatial_grid']['extent']
extent = BBox((grid['xmin'], grid['ymin'], grid['xmax'], grid['ymax']), projection=pyproj.Proj(grid['proj4']))
steps_per_variable = None
t = None
t_start = None
t_end = None
dimensions = list(info['dimensions'].keys())
if 'x' in dimensions:
x, y = ('x', 'y')
else:
x, y = ('lon', 'lat')
if has_time:
t = 'time'
steps_per_variable = info['dimensions'][t]['length']
t_start = datetime.datetime(2000, 1, 1)
t_end = t_start + datetime.timedelta(1) * steps_per_variable
try:
service = Service.objects.create(
name=uuid.uuid4(),
data_path=nc_rel_path,
projection=grid['proj4'],
full_extent=extent,
initial_extent=extent
)
if has_time and len(variable_names) and steps_per_variable:
# Set required time fields
service.supports_time = True
service.time_start = t_start
service.time_end = t_end
service.time_interval = 1
service.time_interval_units = 'days'
service.calendar = 'standard'
service.save()
if unique:
model = model_set or getattr(self.scenario.project, variable_name)
unique_id_lookup = model_id_lookup or NAME_HASH[variable_name] + '_id'
try:
if has_colormap:
queryset = model.values_list('color', unique_id_lookup, 'name')
renderer = self.generate_unique_renderer(queryset)
else:
queryset = model.values_list(unique_id_lookup, 'name')
renderer = self.generate_unique_renderer(queryset, randomize_colors=True)
except:
raise AssertionError(CREATE_RENDERER_ERROR_MSG.format(vname=variable_name))
else:
renderer = self.generate_stretched_renderer(info)
if has_time and len(variable_names):
for name in variable_names:
Variable.objects.create(
service=service,
index=variable_names.index(name),
variable=name,
projection=grid['proj4'],
x_dimension=x,
y_dimension=y,
name=name,
renderer=renderer,
full_extent=extent,
supports_time=True,
time_dimension=t,
time_start=t_start,
time_end=t_end,
time_steps=steps_per_variable
)
else:
Variable.objects.create(
service=service,
index=0,
variable=variable_name,
projection=grid['proj4'],
x_dimension=x,
y_dimension=y,
name=variable_name,
renderer=renderer,
full_extent=extent
)
return service
except:
raise Error(CREATE_SERVICE_ERROR_MSG.format(variable_name, self.scenario.sid))
@staticmethod
def convert_to_netcdf(geotiff_file_or_pattern, netcdf_out, variable_name):
"""
Convert input rasters to netcdf for use in ncdjango
:param geotiff_file_or_pattern: Absolute path (or pattern of paths) to geotiff(s) to convert to netCDF
:param netcdf_out: Absolute path to netCDF file.
:param variable_name: The variable name.
"""
try:
subprocess.run(
[CLOVER_PATH if CLOVER_PATH else "clover", "to_netcdf", geotiff_file_or_pattern, netcdf_out,
variable_name]
)
except:
            raise subprocess.SubprocessError("Failed calling clover. Did you set up clover correctly?")
@staticmethod
def merge_netcdf(pattern, out):
"""
Merges a list of netcdf files with different variables and same dimensions into a single netcdf file.
:param pattern: glob.glob pattern (not necessarily a regex)
:param out: Path to the created netcdf file
"""
glob_pattern = glob.glob(pattern)
glob_pattern.sort()
xarray.open_mfdataset(glob_pattern).to_netcdf(out) # using open_mfdataset prevents issues with file handles
@has_nc_root
def create_input_services(self):
""" Generates a set of ncdjango services and variables (one-to-one) to associate with a scenario. """
ics = self.scenario.initial_conditions_spatial_settings
sis = models.ScenarioInputServices.objects.create(scenario=ics.scenario)
# Create stratum input service
if len(ics.stratum_file_name):
sis.stratum = self.generate_service(ics.stratum_file_name, 'strata')
if len(ics.secondary_stratum_file_name):
sis.secondary_stratum = self.generate_service(
ics.secondary_stratum_file_name, 'secondary_strata', has_colormap=False
)
if len(ics.stateclass_file_name):
sis.stateclass = self.generate_service(ics.stateclass_file_name, 'stateclasses')
if len(ics.age_file_name):
sis.age = self.generate_service(ics.age_file_name, 'age', unique=False)
sis.save()
@has_nc_root
def create_output_services(self):
""" Generates a set of ncdjango services and time-series variables to associate with a scenario. """
assert self.scenario.is_result # sanity check
sos = models.ScenarioOutputServices.objects.create(scenario=self.scenario)
oo = self.scenario.output_options
# State Classes
if oo.raster_sc:
sos.stateclass = self.generate_service('It*-Ts*-sc.tif', 'stateclasses')
# Transition Groups
if oo.raster_tr:
sos.transition_group = self.generate_service(
'It*-Ts*-tg-*.tif', 'transition_groups', has_colormap=False,
model_set=self.scenario.project.transition_types, model_id_lookup='transition_type_id'
)
# State Attributes
if oo.raster_sa:
sos.state_attribute = self.generate_service('It*-Ts*-sa-*.tif', 'state_attributes', unique=False)
# Transition Attributes
if oo.raster_ta:
sos.transition_attribute = self.generate_service('It*-Ts*-ta-*.tif', 'transition_attributes', unique=False)
# Strata
if oo.raster_strata:
sos.stratum = self.generate_service('It*-Ts*-str.tif', 'strata')
# Age
if oo.raster_age:
sos.age = self.generate_service('It*-Ts*-age.tif', 'age', unique=False)
# Time since transition
if oo.raster_tst:
# TODO - implement RasterOutputTST (find use case first)
print("RasterOutputTST not implemented yet. For now, please disable this output option.")
# Average annual transition group probability
if oo.raster_aatp:
sos.avg_annual_transition_group_probability = self.generate_service(
'It*-Ts*-tgap-*.tif', 'avg_annual_transition_probability', unique=False
)
sos.save()
|
consbio/landscapesim
|
landscapesim/common/services.py
|
Python
|
bsd-3-clause
| 18,215
|
[
"NetCDF"
] |
6f2141b4c4f21e29b8002eefe75d258e08639d2a8babfc8ff6ec33083561ec4a
|
#!/usr/bin/python
import subprocess as sp, argparse
parser=argparse.ArgumentParser()
parser.add_argument('--mei',nargs='*')
parser.add_argument('--option_tag',nargs='*')
parser.add_argument('--path',nargs='*')
args=parser.parse_args()
insertME=''.join(args.mei)
option_tag=''.join(args.option_tag)
path_current=''.join(args.path)
fh = open(path_current+"Results%s/TPM_stats_90_maxUR_10.txt"%(option_tag),"r")
ref = fh.readlines()[1:]
fh.close()
ind_TPM_UR = {} #ind as key, (TPM,UR) as value
ind_loci = {} #ind as key, num_loci as value
for row in ref:
row=row.strip().split("\t")
ind_TPM_UR[row[0]] = (float(row[2]),int(row[1]))
ind_loci[row[0]] = 0
#print ind_loci
d_lib_r = {} #ind as key, mapped_read as value
#to retrieve the number of mapped reads for the library used
lib_i = []
ln0 = sp.Popen(['ls',path_current],stdout = sp.PIPE)
lib_name = sp.Popen(['grep','library'],stdin = ln0.stdout, stdout = sp.PIPE).communicate()[0].strip()
#print lib_name
f_lib = open(path_current+lib_name,"r")
lib_text = f_lib.readlines()
f_lib.close()
for row in lib_text:
row=row.strip().split("\t")
lib_i.append(row[1])
is0 = sp.Popen(['ls',path_current],stdout = sp.PIPE)
is1 = sp.Popen(['grep','Sample'],stdin = is0.stdout, stdout = sp.PIPE)
ind_sample = sp.Popen(['awk','-F',r"_",r'{print $2}'],stdin = is1.stdout, stdout = sp.PIPE).communicate()[0].strip().split("\n")
i = list(set(lib_i) & set(ind_sample))
#print i, len(i)
for s in i:
qi = sp.Popen(['ls','%sSample_%s/'%(path_current,s)],stdout = sp.PIPE)
qii = sp.Popen(['grep','sorted.bam$'],stdin = qi.stdout, stdout = sp.PIPE)
bam_file = qii.communicate()[0].strip()
# print bam_file
q1 = sp.Popen(['samtools','idxstats','%sSample_%s/%s'%(path_current,s,bam_file)],stdout = sp.PIPE)
q2 = sp.Popen(['awk',r'{sum+=$3;} END {print sum;}'],stdin=q1.stdout,stdout=sp.PIPE)
mapped_read = float(q2.communicate()[0].strip().split("\n")[0])
# print "mapped read is %s"%mapped_read
d_lib_r[s]=mapped_read
#print d_lib_r
d = {} #locusID as key, {ind:[#raw_read,#uniq_read]} as value
for k in range(0,2,1):
if k == 0 :
orient = "minus"
else:
orient = "plus"
# print orient
fk = open(path_current+"output_bwa-blast%s/%s_all.insert_%s%stemp"%(option_tag,insertME,orient,option_tag),"r")
data = fk.readlines()
fk.close()
for row in data:
row=row.strip().split(" ")
if row[5] not in d.keys():
d[row[5]]={}
d[row[5]][row[4]] = [int(row[3]),1]
else:
if row[4] not in d[row[5]].keys():
d[row[5]][row[4]] = [int(row[3]),1]
else:
d[row[5]][row[4]][0] += int(row[3])
d[row[5]][row[4]][1] += 1
out = open(path_current+"output_bwa-blast%s/latest_data_TPM-10UR_filters_All_insertion%sbed"%(option_tag,option_tag),"w")
out_count = open(path_current+"output_bwa-blast%s/latest_data_TPM-10UR_filters_All_insertion_ind_count_loci%stxt"%(option_tag,option_tag),"w")
fz = open(path_current+"output_bwa-blast%s/%s_all%sBB.bed"%(option_tag,insertME,option_tag),"r")
template = fz.readlines()
fz.close()
for row in template:
row=row.strip().split("\t")
ind_list = []
TPM_list = []
UR_list = []
flag = False
for ind in d[row[6]].keys():
if ind in i:
temp_TPM = (d[row[6]][ind][0]*1000000.0)/d_lib_r[ind]
if temp_TPM > ind_TPM_UR[ind][0] and d[row[6]][ind][1] > ind_TPM_UR[ind][1]:
flag = True
ind_list.append(ind)
TPM_list.append("%s"%float("%.6g"%temp_TPM))
UR_list.append(str(d[row[6]][ind][1]))
ind_loci[ind] += 1
if flag:
out.write(row[0]+"\t"+row[1]+"\t"+row[2]+"\t"+row[3]+"\t"+row[4]+"\t"+row[5]+"\t"+row[6]+"\t"+",".join(ind_list)+"\t"+",".join(TPM_list)+"\t"+",".join(UR_list)+"\n")
out.close()
for ind in ind_loci.keys():
out_count.write(ind+"\t"+str(ind_loci[ind])+"\n")
out_count.close()
|
JXing-Lab/ME-SCAN-SVA
|
modules/JW_producing_all_insertion_with_ind_info.py
|
Python
|
gpl-3.0
| 3,773
|
[
"BLAST"
] |
adba3667e752802ffbf8ad462b74077abd7fccb8fd29803e18c7773595c791f8
|
#!/usr/bin/env python
###########################
###### IMPORT MODULES
###########################
import LCSbase as lb
import LCScommon as lcscommon
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import numpy as np
import os
import scipy.stats as st
from scipy.stats import ks_2samp, anderson_ksamp, binned_statistic
import argparse  # here is min mass = 9.75
from urllib.parse import urlencode
from urllib.request import urlretrieve
from astropy.io import fits, ascii
from astropy.cosmology import WMAP9 as cosmo
from scipy.optimize import curve_fit
from astropy.stats import bootstrap,binom_conf_interval
from astropy.utils import NumpyRNGContext
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.visualization import simple_norm
from astropy.wcs import WCS
from astropy import units as u
from astropy.stats import median_absolute_deviation as MAD
from PIL import Image
import sys
LCSpath = os.path.join(os.getenv("HOME"),'github','LCS','python','')
sys.path.append(LCSpath)
from fit_line_sigma_clip import fit_line_sigma_clip
###########################
##### DEFINITIONS
###########################
homedir = os.getenv("HOME")
plotdir = homedir+'/research/LCS/plots/'
#USE_DISK_ONLY = np.bool(np.float(args.diskonly))#True # set to use disk effective radius to normalize 24um size
USE_DISK_ONLY = True
#if USE_DISK_ONLY:
# print('normalizing by radius of disk')
minsize_kpc=1.3 # one mips pixel at distance of hercules
#minsize_kpc=2*minsize_kpc
mstarmin=10.#float(args.minmass)
mstarmax=10.8
minmass=mstarmin #log of M*
ssfrmin=-12.
ssfrmax=-9
spiralcut=0.8
truncation_ratio=0.5
zmin = 0.0137
zmax = 0.0433
Mpcrad_kpcarcsec = 2. * np.pi/360./3600.*1000.
mipspixelscale=2.45
exterior=.68
colors=['k','b','c','g','m','y','r','sienna','0.5']
shapes=['o','*','p','d','s','^','>','<','v']
#colors=['k','b','c','g','m','y','r','sienna','0.5']
truncated=np.array([113107,140175,79360,79394,79551,79545,82185,166185,166687,162832,146659,99508,170903,18236,43796,43817,43821,70634,104038,104181],'i')
minsize_kpc=1.3 # one mips pixel at distance of hercules
BTkey = '__B_T_r'
Rdkey = 'Rd_2'
# ellipticity to use in combined tables
# the first one e_1 refers to the ellip of the bulge during B/D decomposition
# this is the same in the LCS and GSWLC catalogs - phew!
ellipkey = 'e_2'
###########################
##### Functions
###########################
# using colors from matplotlib default color cycle
mycolors = plt.rcParams['axes.prop_cycle'].by_key()['color']
colorblind1='#F5793A' # orange
colorblind2 = '#85C0F9' # light blue
colorblind3='#0F2080' # dark blue
darkblue = colorblind3
darkblue = mycolors[1]
lightblue = colorblind2
#lightblue = 'b'
#colorblind2 = 'c'
colorblind3 = 'k'
# my version, binned median for GSWLC galaxies with vr < 15,000
t = Table.read(homedir+'/research/APPSS/GSWLC2-median-ssfr-mstar-vr15k.dat',format='ipac')
log_mstar2 = t['med_logMstar']
log_ssfr2 = t['med_logsSFR']
def running_median(x,y,nbin,ax,color='k',alpha=.4):
mybins=np.linspace(st.stats.scoreatpercentile(x,2),st.stats.scoreatpercentile(x,98),nbin)
ybin,xbin,binnumb = binned_statistic(x,y,statistic='median',bins=mybins)
yerr,xbin,binnumb = binned_statistic(x,y,statistic='std',bins=mybins)
nyerr,xbin,binnumb = binned_statistic(x,y,statistic='count',bins=mybins)
yerr = yerr/np.sqrt(nyerr)
dbin = xbin[1]-xbin[0]
ax.plot(xbin[:-1]+0.5*dbin,ybin,color=color,lw=3)
ax.fill_between(xbin[:-1]+0.5*dbin,ybin+yerr,ybin-yerr,color=color,alpha=alpha)
def linear_func(x,m,b):
return m*x+b
def get_bootstrap_confint(d,bootfunc=np.median,nboot=100):
bootsamp = bootstrap(d,bootfunc=bootfunc,bootnum=nboot)
bootsamp.sort()
# get indices
ilower = int(((nboot - .68*nboot)/2))
iupper = nboot-ilower
return bootsamp[ilower],bootsamp[iupper]
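# Minimal usage sketch (illustrative, made-up data):
#   d = np.random.normal(0, 1, 500)
#   lo, hi = get_bootstrap_confint(d, bootfunc=np.median, nboot=1000)
# returns an approximate 68% bootstrap confidence interval on the median of d.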
def get_BV_MS(logMstar,MSfit=None):
'''
get MS fit that BV calculated from GSWLC;
MSfit = can use with alternate fit function if you provide the function
'''
#return 0.53*logMstar-5.5
# for no BT cut, e < 0.75
if MSfit is None:
return 0.3985*logMstar - 4.2078
else:
return MSfit(logMstar)
def get_MS_BTcut(logMstar,MSfit=None):
'''
get MS fit with B/T < 0.4 cut
MSfit = can use with alternate fit function if you provide the function
'''
#return 0.53*logMstar-5.5
# for no BT cut, e < 0.75
return 0.4731*logMstar-4.8771
def plot_Durbala_MS(ax,ls='-'):
plt.sca(ax)
#lsfr = log_mstar2+log_ssfr2
##plt.plot(log_mstar2, lsfr, 'w-', lw=4)
#plt.plot(log_mstar2, lsfr, c='m',ls='-', lw=6, label='Durbala+20')
x1,x2 = 9.7,11.
xline = np.linspace(x1,x2,100)
yline = get_MS_BTcut(xline)
ax.plot(xline,yline,c='w',ls=ls,lw=4,label='_nolegend_')
ax.plot(xline,yline,c='m',ls=ls,lw=3,label='$B/T < 0.4$')
def plot_BV_MS(ax,color='mediumblue',ls='-'):
plt.sca(ax)
plot_Durbala_MS(ax)
x1,x2 = 9.7,11.
xline = np.linspace(x1,x2,100)
yline = get_BV_MS(xline)
ax.plot(xline,yline,c='w',ls=ls,lw=4,label='_nolegend_')
ax.plot(xline,yline,c=color,ls=ls,lw=3,label='Linear Fit')
# scatter around MS fit
sigma=0.3
ax.plot(xline,yline-1.5*sigma,c='w',ls='--',lw=4)
ax.plot(xline,yline-1.5*sigma,c=color,ls='--',lw=3,label='fit-1.5$\sigma$')
def plot_GSWLC_sssfr(ax=None,ls='-'):
if ax is None:
ax = plt.gca()
ssfr = -11.5
x1,x2 = 9.6,11.15
xline = np.linspace(x1,x2,100)
yline = ssfr+xline
ax.plot(xline,yline,c='w',ls=ls,lw=4,label='_nolegend_')
ax.plot(xline,yline,c='0.5',ls=ls,lw=3,label='log(sSFR)=-11.5')
def colormass(x1,y1,x2,y2,name1,name2, figname, hexbinflag=False,contourflag=False, \
xmin=7.9, xmax=11.6, ymin=-1.2, ymax=1.2, contour_bins = 40, ncontour_levels=5,\
xlabel='$\log_{10}(M_\star/M_\odot) $', ylabel='$(g-i)_{corrected} $', color1=colorblind3,color2=colorblind2,\
nhistbin=50, alpha1=.1,alphagray=.1,lcsflag=False,ssfrlimit=None,cumulativeFlag=False):
'''
PARAMS:
-------
* x1,y1
* x2,y2
* name1
* name2
* hexbinflag
* contourflag
* xmin, xmax
* ymin, ymax
* contour_bins, ncontour_levels
* color1
* color2
* nhistbin
* alpha1
* alphagray
'''
fig = plt.figure(figsize=(8,8))
plt.subplots_adjust(left=.15,bottom=.15)
nrow = 2
ncol = 2
# for purposes of this plot, only keep data within the
# window specified by [xmin:xmax, ymin:ymax]
keepflag1 = (x1 >= xmin) & (x1 <= xmax) & (y1 >= ymin) & (y1 <= ymax)
keepflag2 = (x2 >= xmin) & (x2 <= xmax) & (y2 >= ymin) & (y2 <= ymax)
x1 = x1[keepflag1]
y1 = y1[keepflag1]
x2 = x2[keepflag2]
y2 = y2[keepflag2]
n1 = sum(keepflag1)
n2 = sum(keepflag2)
ax1 = plt.subplot2grid((nrow,ncol),(1,0),rowspan=nrow-1,colspan=ncol-1, fig=fig)
if hexbinflag:
#t1 = plt.hist2d(x1,y1,bins=100,cmap='gray_r')
#H, xbins,ybins = np.histogram2d(x1,y1,bins=20)
#extent = [xbins[0], xbins[-1], ybins[0], ybins[-1]]
#plt.contour(np.log10(H.T+1), 10, extent = extent, zorder=1,colors='k')
#plt.hexbin(xvar2,yvar2,bins='log',cmap='Blues', gridsize=100)
label=name1+' (%i)'%(n1)
plt.hexbin(x1,y1,bins='log',cmap='gray_r', gridsize=75)
else:
label=name1+' (%i)'%(n1)
if lcsflag:
plt.plot(x1,y1,'ko',color=color1,alpha=alphagray,label=label, zorder=10,mec='k',markersize=8)
else:
plt.plot(x1,y1,'k.',color=color1,alpha=alphagray,label=label, zorder=1,markersize=8)
if contourflag:
H, xbins,ybins = np.histogram2d(x2,y2,bins=contour_bins)
extent = [xbins[0], xbins[-1], ybins[0], ybins[-1]]
plt.contour((H.T), levels=ncontour_levels, extent = extent, zorder=1,colors=color2, label='__nolegend__')
#plt.legend()
else:
label=name2+' (%i)'%(n2)
plt.plot(x2,y2,'co',color=color2,alpha=alpha1, label=label,markersize=8,mec='k')
plt.legend(loc='upper right')
#sns.kdeplot(agc['LogMstarTaylor'][keepagc],agc['gmi_corrected'][keepagc])#,bins='log',gridsize=200,cmap='blue_r')
#plt.colorbar()
if ssfrlimit is not None:
xl=np.linspace(xmin,xmax,100)
yl =xl + ssfrlimit
plt.plot(xl,yl,'k--')#,label='sSFR=-11.5')
plt.axis([xmin,xmax,ymin,ymax])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel(xlabel,fontsize=26)
plt.ylabel(ylabel,fontsize=26)
plt.gca().tick_params(axis='both', labelsize=16)
#plt.axis([7.9,11.6,-.05,2])
ax2 = plt.subplot2grid((nrow,ncol),(0,0),rowspan=1,colspan=ncol-1, fig=fig, sharex = ax1, yticks=[])
print('just checking ...',len(x1),len(x2))
print(min(x1))
print(min(x2))
minx = min([min(x1),min(x2)])
maxx = max([max(x1),max(x2)])
mybins = np.linspace(minx,maxx,nhistbin)
if cumulativeFlag:
t = plt.hist(x1, density=True, cumulative=True,bins=len(x1),color=color1,histtype='step',lw=1.5, label=name1+' (%i)'%(n1))
t = plt.hist(x2, density=True, cumulative=True,bins=len(x2),color=color2,histtype='step',lw=1.5, label=name2+' (%i)'%(n2))
else:
t = plt.hist(x1, density=True, bins=mybins,color=color1,histtype='step',lw=1.5, label=name1+' (%i)'%(n1))
t = plt.hist(x2, density=True, bins=mybins,color=color2,histtype='step',lw=1.5, label=name2+' (%i)'%(n2))
if hexbinflag:
plt.legend()
#leg = ax2.legend(fontsize=12,loc='lower left')
#for l in leg.legendHandles:
# l.set_alpha(1)
# l._legmarker.set_alpha(1)
ax2.xaxis.tick_top()
ax3 = plt.subplot2grid((nrow,ncol),(1,ncol-1),rowspan=nrow-1,colspan=1, fig=fig, sharey = ax1, xticks=[])
miny = min([min(y1),min(y2)])
maxy = max([max(y1),max(y2)])
mybins = np.linspace(miny,maxy,nhistbin)
if cumulativeFlag:
t=plt.hist(y1, density=True, cumulative=True,orientation='horizontal',bins=len(y1),color=color1,histtype='step',lw=1.5, label=name1)
t=plt.hist(y2, density=True, cumulative=True,orientation='horizontal',bins=len(y2),color=color2,histtype='step',lw=1.5, label=name2)
else:
t=plt.hist(y1, density=True, orientation='horizontal',bins=mybins,color=color1,histtype='step',lw=1.5, label=name1)
t=plt.hist(y2, density=True, orientation='horizontal',bins=mybins,color=color2,histtype='step',lw=1.5, label=name2)
plt.yticks(rotation='horizontal')
ax3.yaxis.tick_right()
ax3.tick_params(axis='both', labelsize=16)
ax2.tick_params(axis='both', labelsize=16)
#ax3.set_title('$log_{10}(SFR)$',fontsize=20)
#plt.savefig(figname)
print('############################################################# ')
    print('KS test comparing galaxies within the range shown on the plot')
print('')
print('STELLAR MASS')
t = lcscommon.ks(x1,x2,run_anderson=False)
#t = anderson_ksamp(x1,x2)
#print('Anderson-Darling: ',t)
print('')
print('COLOR')
t = lcscommon.ks(y1,y2,run_anderson=False)
#t = anderson_ksamp(y1,y2)
#print('Anderson-Darling: ',t)
return ax1,ax2,ax3
def scatter_hist(x, y, ax, ax_histx, ax_histy):
''' https://matplotlib.org/stable/gallery/lines_bars_and_markers/scatter_hist.html#sphx-glr-gallery-lines-bars-and-markers-scatter-hist-py '''
# no labels
ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
# the scatter plot:
ax.scatter(x, y)
# now determine nice limits by hand:
binwidth = 0.25
xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
lim = (int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
ax_histx.hist(x, bins=bins,histtype='step',lw=2,alpha=1)
ax_histy.hist(y, bins=bins, orientation='horizontal',histtype='step',lw=2,alpha=1)
def scatter_hist_wrapper(x,y,fig):
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a square Figure
ax = fig.add_axes(rect_scatter)
ax_histx = fig.add_axes(rect_histx, sharex=ax)
ax_histy = fig.add_axes(rect_histy, sharey=ax)
# use the previously defined function
scatter_hist(x, y, ax, ax_histx, ax_histy)
return ax,ax_histx,ax_histy
def plotsalim07():
#plot the main sequence from Salim+07 for a Chabrier IMF
lmstar=np.arange(8.5,11.5,0.1)
#use their equation 11 for pure SF galaxies
lssfr = -0.35*(lmstar - 10) - 9.83
#use their equation 12 for color-selected galaxies including
#AGN/SF composites. This is for log(Mstar)>9.4
#lssfr = -0.53*(lmstar - 10) - 9.87
lsfr = lmstar + lssfr -.3
sfr = 10.**lsfr
plt.plot(lmstar, lsfr, 'w-', lw=4)
plt.plot(lmstar, lsfr, c='salmon',ls='-', lw=2, label='$Salim+07$')
plt.plot(lmstar, lsfr-np.log10(5.), 'w--', lw=4)
plt.plot(lmstar, lsfr-np.log10(5.), c='salmon',ls='--', lw=2)
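# Worked number (illustrative): at log(Mstar) = 10, equation 11 above gives
# log(sSFR) = -9.83, so log(SFR) = 10 - 9.83 - 0.3 = -0.13 after the -0.3 dex
# offset applied in the code.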
def mass_match(input_mass,comp_mass,seed,dm=.15,nmatch=1,inputZ=None,compZ=None,dz=.0025):
'''
for each galaxy in parent, draw nmatch galaxies in match_mass
that are within +/-dm
PARAMS:
-------
* input_mass - sample to create mass-matched sample for, ref mass distribution
* comp_mass - sample to draw matches from
* dm - mass offset from which to draw matched galaxies from
* nmatch = number of matched galaxies per galaxy in the input
* inputZ = redshift for input sample; need to supply this to do redshift cut
* compZ = redshift for comparison sample
RETURNS:
--------
* indices of the comp_sample
'''
# for each galaxy in parent
# select nmatch galaxies from comp_sample that have stellar masses
# within +/- dm
np.random.seed(seed)
return_index = np.zeros(len(input_mass)*nmatch,'i')
comp_index = np.arange(len(comp_mass))
# hate to do this with a loop,
# but I can't think of a smarter way right now
for i in range(len(input_mass)):
# limit comparison sample to mass of i galaxy, +/- dm
flag = np.abs(comp_mass - input_mass[i]) < dm
# if redshifts are provided, also restrict based on redshift offset
if inputZ is not None:
flag = flag & (np.abs(compZ - inputZ[i]) < dz)
# select nmatch galaxies randomly from this limited mass range
# NOTE: can avoid repeating items by setting replace=False
if sum(flag) < nmatch:
print('galaxies in slice < # requested',sum(flag),nmatch,input_mass[i],inputZ[i])
if sum(flag) == 0:
print('\truh roh - doubling mass and redshift slices')
flag = np.abs(comp_mass - input_mass[i]) < 2*dm
# if redshifts are provided, also restrict based on redshift offset
if inputZ is not None:
flag = flag & (np.abs(compZ - inputZ[i]) < 2*dz)
if sum(flag) == 0:
                print('\truh roh again - quadrupling mass and redshift slices')
flag = np.abs(comp_mass - input_mass[i]) < 4*dm
# if redshifts are provided, also restrict based on redshift offset
if inputZ is not None:
flag = flag & (np.abs(compZ - inputZ[i]) < 4*dz)
if sum(flag) == 0:
print("can't seem to find a match for mass = ",input_mass[i], i)
print("skipping this galaxy")
continue
return_index[int(i*nmatch):int((i+1)*nmatch)] = np.random.choice(comp_index[flag],nmatch,replace=True)
return return_index
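# Minimal usage sketch (illustrative, made-up arrays):
#   idx = mass_match(input_mass, comp_mass, seed=42, dm=0.15, nmatch=10)
#   matched = comp_mass[idx]
# draws, for each input galaxy, 10 comparison galaxies within +/-0.15 dex in mass.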
def plotelbaz():
#plot the main sequence from Elbaz+13
xe=np.arange(8.5,11.5,.1)
xe=10.**xe
#I think that this comes from the intercept of the
#Main-sequence curve in Fig. 18 of Elbaz+11. They make the
#assumption that galaxies at a fixed redshift have a constant
#sSFR=SFR/Mstar. This is the value at z=0. This is
#consistent with the value in their Eq. 13
#This is for a Salpeter IMF
ye=(.08e-9)*xe
    plt.plot(np.log10(xe),np.log10(ye),'w-',lw=3)
    plt.plot(np.log10(xe),np.log10(ye),'k-',lw=2,label='$Elbaz+2011$')
    #plot(np.log10(xe),(2*ye),'w-',lw=4)
    #plot(np.log10(xe),(2*ye),'k:',lw=2,label='$2 \ SFR_{MS}$')
    plt.plot(np.log10(xe),np.log10(ye/5.),'w--',lw=4)
    plt.plot(np.log10(xe),np.log10(ye/5.),'k--',lw=2,label='$SFR_{MS}/5$')
def getlegacy(ra1,dec1,jpeg=True,getfits=True,imsize=None):
'''
imsize is size of desired cutout in arcmin
'''
default_image_size = 60
gra = '%.5f'%(ra1) # accuracy is of order .1"
gdec = '%.5f'%(dec1)
galnumber = gra+'-'+gdec
if imsize is not None:
image_size=imsize
else:
image_size=default_image_size
cwd = os.getcwd()
if not(os.path.exists(cwd+'/cutouts/')):
os.mkdir(cwd+'/cutouts')
rootname = 'cutouts/legacy-im-'+str(galnumber)+'-'+str(image_size)
jpeg_name = rootname+'.jpg'
fits_name = rootname+'.fits'
# check if images already exist
# if not download images
if not(os.path.exists(jpeg_name)):
print('downloading image ',jpeg_name)
url='http://legacysurvey.org/viewer/jpeg-cutout?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'
urlretrieve(url, jpeg_name)
if not(os.path.exists(fits_name)):
print('downloading image ',fits_name)
url='http://legacysurvey.org/viewer/cutout.fits?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'
urlretrieve(url, fits_name)
try:
t,h = fits.getdata(fits_name,header=True)
except IndexError:
print('problem accessing image')
print(fits_name)
url='http://legacysurvey.org/viewer/cutout.fits?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'
print(url)
return None
if np.mean(t[1]) == 0:
return None
norm = simple_norm(t[1],stretch='asinh',percent=99.5)
if jpeg:
t = Image.open(jpeg_name)
plt.imshow(t,origin='lower')
else:
plt.imshow(t[1],origin='upper',cmap='gray_r', norm=norm)
w = WCS(fits_name,naxis=2)
return t,w
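# Example of the cutout URL built above (hypothetical coordinates):
#   http://legacysurvey.org/viewer/jpeg-cutout?ra=180.0&dec=5.0&layer=dr8&size=60&pixscale=1.00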
def sersic(x,Ie,n,Re):
bn = 1.999*n - 0.327
return Ie*np.exp(-1*bn*((x/Re)**(1./n)-1))
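# Sanity check (illustrative): at x = Re the exponent vanishes, so
# sersic(Re, Ie, n, Re) == Ie for any n; Ie is by construction the intensity
# at the effective radius.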
def plot_models():
plt.figure(figsize=(8,6))
plt.subplots_adjust(wspace=.35)
rmax = 4
scaleRe = 0.8
rtrunc = 1.5
n=1
# shrink Re
plt.subplot(2,2,1)
x = np.linspace(0,rmax,100)
Ie=1
Re=1
y = sersic(x,Ie,n,Re)
plt.plot(x,y,label='sersic n='+str(n),lw=2)
y2 = sersic(x,Ie,n,scaleRe*Re)
plt.plot(x,y2,label="Re "+r"$ \rightarrow \ $"+str(scaleRe)+"Re",ls='--',lw=2)
#plt.legend()
plt.ylabel('Intensity',fontsize=18)
plt.text(0.1,.85,'(a)',transform=plt.gca().transAxes,horizontalalignment='left')
plt.legend(fontsize=14)
# plot total flux
plt.subplot(2,2,2)
dx = x[1]-x[0]
sum1 = (y*dx*2*np.pi*x)
sum2 = (y2*dx*2*np.pi*x)
plt.plot(x,np.cumsum(sum1)/np.max(np.cumsum(sum1)),label='sersic n='+str(n),lw=2)
plt.plot(x,np.cumsum(sum2)/np.max(np.cumsum(sum1)),label="Re "+r"$ \rightarrow \ $"+str(scaleRe)+"Re",ls='--',lw=2)
plt.ylabel('Enclosed Flux',fontsize=18)
plt.grid()
plt.text(0.1,.85,'(b)',transform=plt.gca().transAxes,horizontalalignment='left')
plt.legend(fontsize=14)
# truncated sersic model
plt.subplot(2,2,3)
plt.plot(x,y,label='sersic n='+str(n),lw=2)
y3 = y.copy()
flag = x > rtrunc
y3[flag] = np.zeros(sum(flag))
plt.plot(x,y3,ls='--',label='Rtrunc = '+str(rtrunc)+' Re',lw=2)
plt.legend()
plt.ylabel('Intensity',fontsize=18)
#plt.legend()
#plt.gca().set_yscale('log')
plt.text(0.1,.85,'(c)',transform=plt.gca().transAxes,horizontalalignment='left')
plt.legend(fontsize=14)
plt.xlabel('r/Re')
plt.subplot(2,2,4)
sum3 = (y3*dx*2*np.pi*x)
plt.plot(x,np.cumsum(sum1)/np.max(np.cumsum(sum1)),label='sersic n='+str(n),lw=2)
plt.plot(x,np.cumsum(sum3)/np.max(np.cumsum(sum1)),label='Rtrunc = '+str(rtrunc)+' Re',ls='--',lw=2)
plt.ylabel('Enclosed Flux',fontsize=18)
plt.grid()
plt.text(0.1,.85,'(d)',transform=plt.gca().transAxes,horizontalalignment='left')
plt.legend(fontsize=14)
plt.xlabel('r/Re')
plt.savefig(plotdir+'/cartoon-models.png')
plt.savefig(plotdir+'/cartoon-models.pdf')
pass
###########################
##### Plot parameters
###########################
# figure setup
plotsize_single=(6.8,5)
plotsize_2panel=(10,5)
params = {'backend': 'pdf',
'axes.labelsize': 24,
'font.size': 20,
'legend.fontsize': 12,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
#'lines.markeredgecolor' : 'k',
#'figure.titlesize': 20,
'mathtext.fontset': 'cm',
'mathtext.rm': 'serif',
'text.usetex': True,
'figure.figsize': plotsize_single}
plt.rcParams.update(params)
#figuredir = '/Users/rfinn/Dropbox/Research/MyPapers/LCSpaper1/submit/resubmit4/'
figuredir='/Users/grudnick/Work/Local_cluster_survey/Papers/Finn_MS/Plots/'
#figuredir = '/Users/grudnick/Work/Local_cluster_survey/Analysis/MS_paper/Plots/'
#colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
########################################
##### STATISTICS TO COMPARE CORE VS EXTERNAL
########################################
test_statistics = lambda x: (np.mean(x), np.var(x), MAD(x), st.skew(x), st.kurtosis(x))
stat_cols = ['mean','var','MAD','skew','kurt']
###########################
##### START OF GALAXIES CLASS
###########################
class gswlc_full():
'''
class for full GSWLC catalog. this will cut on BT and save a trimmed section that
overlaps LCS region
'''
def __init__(self,catalog,cutBT=False,HIdef=False):
if catalog.find('.fits') > -1:
#print('got a fits file')
self.gsw = Table.read(catalog,format='fits')
#print(self.gsw.colnames)
self.redshift_field = 'zobs'
if cutBT:
self.outfile = catalog.split('.fits')[0]+'-LCS-Zoverlap-BTcut.fits'
print('outfile = ',self.outfile)
else:
self.outfile = catalog.split('.fits')[0]+'-LCS-Zoverlap.fits'
print('outfile = ',self.outfile)
else:
print('reading ascii')
self.gsw = ascii.read(catalog)
if catalog.find('v2') > -1:
self.redshift_field = 'Z_1'
else:
self.redshift_field = 'Z'
if cutBT:
self.outfile = catalog.split('.dat')[0]+'-LCS-Zoverlap-BTcut.fits'
else:
self.outfile = catalog.split('.dat')[0]+'-LCS-Zoverlap.fits'
#print(self.gsw.colnames[0:10],len(self.gsw.colnames))
self.keepflag = np.ones(len(self.gsw),'bool')
self.cut_redshift()
if cutBT:
self.cut_BT(BT=float(args.BT))
self.cut_ellip()
self.save_trimmed_cat()
#self.get_dsfr
if HIdef:
self.save_trimmed_HIdef()
def cut_redshift(self):
z1 = zmin
z2 = zmax
#print(self.redshift_field)
#print(self.gsw.colnames[0:10],len(self.gsw.colnames))
#print(self.gsw.colnames)
#print(z1,z2)
zflag = (self.gsw[self.redshift_field] > z1) & (self.gsw[self.redshift_field] < z2)
massflag = self.gsw['logMstar'] > 0
#self.gsw = self.gsw[zflag & massflag]
self.keepflag = self.keepflag & zflag & massflag
def cut_BT(self,BT=None):
btflag = self.gsw[BTkey] <= BT
#self.gsw = self.gsw[btflag]
self.keepflag = self.keepflag & btflag
def cut_ellip(self):
ellipflag = self.gsw[ellipkey] <= float(args.ellip)
#self.gsw = self.gsw[ellipflag]
self.keepflag = self.keepflag & ellipflag
def get_dsfr(self):
''' get distance from MS '''
#self.dsfr = self.gsw['logSFR'] - get_BV_MS(self.gsw['logMstar'])
self.dsfr = self.gsw['logSFR'] - self.get_MS(self.gsw['logMstar'])
def save_trimmed_cat(self):
t = Table(self.gsw[self.keepflag])
t.write(self.outfile,format='fits',overwrite=True)
def save_trimmed_HIdef(self):
fname = '/home/rfinn/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-tab3-Tempel-13-2021Jan07-HIdef-2021Aug28'
HIdef = Table.read(fname+'.fits')
HIdef_trimmed = HIdef[self.keepflag]
HIdef_trimmed.write(fname+'-trimmed.fits',format='fits',overwrite=True)
class gswlc_base():
''' functions that will be applied to LCS-GSWLC catalog and GSWLC catalog '''
def base_init(self):
# selecting agn in a different way - from dr10 catalog
#self.get_agn()
# cut catalog to remove agn
#self.remove_agn()
self.calc_ssfr()
#self.get_dsfr()
#self.get_lowsfr_flag()
def get_agn(self):
self.specflag = ~np.isnan(self.cat['O3FLUX'])
self.AGNKAUFF= (np.log10(self.cat['O3FLUX']/self.cat['HBFLUX']) > (.61/(np.log10(self.cat['N2FLUX']/self.cat['HAFLUX']-.05))+1.3)) | (np.log10(self.cat['N2FLUX']/self.cat['HAFLUX']) > 0.) #& (self.s.HAEW > 0.)
# add calculations for selecting the sample
self.wiseagn=(self.cat['W1MAG_3'] - self.cat['W2MAG_3']) > 0.8
self.agnflag = (self.AGNKAUFF & self.specflag) | self.wiseagn
print('fraction of AGN = %.3f (%i/%i)'%(sum(self.agnflag)/len(self.agnflag),sum(self.agnflag),len(self.agnflag)))
def remove_agn(self):
print('REMOVING AGN')
self.cat = self.cat[~self.agnflag]
def calc_ssfr(self):
self.ssfr = self.cat['logSFR'] - self.cat['logMstar']
def get_dsfr(self):
''' get distance from MS '''
#self.dsfr = self.cat['logSFR'] - get_BV_MS(self.cat['logMstar'])
self.dsfr = self.cat['logSFR'] - self.get_MS(self.cat['logMstar'])
def get_lowsfr_flag(self):
self.lowsfr_flag = (self.dsfr < -0.45)
def plot_ms(self,plotsingle=True,outfile1=None,outfile2=None):
if plotsingle:
plt.figure(figsize=(8,6))
x = self.cat['logMstar']
y = self.cat['logSFR']
plt.plot(x,y,'k.',alpha=.1)
#plt.hexbin(x,y,gridsize=30,vmin=5,cmap='gray_r')
#plt.colorbar()
xl=np.linspace(8,12,50)
ssfr_limit = -11.5
plt.plot(xl,xl+ssfr_limit,'c-')
plotsalim07()
plt.xlabel('logMstar',fontsize=20)
plt.ylabel('logSFR',fontsize=20)
if outfile1 is None:
plt.savefig(homedir+'/research/LCS/plots/sfms-ssfr-cut.pdf')
else:
plt.savefig(outfile1)
if outfile2 is None:
plt.savefig(homedir+'/research/LCS/plots/sfms-ssfr-cut.png')
else:
plt.savefig(outfile2)
def plot_positions(self,plotsingle=True, filename=None):
if plotsingle:
plt.figure(figsize=(14,8))
        plt.scatter(self.cat['RA'],self.cat['DEC'],c=np.log10(self.densNN*u.Mpc),vmin=-.5,vmax=1.,s=10)
plt.colorbar(label='$\log_{10}(N/d_N)$')
plt.xlabel('RA')
plt.ylabel('DEC')
plt.gca().invert_xaxis()
plt.axhline(y=5)
plt.axhline(y=60)
plt.axvline(x=135)
plt.axvline(x=225)
#plt.axis([130,230,0,65])
if filename is not None:
plt.savefig(filename)
class gswlc(gswlc_base):
''' class for GSWLC field catalog '''
def __init__(self,catalog,cutBT=False,HIdef_file=None):
self.cat = fits.getdata(catalog)
self.base_init()
#self.calc_local_density()
self.get_field1()
if HIdef_file is not None:
self.HIdef = Table.read(HIdef_file)
def fit_MS(self,flag=None):
''' fit the SF main sequence for current sample (using same BT, mass, ellipticity cut as sample) '''
x = self.cat['logMstar']
y = self.cat['logSFR']
if flag is not None:
x = x[flag]
y = y[flag]
# using curve fit with
fitted_line,mask = fit_line_sigma_clip(x,y,sigma=3)
self.MS_line = fitted_line
self.MS_slope = fitted_line.slope.value
self.MS_intercept = fitted_line.intercept.value
self.MS_std = np.std(np.abs(y[~mask]-fitted_line(x[~mask])))
self.MS_std = np.std(np.abs(y-fitted_line(x)))
print('##################################')
print('### FITTING WITH SIGMA CLIPPING ')
print('##################################')
print('Best-fit slope = {:.2f}'.format(self.MS_slope))
print('Best-fit inter = {:.2f}'.format(self.MS_intercept))
print('Width of the MS = {:.2f} (unclipped data)'.format(self.MS_std))
#reset scatter in MS to 0.3 for consistency with text
self.MS_std = 0.3
# using curve fit after sigma clipping
print('###################################')
print('### FITTING MS AFTER SIGMA CLIPPING ')
print('###################################')
popt,pcov = curve_fit(linear_func,x[~mask],y[~mask])
perr = np.sqrt(np.diag(pcov))
print('Best-fit slope = {:.2f}+/-{:.2f}'.format(popt[0],perr[0]))
print('Best-fit inter = {:.2f}+/-{:.2f}'.format(popt[1],perr[1]))
def get_MS(self,x):
return self.MS_line(x)
def plot_MS(self,ax,color='mediumblue',ls='-'):
'''
plot our MS fit
'''
plot_Durbala_MS(ax)
x1,x2 = 9.7,11.
xline = np.linspace(x1,x2,100)
yline = self.get_MS(xline)
ax.plot(xline,yline,c='w',ls=ls,lw=4,label='_nolegend_')
ax.plot(xline,yline,c=color,ls=ls,lw=3,label='Linear Fit')
# scatter around MS fit
sigma=self.MS_std
ax.plot(xline,yline-1.5*sigma,c='w',ls='--',lw=4)
ax.plot(xline,yline-1.5*sigma,c=color,ls='--',lw=3,label='fit-1.5$\sigma$')
def calc_local_density(self,NN=10):
try:
redshift = self.cat['Z']
except KeyError:
redshift = self.cat['Z_1']
pos = SkyCoord(ra=self.cat['RA']*u.deg,dec=self.cat['DEC']*u.deg, distance=redshift*3.e5/70*u.Mpc,frame='icrs')
idx, d2d, d3d = pos.match_to_catalog_3d(pos,nthneighbor=NN)
self.densNN = NN/d3d
self.sigmaNN = NN/d2d
def get_field1(self):
ramin=135
ramax=225
decmin=5
decmax=60
gsw_position_flag = (self.cat['RA'] > ramin) & (self.cat['RA'] < ramax) & (self.cat['DEC'] > decmin) & (self.cat['DEC'] < decmax)
#gsw_density_flag = np.log10(self.densNN*u.Mpc) < .2
#self.field1 = gsw_position_flag & gsw_density_flag
def plot_field1(self,figname1=None,figname2=None):
plt.figure(figsize=(14,8))
plt.scatter(self.cat['RA'],self.cat['DEC'],c=np.log10(self.densNN*u.Mpc),vmin=-.5,vmax=1.,s=10)
plt.colorbar(label='$\log_{10}(N/d_N)$')
plt.xlabel('RA')
plt.ylabel('DEC')
plt.gca().invert_xaxis()
plt.axhline(y=5)
plt.axhline(y=60)
plt.axvline(x=135)
plt.axvline(x=225)
plt.title('Entire GSWLC (LCS z range)')
plt.axis([130, 230, 0, 65])
if figname1 is not None:
plt.savefig(figname1)
if figname2 is not None:
plt.savefig(figname2)
def plot_dens_hist(self,figname1=None,figname2=None):
plt.figure()
t = plt.hist(np.log10(self.densNN*u.Mpc), bins=20)
plt.xlabel('$ \log_{10} (N/d_N)$')
print('median local density = %.3f'%(np.median(np.log10(self.densNN*u.Mpc))))
plt.axvline(x = (np.median(np.log10(self.densNN*u.Mpc))),c='k')
plt.title('Distribution of Local Density')
if figname1 is not None:
plt.savefig(figname1)
if figname2 is not None:
plt.savefig(figname2)
#######################################################################
#######################################################################
########## NEW CLASS: LCSGSW
#######################################################################
#######################################################################
class lcsgsw(gswlc_base):
'''
functions to operate on catalog that is cross-match between
LCS and GSWLC
'''
def __init__(self,catalog,sigma_split=600,cutBT=False):
# read in catalog of LCS matched to GSWLC
#self.cat = fits.getdata(catalog)
self.cat = Table.read(catalog)
'''
c1 = Column(self.agnflag,name='agnflag')
c2 = Column(self.wiseagn,name='wiseagn')
c3 = Column(self.AGNKAUFF,name='kauffagn')
c4 = Column(self.cat['N2FLUX']/self.cat['HAFLUX'],name='n2ha')
c5 = Column(self.cat['O3FLUX']/self.cat['HBFLUX'],name='o3hb')
tabcols = [c1,c2,c3,c4,c5,self.cat['HAFLUX'],self.cat['N2FLUX'],self.cat['HBFLUX'],self.cat['O3FLUX'],self.cat['W1MAG_3'],self.cat['W2MAG_3'],self.cat['AGNKAUFF']]
tabnames = ['agnflag','wiseagn','kauffagn','N2/HA','O3/HB','HAFLUX','N2FLUX','HBFLUX','O3FLUX','W1MAG_3','W2MAG_3','AGNKAUF']
newtable = Table(data=tabcols,names=tabnames)
newtable.write(homedir+'/research/LCS/tables/lcs-gsw-noagn.fits',overwrite=True)
'''
self.base_init()
        # NOTE: write_file_for_simulation fails here after the AGN cut is
        # applied (array-length mismatch), and its output is not used by
        # the simulation, so the call is disabled for now
        #self.write_file_for_simulation()
if cutBT:
self.cut_BT(BT=float(args.BT))
self.cut_ellip()
self.nsadict=dict((a,b) for a,b in zip(self.cat['NSAID'],np.arange(len(self.cat))))
#self.get_dsfr()
#self.get_lowsfr_flag()
self.get_DA()
self.get_sizeflag()
self.get_sbflag()
self.get_galfitflag()
self.get_membflag()
self.get_infallflag()
self.get_sampleflag()
self.calculate_sizeratio()
if args.cutBT:
self.write_file_for_simulation()
self.group = self.cat['CLUSTER_SIGMA'] < sigma_split
self.cluster = self.cat['CLUSTER_SIGMA'] > sigma_split
self.get_NUV24()
#self.get_dsfr()
def get_NUV24(self):
self.NUVr=self.cat['ABSMAG'][:,1] - self.cat['ABSMAG'][:,4]
self.NUV = 22.5 - 2.5*np.log10(self.cat['NMGY'][:,1])
self.MAG24 = 2.5*np.log10(3631./(self.cat['FLUX24']*1.e-6))
self.NUV24 =self.NUV-self.MAG24
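    # --- illustrative note (not part of the original pipeline) ---
    # get_NUV24 converts NSA fluxes (nanomaggies; m_AB = 22.5 - 2.5*log10(nmgy))
    # and 24um fluxes (micro-Jy, against the 3631 Jy AB zero point) to
    # magnitudes; a minimal standalone check with made-up fluxes:
    def _sketch_ab_mags(self, nmgy=10., flux24_ujy=500.):
        '''hedged sketch: AB magnitudes from nanomaggies and micro-Jy'''
        m_nmgy = 22.5 - 2.5 * np.log10(nmgy)
        m_24 = 2.5 * np.log10(3631. / (flux24_ujy * 1.e-6))
        return m_nmgy, m_24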
#lcspath = homedir+'/github/LCS/'
#self.lcsbase = lb.galaxies(lcspath)
def get_DA(self):
# stole this from LCSbase.py
#print(self.cat.colnames)
self.DA=np.zeros(len(self.cat))
for i in range(len(self.DA)):
if self.cat['membflag'][i]:
self.DA[i] = cosmo.angular_diameter_distance(self.cat['CLUSTER_REDSHIFT'][i]).value*Mpcrad_kpcarcsec
else:
self.DA[i] = cosmo.angular_diameter_distance(self.cat['ZDIST'][i]).value*Mpcrad_kpcarcsec
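    # --- illustrative sketch (not part of the original pipeline) ---
    # get_DA stores the kpc-per-arcsec scale: the angular diameter
    # distance in Mpc times Mpcrad_kpcarcsec (defined elsewhere in this
    # file; numerically 1000 kpc/Mpc / 206265 arcsec/radian):
    def _sketch_kpc_per_arcsec(self, z=0.03):
        '''hedged sketch: physical scale in kpc/arcsec at redshift z'''
        mpcrad_to_kpcarcsec = 1000. / 206265.
        return cosmo.angular_diameter_distance(z).value * mpcrad_to_kpcarcsec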
def get_sizeflag(self):
''' calculate size flag '''
self.sizeflag=(self.cat['SERSIC_TH50']*self.DA > minsize_kpc)
def get_sbflag(self):
''' surface brightness flag '''
self.sb_obs = 999*np.ones(len(self.cat))
mipsflag = self.cat['FLUX24'] > 0
self.sb_obs[mipsflag]=(self.cat['fcmag1'][mipsflag] + 2.5*np.log10(np.pi*((self.cat['fcre1'][mipsflag]*mipspixelscale)**2)*self.cat['fcaxisratio1'][mipsflag]))
self.sbflag = self.sb_obs < 20.
print('got sb flag')
    def get_gim2dflag(self):
        # not strictly needed: the catalog has already been matched to the
        # Simard table, so this just repeats the physical size cut
        self.gim2dflag=(self.cat['SERSIC_TH50']*self.DA > minsize_kpc)
def get_galfitflag(self):
''' calculate galfit flag '''
self.galfitflag = (self.cat['fcmag1'] > .01) & ~self.cat['fcnumerical_error_flag24']
galfit_override = [70588,70696,43791,69673,146875,82170, 82182, 82188, 82198, 99058, 99660, 99675, 146636, 146638, 146659, 113092, 113095, 72623,72631,72659, 72749, 72778, 79779, 146121, 146130, 166167, 79417, 79591, 79608, 79706, 80769, 80873, 146003, 166044,166083, 89101, 89108,103613,162792,162838, 89063,99509,72800,79381,10368]
for id in galfit_override:
try:
self.galfitflag[self.nsadict[int(id)]] = True
#print('HEY! found a match, just so you know, with NSAID ',id,'\n\tCould be from sources that were removed with AGN/BT/GSWLC matches')
except KeyError:
pass
#print('got a key error, just so you know, with NSAID ',id,'\n\tCould be from sources that were removed with AGN/BT/GSWLC matches')
except IndexError:
pass
#print('WARNING: got an index error in nsadict for NSAID', id,'\n\tCould be from sources that were removed with AGN/BT/GSWLC matches')
#self.galfitflag = self.galfitflag
try:
self.galfitflag[self.nsadict[79378]] = False
except KeyError:
pass
# bringing this over from LCSbase.py
self.badfits=np.zeros(len(self.cat),'bool')
nearbystar=[142655, 143485, 99840, 80878] # bad NSA fit; 24um is ok
#nearbygalaxy=[103927,143485,146607, 166638,99877,103933,99056]#,140197] # either NSA or 24um fit is unreliable
# checked after reworking galfit
nearbygalaxy=[143485,146607, 166638,99877,103933,99056]#,140197] # either NSA or 24um fit is unreliable
#badNSA=[166185,142655,99644,103825,145998]
#badNSA = [
        badfits= nearbygalaxy#+nearbystar+nearbygalaxy
        # NB: these are NSAIDs (integers); casting to bool here, as before,
        # turned them all into True and broke the NSAID comparison below
        badfits=np.array(badfits,'i')
for gal in badfits:
flag = self.cat['NSAID'] == gal
if sum(flag) == 1:
self.badfits[flag] = True
# fold badfits into galfit flag
self.galfitflag = self.galfitflag & ~self.badfits
def get_membflag(self):
self.membflag = (np.abs(self.cat['DELTA_V']) < (-4./3.*self.cat['DR_R200'] + 2))
def get_infallflag(self):
self.infallflag = (np.abs(self.cat['DELTA_V']) < 3) & ~self.membflag
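    # --- illustrative sketch (not part of the original pipeline) ---
    # the two flags above define a wedge in projected phase space:
    # core members satisfy |dv/sigma| < -4/3*(dr/R200) + 2, and infall
    # galaxies fall outside the wedge but still have |dv/sigma| < 3:
    def _sketch_phasespace_class(self, dv_sigma, dr_r200):
        '''hedged sketch: classify a single galaxy as core/infall/neither'''
        if abs(dv_sigma) < (-4. / 3. * dr_r200 + 2):
            return 'core'
        elif abs(dv_sigma) < 3:
            return 'infall'
        return 'neither'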
def get_sampleflag(self):
print(len(self.galfitflag),len(self.sbflag),len(self.cat['lirflag']),len(self.sizeflag))
self.sampleflag= self.sbflag & self.cat['lirflag'] & self.sizeflag & self.galfitflag
self.uvirflag = (self.cat['flag_uv'] > 0) & (self.cat['flag_midir'] > 0)
self.uvirsampleflag = self.sampleflag & self.uvirflag
#& self.cat['galfitflag2'] #& & & #~self.cat['AGNKAUFF'] #& # #& self.cat['gim2dflag'] ##& ~self.cat['fcnumerical_error_flag24']
def calculate_sizeratio(self):
# all galaxies in the catalog have been matched to simard table 1
# does this mean they all have a disk scale length?
self.gim2dflag = np.ones(len(self.cat),'bool')# self.cat['matchflag'] & self.cat['lirflag'] & self.cat['sizeflag'] & self.cat['sbflag']
# stole this from LCSbase.py
self.SIZE_RATIO_DISK = np.zeros(len(self.cat))
a = self.cat['fcre1'][self.gim2dflag]*mipspixelscale # fcre1 = 24um half-light radius in mips pixels
b = self.DA[self.gim2dflag]
c = self.cat[Rdkey][self.gim2dflag] # gim2d half light radius for disk in kpc
# this is the size ratio we use in paper 1
self.SIZE_RATIO_DISK[self.gim2dflag] =a*b/c
        self.SIZE_RATIO_DISK_ERR = np.zeros(len(self.cat))
self.SIZE_RATIO_DISK_ERR[self.gim2dflag] = self.cat['fcre1err'][self.gim2dflag]*mipspixelscale*self.DA[self.gim2dflag]/self.cat[Rdkey][self.gim2dflag]
self.sizeratio = self.SIZE_RATIO_DISK
self.sizeratioERR=self.SIZE_RATIO_DISK_ERR
# size ratio corrected for inclination
#self.size_ratio_corr=self.sizeratio*(self.cat.faxisratio1/self.cat.SERSIC_BA)
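    # --- illustrative note (not part of the original pipeline) ---
    # units in calculate_sizeratio: fcre1 is the 24um half-light radius
    # in MIPS pixels, so fcre1*mipspixelscale is arcsec; multiplying by
    # DA (kpc/arcsec) gives kpc, and dividing by the GIM2D disk scale
    # length Rd (kpc) makes the ratio dimensionless:
    def _sketch_sizeratio(self, re24_pix, da_kpc_arcsec, rd_kpc):
        '''hedged sketch: R24/Rd from the catalog quantities'''
        return re24_pix * mipspixelscale * da_kpc_arcsec / rd_kpc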
    def write_file_for_simulation(self):
        # the size ratio is already in the table, so we only need to
        # recompute DA before writing
        self.get_DA()
        #self.calculate_sizeratio()
        # write a file that contains
        # sizeratio, error, SFR, sersic index, membflag
        # we are using GSWLC SFR
c1 = Column(self.sizeratio,name='sizeratio')
c2 = Column(self.sizeratioERR,name='sizeratio_err')
# using all simard values of sersic fit
tabcols = [c1,c2,self.membflag,self.cat[BTkey],self.cat['ng_1'],self.cat['logSFR'],self.cat['logMstar'],self.cat['fcre1'],self.cat['fcnsersic1'],self.cat['Rd_1'],self.cat['DELTA_V']]
tabnames = ['sizeratio','sizeratio_err','membflag','B_T_r','ng','logSFR','logMstar','fcre1','fcnsersic1','Rd','DELTA_V']
newtable = Table(data=tabcols,names=tabnames)
newtable = newtable[self.sampleflag]
newtable.write(homedir+'/research/LCS/tables/LCS-simulation-data.fits',format='fits',overwrite=True)
def cut_BT(self,BT=None):
print('inside cut_BT, BT = ',BT)
btflag = (self.cat[BTkey] <= BT) & (self.cat['matchflag'])
self.cat = self.cat[btflag]
self.ssfr = self.ssfr[btflag]
#self.sizeratio = self.sizeratio[btflag]
#self.sizeratioERR = self.sizeratioERR[btflag]
def cut_ellip(self):
ellipflag = (self.cat[ellipkey] <= float(args.ellip))
self.cat = self.cat[ellipflag]
self.ssfr = self.ssfr[ellipflag]
def get_mstar_limit(self,rlimit=17.7):
print(rlimit,zmax)
# assume hubble flow
dmax = zmax*3.e5/70
# abs r
# m - M = 5logd_pc - 5
# M = m - 5logd_pc + 5
## r = 22.5 - np.log10(lcsgsw['NMGY'][:,4])
Mr = rlimit - 5*np.log10(dmax*1.e6) +5
print(Mr)
ssfr = self.cat['logSFR'] - self.cat['logMstar']
flag = (self.cat['logMstar'] > 0) & (ssfr > -11.5)
Mr = self.cat['ABSMAG'][:,4]+5*np.log10(.7)
plt.figure()
plt.plot(Mr[flag],self.cat['logMstar'][flag],'bo',alpha=.2,markersize=3)
plt.axvline(x=-18.6)
plt.axhline(y=10)
plt.axhline(y=9.7,ls='--')
plt.xlabel('Mr')
plt.ylabel('logMstar GSWLC')
plt.grid(True)
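    # --- illustrative note (not part of the original pipeline) ---
    # worked example of the distance-modulus step in get_mstar_limit:
    # with M = m - 5*log10(d_pc) + 5 and a pure Hubble-flow distance,
    # rlimit = 17.7 at zmax = 0.04 (values assumed here) gives
    # d ~ 171 Mpc and M_r ~ -18.5:
    def _sketch_mr_limit(self, rlimit=17.7, zmax=0.04):
        '''hedged sketch: limiting absolute magnitude from apparent limit'''
        dmax_mpc = zmax * 3.e5 / 70.   # Mpc, Hubble flow with H0 = 70
        return rlimit - 5 * np.log10(dmax_mpc * 1.e6) + 5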
def plot_dvdr(self, figname1=None,figname2=None,plotsingle=True):
# log10(chabrier) = log10(Salpeter) - .25 (SFR estimate)
# log10(chabrier) = log10(diet Salpeter) - 0.1 (Stellar mass estimates)
xmin,xmax,ymin,ymax = 0,3.5,0,3.5
if plotsingle:
plt.figure(figsize=(8,6))
ax=plt.gca()
plt.subplots_adjust(left=.1,bottom=.15,top=.9,right=.9)
plt.ylabel('$ \Delta v/\sigma $',fontsize=26)
plt.xlabel('$ \Delta R/R_{200} $',fontsize=26)
plt.legend(loc='upper left',numpoints=1)
if USE_DISK_ONLY:
clabel=['$R_{24}/R_d$','$R_{iso}(24)/R_{iso}(r)$']
else:
clabel=['$R_e(24)/R_e(r)$','$R_{iso}(24)/R_{iso}(r)$']
x=(self.cat['DR_R200'])
y=abs(self.cat['DELTA_V'])
plt.hexbin(x,y,extent=(xmin,xmax,ymin,ymax),cmap='gray_r',gridsize=50,vmin=0,vmax=10)
xl=np.arange(0,2,.1)
plt.plot(xl,-4./3.*xl+2,'k-',lw=3,color='b')
props = dict(boxstyle='square', facecolor='0.8', alpha=0.8)
plt.text(.1,.1,'CORE',transform=plt.gca().transAxes,fontsize=18,color=colorblind3,bbox=props)
plt.text(.6,.6,'INFALL',transform=plt.gca().transAxes,fontsize=18,color=colorblind3,bbox=props)
#plt.plot(xl,-3./1.2*xl+3,'k-',lw=3)
        # plot low sfr galaxies
        # (lowsfr_flag is attached by comp_lcs_gsw.get_lowsfr; this method assumes it is set)
        flag = self.lowsfr_flag & self.sampleflag & self.membflag
plt.plot(x[flag],y[flag],'ko',color=darkblue,mec='k',markersize=10)
plt.axis([xmin,xmax,ymin,ymax])
if figname1 is not None:
plt.savefig(figname1)
if figname2 is not None:
plt.savefig(figname2)
def compare_sfrs(self,shift=None,masscut=None,nbins=20):
'''
plot distribution of LCS external and core SFRs
'''
if masscut is None:
masscut = self.masscut
flag = (self.cat['logSFR'] > -99) & (self.cat['logSFR']-self.cat['logMstar'] > -11.5) & (self.cat['logMstar'] > masscut)
sfrcore = self.cat['logSFR'][self.cat['membflag'] & flag]
sfrext = self.cat['logSFR'][~self.cat['membflag']& flag]
plt.figure()
mybins = np.linspace(-2.5,1.5,nbins)
plt.hist(sfrext,bins=mybins,histtype='step',label='External',lw=3)
if shift is not None:
plt.hist(sfrext+np.log10(1-shift),bins=mybins,histtype='step',label='Shifted External',lw=3)
plt.hist(sfrcore,bins=mybins,histtype='step',label='Core',lw=3)
plt.legend()
plt.xlabel('SFR')
plt.ylabel('Normalized Counts')
print('CORE VS EXTERNAL')
t = lcscommon.ks(sfrcore,sfrext,run_anderson=False)
def plot_ssfr_sizeratio(self,outfile1='plot.pdf',outfile2='plot.png'):
'''
GOAL:
* compare sSFR vs sizeratio for galaxies in the paper 1 sample
INPUT:
* outfile1 - usually pdf name for output plot
* outfile2 - usually png name for output plot
OUTPUT:
* plot of sSFR vs sizeratio
'''
plt.figure(figsize=(8,6))
flag = self.sampleflag
plt.scatter(self.ssfr[flag],self.sizeratio[flag],c=self.cat[BTkey][flag])
cb = plt.colorbar(label='B/T')
plt.ylabel('$R_e(24)/R_e(r)$',fontsize=20)
plt.xlabel('$sSFR / (yr^{-1})$',fontsize=20)
t = lcscommon.spearmanr(self.sizeratio[flag],self.ssfr[flag])
print(t)
plt.savefig(outfile1)
plt.savefig(outfile2)
def plot_HIdef(self):
''' compare delta sfr and HI def of field, core and infall '''
pass
#######################################################################
#######################################################################
########## NEW CLASS: comp_lcs_gsw
#######################################################################
#######################################################################
class comp_lcs_gsw():
'''
class that combines the LCS and GSWLC catalogs as self.lcs.xx and self.gsw.xx
- used to compare LCS with field sample constructed from SDSS with GSWLC SFR and Mstar values
'''
def __init__(self,lcs,gsw,minmstar = 10, minssfr = -11.5,cutBT=False):
self.lcs = lcs
self.gsw = gsw
self.masscut = minmstar
self.ssfrcut = minssfr
#self.lowssfr_flag = (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut) & (self.lcs.lowsfr_flag)
self.cutBT = cutBT
self.lcs_mass_sfr_flag = (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
self.gsw_mass_sfr_flag = (self.gsw.cat['logMstar']> self.masscut) & (self.gsw.ssfr > self.ssfrcut)
self.gsw_uvir_flag = (self.gsw.cat['flag_uv']> 0) & (self.gsw.cat['flag_midir'] > 0)
self.lcs_uvir_flag = (self.lcs.cat['flag_uv']> 0) & (self.lcs.cat['flag_midir'] > 0)
self.fit_gsw_ms()
self.get_lowsfr()
def get_lowsfr(self):
self.gsw_dsfr = self.gsw.cat['logSFR'] - self.gsw.get_MS(self.gsw.cat['logMstar'])
self.gsw_lowsfr_flag = self.gsw_dsfr < -0.45
self.lcs_dsfr = self.lcs.cat['logSFR'] - self.gsw.get_MS(self.lcs.cat['logMstar'])
self.lcs_lowsfr_flag = self.lcs_dsfr < -0.45
self.lcs.lowsfr_flag = self.lcs_dsfr < -0.45
def write_tables_for_SFR_sim(self):
''' write out field and LCS core samples to use in SFR simulation '''
# LCS sample
# above ssfr and mstar limits, and core galaxy (membflag)
lcs = self.lcs.cat[self.lcs_mass_sfr_flag & self.lcs.membflag]
# create new table with Mstar and SFR
newtab = Table([lcs['logMstar'],lcs['logSFR'],lcs[BTkey]],names=['logMstar','logSFR','BT'])
# write out table with Mstar and SFR
if args.cutBT:
outfile = os.path.join(homedir,'research/LCS/tables/','lcs-sfr-sim-BTcut.fits')
else:
outfile = os.path.join(homedir,'research/LCS/tables/','lcs-sfr-sim.fits')
newtab.write(outfile,overwrite=True,format='fits')
# FIELD SAMPLE
# above ssfr and mstar limits
gsw = self.gsw.cat[self.gsw_mass_sfr_flag]
# create new table with Mstar and SFR
newtab = Table([gsw['logMstar'],gsw['logSFR'],gsw[BTkey]],names=['logMstar','logSFR','BT'])
# write out table with Mstar and SFR
if args.cutBT:
outfile = os.path.join(homedir,'research/LCS/tables/','gsw-sfr-sim-BTcut.fits')
else:
outfile = os.path.join(homedir,'research/LCS/tables/','gsw-sfr-sim.fits')
newtab.write(outfile,overwrite=True,format='fits')
def fit_gsw_ms(self):
#self.gsw.fit_MS(flag=self.gsw_mass_sfr_flag)
flag = self.gsw_mass_sfr_flag #& (self.gsw.cat[BTkey]< .5)
self.gsw.fit_MS(flag=flag)#flag=
def plot_sfr_mstar(self,lcsflag=None,label='LCS core',outfile1=None,outfile2=None,coreflag=True,massmatch=True,hexbinflag=False):
"""
PURPOSE:
        * compares SFR vs mstar of lcs and gswlc field samples
* applies stellar mass and ssfr cuts that are specified at beginning of program
INPUT:
* lcsflag
- if None, this will select LCS members
- you can use this to select other slices of the LCS sample, like external, or uv/ir detected
* label
- name of the LCS subsample being plotted
- default is 'LCS core'
* outfile1, outfile2
- figure name to save as
- I'm using two to save a png and pdf version
* massmatch
- set to True to draw a mass-matched sample from GSWLC field sample
* hexbinflag
- set to True to use hexbin to plot field sample
- do this if NOT drawing mass-matched sample b/c number of points is large
OUTPUT:
* creates a plot of sfr vs mstar
* creates histograms above x and y axes to compare two samples
* prints out the KS test statistics
"""
if lcsflag is None:
#lcsflag = self.lcs.cat['membflag']
lcsflag = self.lcs.membflag
flag1 = lcsflag & self.lcs_mass_sfr_flag
# removing field1 cut because we are now using Tempel catalog that only
# includes galaxies in halo masses logM < 12.5
flag2 = (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
print('number in lcs sample = ',sum(flag1))
print('number in gsw sample = ',sum(flag2))
# GSWLC sample
x1 = self.gsw.cat['logMstar'][flag2]
y1 = self.gsw.cat['logSFR'][flag2]
z1 = self.gsw.cat['Z_1'][flag2]
# LCS sample (forgive the switch in indices)
x2 = self.lcs.cat['logMstar'][flag1]
y2 = self.lcs.cat['logSFR'][flag1]
z2 = self.lcs.cat['Z_1'][flag1]
# get indices for mass-matched gswlc sample
        if massmatch:
            #keep_indices = mass_match(x2,x1,inputZ=z2,compZ=z1,dz=.002)
            keep_indices = mass_match(x2,x1)
            # mass_match returns False if the matching fails; retry without
            # the redshift constraint in that case (the old len()-based
            # check raised a TypeError when a bare False came back)
            if keep_indices is False:
                print("WARNING: Removing the redshift constraint from the mass-matched sample")
                keep_indices = mass_match(x2,x1)
x1 = x1[keep_indices]
y1 = y1[keep_indices]
if coreflag:
color2=darkblue
else:
color2=lightblue
ax1,ax2,ax3 = colormass(x1,y1,x2,y2,'GSWLC',label,'sfr-mstar-gswlc-field.pdf',ymin=-2,ymax=1.6,xmin=9.5,xmax=11.5,nhistbin=10,ylabel='$\log_{10}(SFR/(M_\odot/yr))$',contourflag=False,alphagray=.05,hexbinflag=hexbinflag,color2=color2,color1='0.2',alpha1=1,ssfrlimit=-11.5)
# add marker to figure to show galaxies with size measurements
#self.plot_lcs_size_sample(ax1,memb=lcsmemb,infall=lcsinfall,ssfrflag=False)
#ax1.legend(loc='upper left')
if not hexbinflag:
ax1.legend(loc='upper left')
#plot_BV_MS(ax1)
self.gsw.plot_MS(ax1)
ax1.legend(loc='lower right')
plt.subplots_adjust(left=.15)
#mybins=np.linspace(min(x2),max(x2),8)
#ybin,xbin,binnumb = binned_statistic(x2,y2,statistic='median',bins=mybins)
#yerr,xbin,binnumb = binned_statistic(x2,y2,statistic='std',bins=mybins)
#nyerr,xbin,binnumb = binned_statistic(x2,y2,statistic='count',bins=mybins)
#yerr = yerr/np.sqrt(nyerr)
#dbin = xbin[1]-xbin[0]
#ax1.plot(xbin[:-1]+0.5*dbin,ybin,color=color2,lw=3)
#ax1.fill_between(xbin[:-1]+0.5*dbin,ybin+yerr,ybin-yerr,color=color2,alpha=.4)
if outfile1 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-gsw-sfms.pdf')
else:
plt.savefig(outfile1)
if outfile2 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-gsw-sfms.png')
else:
plt.savefig(outfile2)
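    # --- illustrative sketch (not part of the original pipeline) ---
    # mass_match is defined elsewhere in this file; a minimal stand-in
    # for the idea (for each input galaxy, draw a comparison galaxy of
    # similar stellar mass) could look like this:
    def _sketch_mass_match(self, input_mass, comp_mass, nmatch=1, seed=None):
        '''hedged sketch: crude nearest-mass matching; returns comparison indices'''
        rng = np.random.RandomState(seed)
        comp_mass = np.asarray(comp_mass)
        keep = []
        for m in np.asarray(input_mass):
            # draw nmatch galaxies from the 10 nearest in mass
            nearest = np.argsort(np.abs(comp_mass - m))[:10]
            keep.extend(rng.choice(nearest, size=nmatch, replace=False))
        return np.array(keep)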
def plot_ssfr_mstar(self,lcsflag=None,outfile1=None,outfile2=None,label='LCS core',nbins=20,coreflag=True,massmatch=True,hexbinflag=True,lcsmemb=False,lcsinfall=False):
"""
OVERVIEW:
* compares ssfr vs mstar of lcs and gswlc field samples
* applies stellar mass and ssfr cuts that are specified at beginning of program
INPUT:
* lcsflag
- if None, this will select LCS members
- you can use this to select other slices of the LCS sample, like external
* label
- name of the LCS subsample being plotted
- default is 'LCS core'
* outfile1, outfile2
- figure name to save as
- I'm using two to save a png and pdf version
OUTPUT:
* creates a plot of ssfr vs mstar
* creates histograms above x and y axes to compare two samples
"""
if lcsflag is None:
lcsflag = self.lcs.cat['membflag']
flag1 = lcsflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
flag2 = (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
print('number in core sample = ',sum(flag1))
print('number in external sample = ',sum(flag2))
x1 = self.gsw.cat['logMstar'][flag2]
y1 = self.gsw.ssfr[flag2]
z1 = self.gsw.cat['Z_1'][flag2]
# LCS sample (forgive the switch in indices)
x2 = self.lcs.cat['logMstar'][flag1]
y2 = self.lcs.cat['logSFR'][flag1] - self.lcs.cat['logMstar'][flag1]
z2 = self.lcs.cat['Z_1'][flag1]
if coreflag:
color2=darkblue
else:
color2=lightblue
# get indices for mass-matched gswlc sample
if massmatch:
keep_indices = mass_match(x2,x1,inputZ=z2,compZ=z1,dz=.002)
x1 = x1[keep_indices]
y1 = y1[keep_indices]
print('AFTER MASS MATCHING')
print('number of gswlc = ',len(x1))
print('number of lcs = ',len(x2))
ax1,ax2,ax3 = colormass(x1,y1,x2,y2,'GSWLC',label,'sfr-mstar-gswlc-field.pdf',ymin=-11.6,ymax=-8.75,xmin=9.5,xmax=11.5,nhistbin=nbins,ylabel='$\log_{10}(sSFR)$',\
contourflag=False,alphagray=.15,hexbinflag=hexbinflag,color1='0.5',color2=color2,alpha1=1)
#self.plot_lcs_size_sample(ax1,memb=lcsmemb,infall=lcsinfall,ssfrflag=True)
ax1.legend(loc='upper left')
if outfile1 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-gsw-ssfrmstar.pdf')
else:
plt.savefig(outfile1)
if outfile2 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-gsw-ssfrmstar.png')
else:
plt.savefig(outfile2)
def plot_sfr_mstar_lcs(self,outfile1=None,outfile2=None,nbins=10):
"""
OVERVIEW:
        * compares SFR vs mstar within lcs samples
* applies stellar mass and ssfr cuts that are specified at beginning of program
INPUT:
* outfile1, outfile2
- figure name to save as
- I'm using two to save a png and pdf version
OUTPUT:
        * creates a plot of SFR vs mstar
* creates histograms above x and y axes to compare two samples
"""
baseflag = (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
flag1 = baseflag & self.lcs.cat['membflag']
flag2 = baseflag & ~self.lcs.cat['membflag'] & (self.lcs.cat['DELTA_V'] < 3.)
print('number in core sample = ',sum(flag1))
print('number in external sample = ',sum(flag2))
x1 = self.lcs.cat['logMstar'][flag1]
y1 = self.lcs.cat['logSFR'][flag1]
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.cat['logSFR'][flag2]
ax1,ax2,ax3 = colormass(x1,y1,x2,y2,'LCS core','LCS infall','sfr-mstar-lcs-core-field.pdf',ymin=-2,ymax=1.5,xmin=9.5,xmax=11.25,nhistbin=nbins,ylabel='$\log_{10}(SFR)$',contourflag=False,alphagray=.8,alpha1=1,color1=darkblue,lcsflag=True,ssfrlimit=-11.5)
self.plot_lcs_size_sample(ax1,memb=True,infall=True)
ax1.legend(loc='upper left')
#plot_BV_MS(ax1)
self.gsw.plot_MS(ax1)
ax1.legend(loc='lower right')
plt.subplots_adjust(left=.15)
if outfile1 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-external-sfrmstar.pdf')
else:
plt.savefig(outfile1)
if outfile2 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-external-sfrmstar.png')
else:
plt.savefig(outfile2)
def plot_ssfr_mstar_lcs(self,outfile1=None,outfile2=None,nbins=20):
"""
OVERVIEW:
* compares ssfr vs mstar within lcs samples
* applies stellar mass and ssfr cuts that are specified at beginning of program
INPUT:
* outfile1, outfile2
- figure name to save as
- I'm using two to save a png and pdf version
OUTPUT:
* creates a plot of ssfr vs mstar
* creates histograms above x and y axes to compare two samples
"""
baseflag = (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
flag1 = baseflag & self.lcs.cat['membflag']
flag2 = baseflag & ~self.lcs.cat['membflag'] & (self.lcs.cat['DELTA_V'] < 3.)
print('number in core sample = ',sum(flag1))
print('number in external sample = ',sum(flag2))
x1 = self.lcs.cat['logMstar'][flag1]
y1 = self.lcs.ssfr[flag1]
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.ssfr[flag2]
ax1,ax2,ax3 = colormass(x1,y1,x2,y2,'LCS core','LCS infall','sfr-mstar-lcs-core-field.pdf',ymin=-11.6,ymax=-8.75,xmin=9.5,xmax=11.5,nhistbin=nbins,ylabel='$\log_{10}(sSFR)$',contourflag=False,alphagray=.8,alpha1=1,color1=darkblue,lcsflag=True)
#self.plot_lcs_size_sample(ax1,memb=True,infall=True,ssfrflag=True)
ax1.legend(loc='upper left')
plt.subplots_adjust(left=.15)
if outfile1 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-gsw-ssfrmstar.pdf')
else:
plt.savefig(outfile1)
if outfile2 is None:
plt.savefig(homedir+'/research/LCS/plots/lcscore-gsw-ssfrmstar.png')
else:
plt.savefig(outfile2)
def plot_lcs_size_sample(self,ax,memb=True,infall=True,ssfrflag=False):
        # final color choice; earlier trials used colorblind1/lightblue/blueviolet
        cmemb = 'darkmagenta'
        cinfall = 'darkmagenta'
if ssfrflag:
y = self.lcs.cat['logSFR'] - self.lcs.cat['logMstar']
else:
y = self.lcs.cat['logSFR']
baseflag = self.lcs.sampleflag& (self.lcs.cat['logMstar'] > self.masscut)
if memb:
flag = self.lcs.sampleflag & self.lcs.cat['membflag'] & (self.lcs.cat['logMstar'] > self.masscut)
#ax.plot(self.lcs.cat['logMstar'][flag],y[flag],'ks',color=cmemb,alpha=.5,markersize=8,label='LCS memb w/size ('+str(sum(flag))+')')
if infall:
flag = self.lcs.sampleflag & ~self.lcs.cat['membflag'] & (self.lcs.cat['DELTA_V'] < 3.) & (self.lcs.cat['logMstar'] > self.masscut)
#ax.plot(self.lcs.cat['logMstar'][flag],y[flag],'k^',color=cinfall,alpha=.5,markersize=10,label='LCS infall w/size ('+str(sum(flag))+')')
flag = baseflag& (self.lcs.cat['DELTA_V'] < 3.)
ax.plot(self.lcs.cat['logMstar'][flag],y[flag],'ks',alpha=.6,zorder=1,markersize=10,lw=2,label='LCS w/size ('+str(sum(flag))+')')
def plot_dsfr_hist(self,nbins=15,outfile1=None,outfile2=None):
#lcsflag = self.lcs.cat['membflag']
flag1 = self.lcs.membflag & self.lcs_mass_sfr_flag #(self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
# removing field1 cut because we are now using Tempel catalog that only
# includes galaxies in halo masses logM < 12.5
flag2 = self.gsw_mass_sfr_flag #(self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
# GSWLC
x1 = self.gsw.cat['logMstar'][flag2]
y1 = self.gsw.cat['logSFR'][flag2]
#dsfr1 = y1-get_BV_MS(x1)
dsfr1 = y1-self.gsw.get_MS(x1)
#LCS core
x2 = self.lcs.cat['logMstar'][flag1]
y2 = self.lcs.cat['logSFR'][flag1]
#dsfr2 = y2-get_BV_MS(x2)
dsfr2 = y2-self.gsw.get_MS(x2)
#LCS infall
#(abs(self.cat['DELTA_V']) < 3) & ~self.membflag
#lcsflag = ~self.lcs.cat['membflag'] & (np.abs(self.lcs.cat['DELTA_V']) < 3.)
flag3 = self.lcs.infallflag & self.lcs_mass_sfr_flag
#print('number of infall galaxies, first selection = ',sum(flag3))
#flag3 = lcsflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
#print('number of infall galaxies, second selection = ',sum(flag3))
x3 = self.lcs.cat['logMstar'][flag3]
y3 = self.lcs.cat['logSFR'][flag3]
#dsfr3 = y3-get_BV_MS(x3)
dsfr3 = y3-self.gsw.get_MS(x3)
plt.figure(figsize=(8,6))
mybins = np.linspace(-1.6,1.6,nbins)
delta_bin = mybins[1]-mybins[0]
mybins = mybins + 0.5*delta_bin
dsfrs = [dsfr1,dsfr2,dsfr3]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
orders = [1,3,2]
lws = [3,4,3]
alphas = [.4,0,.5]
hatches = ['/','\\','|']
for i in range(len(dsfrs)):
plt.hist(dsfrs[i],bins=mybins,color=colors[i],density=True,\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=orders[i])#hatch=hatches[i])
plt.hist(dsfrs[i],bins=mybins,color=colors[i],density=True,\
histtype='step',lw=lws[i],zorder=orders[i],label=labels[i])#hatch=hatches[i])
# calculate median and error from bootstrap resampling
thismedian = np.median(dsfrs[i])
# error from bootstrap resampling
lower_median, upper_median = get_bootstrap_confint(dsfrs[i])
print()
print('{}: median = {:.2f}-{:.2f}+{:.2f} (bootstrap)'.format(labels[i],thismedian,thismedian-lower_median,upper_median-thismedian))
print('{}: mean, std, std_err = {:.2f},{:.2f},{:.2f} '.format(labels[i],np.mean(dsfrs[i]),np.std(dsfrs[i]),np.std(dsfrs[i])/np.sqrt(len(dsfrs[i]))))
print('')
#plt.xlabel('$ \log_{10}SFR - \log_{10}SFR_{MS} \ (M_\odot/yr) $',fontsize=20)
plt.xlabel('$ \Delta \log_{10}SFR $',fontsize=20)
plt.ylabel('$Normalized \ Distribution$',fontsize=20)
# add range considered "normal" SF
plt.axvline(x=.45,ls='--',color='b')
plt.axvline(x=-.45,ls='--',color='b')
plt.legend()
if outfile1 is not None:
plt.savefig(outfile1)
if outfile2 is not None:
plt.savefig(outfile2)
print('KS STATISTICS: FIELD VS CORE')
print(ks_2samp(dsfr1,dsfr2))
print(anderson_ksamp([dsfr1,dsfr2]))
print('')
print('KS STATISTICS: FIELD VS INFALL')
print(ks_2samp(dsfr1,dsfr3))
print(anderson_ksamp([dsfr1,dsfr3]))
print('')
print('KS STATISTICS: CORE VS INFALL')
print(ks_2samp(dsfr2,dsfr3))
print(anderson_ksamp([dsfr2,dsfr3]))
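    # --- illustrative sketch (not part of the original pipeline) ---
    # get_bootstrap_confint is defined elsewhere in this file; a minimal
    # stand-in that returns a 68% (16th-84th percentile) bootstrap
    # confidence interval on a statistic:
    def _sketch_bootstrap_confint(self, data, bootfunc=np.median, nboot=1000):
        '''hedged sketch: bootstrap confidence interval on bootfunc(data)'''
        data = np.asarray(data)
        stats = [bootfunc(np.random.choice(data, size=len(data), replace=True))
                 for _ in range(nboot)]
        return np.percentile(stats, 16), np.percentile(stats, 84)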
def plot_sfr_morph_hist(self,nbins=15,outfile1=None,outfile2=None):
# B/T
# ng - sersic index
# for LCS, we have NSA, NUV-r
#
        lcsflag = self.lcs.cat['membflag']
        # note: self.lcs.sfsample is not defined anywhere; use the mass/ssfr cut
        flag1 = lcsflag & self.lcs_mass_sfr_flag
        # removing field1 cut because we are now using Tempel catalog that only
        # includes galaxies in halo masses logM < 12.5
        flag = (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
        # GSWLC field (dsfr1/dsfr2 were referenced below but never computed)
        x1 = self.gsw.cat['logMstar'][flag]
        y1 = self.gsw.cat['logSFR'][flag]
        dsfr1 = y1-self.gsw.get_MS(x1)
        #LCS core
        x2 = self.lcs.cat['logMstar'][flag1]
        y2 = self.lcs.cat['logSFR'][flag1]
        dsfr2 = y2-self.gsw.get_MS(x2)
        #LCS infall
        lcsflag = ~self.lcs.cat['membflag'] & (self.lcs.cat['DELTA_V'] < 3.)
        flag3 = lcsflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
        x3 = self.lcs.cat['logMstar'][flag3]
        y3 = self.lcs.cat['logSFR'][flag3]
        #dsfr3 = y3-get_BV_MS(x3)
        dsfr3 = y3-self.gsw.get_MS(x3)
plt.figure(figsize=(8,6))
mybins = np.linspace(-1.5,1.5,nbins)
delta_bin = mybins[1]-mybins[0]
mybins = mybins + 0.5*delta_bin
dsfrs = [dsfr1,dsfr2,dsfr3]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
orders = [1,3,2]
lws = [3,4,3]
alphas = [.4,0,.5]
hatches = ['/','\\','|']
for i in range(len(dsfrs)):
plt.hist(dsfrs[i],bins=mybins,color=colors[i],density=True,\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=orders[i])#hatch=hatches[i])
plt.hist(dsfrs[i],bins=mybins,color=colors[i],density=True,\
histtype='step',lw=lws[i],zorder=orders[i],label=labels[i])#hatch=hatches[i])
plt.xlabel('$ \log_{10}SFR - \log_{10}SFR_{MS} \ (M_\odot/yr) $',fontsize=20)
plt.ylabel('$Normalized \ Distribution$',fontsize=20)
plt.legend()
if outfile1 is not None:
plt.savefig(outfile1)
if outfile2 is not None:
plt.savefig(outfile2)
def plot_dsfr_sizeratio(self,nbins=15,outfile1=None,outfile2=None,sampleflag=None):
if sampleflag is None:
sampleflag = self.lcs.sampleflag
print('number in sampleflag = ',sum(sampleflag),len(sampleflag))
print('number in membflag = ',sum(self.lcs.membflag),len(self.lcs.membflag))
lcsflag = self.lcs.membflag & sampleflag
print('number in both = ',sum(lcsflag))
print('number in both and in sfr/mstar cut = ',sum(lcsflag & self.lcs_mass_sfr_flag))
flag2 = lcsflag & self.lcs_mass_sfr_flag
#LCS core
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.cat['logSFR'][flag2]
z2 = self.lcs.sizeratio[flag2]
#dsfr2 = y2-get_BV_MS(x2)
dsfr2 = y2-self.gsw.get_MS(x2)
print('fraction of core with dsfr below 0.3dex = {:.3f} ({:d}/{:d})'.format(sum(dsfr2 < -0.3)/len(dsfr2),sum(dsfr2 < -0.3),len(dsfr2)))
#LCS infall
lcsflag = sampleflag &self.lcs.infallflag
flag3 = lcsflag & self.lcs_mass_sfr_flag
x3 = self.lcs.cat['logMstar'][flag3]
y3 = self.lcs.cat['logSFR'][flag3]
z3 = self.lcs.sizeratio[flag3]
#dsfr3 = y3-get_BV_MS(x3)
dsfr3 = y3-self.gsw.get_MS(x3)
        print('fraction of infall with dsfr below 0.3dex = {:.3f} ({:d}/{:d})'.format(sum(dsfr3 < -0.3)/len(dsfr3),sum(dsfr3 < -0.3),len(dsfr3)) )
# make figure
#plt.figure(figsize=(8,6))
sizes = [z2,z3]
dsfrs = [dsfr2,dsfr3]
colors = [darkblue,lightblue]
labels = ['LCS Core ({})'.format(sum(flag2)),'LCS Infall ({})'.format(sum(flag3))]
hatches = ['/','\\','|']
#for i in range(len(dsfrs)):
# plt.plot(sizes[i],dsfrs[i],'bo',c=colors[i],label=labels[i])
#plt.ylabel('$ SFR - SFR_{MS}(M_\star) \ (M_\odot/yr) $',fontsize=20)
#plt.xlabel('$R_{24}/R_d$',fontsize=20)
#plt.legend()
#plt.axhline(y=0,color='k')
plt.figure()
        nbins=12  # NB: overrides the nbins keyword for the histogram panels
ax1,ax2,ax3 = colormass(z2,dsfr2,z3,dsfr3,'LCS core','LCS infall','temp.pdf',ymin=-1.5,ymax=1.,xmin=-.05,xmax=2,nhistbin=nbins,xlabel='$R_{24}/R_d$',ylabel='$\log_{10}(SFR)-\log_{10}(SFR_{MS}) \ (M_\odot/yr)$',contourflag=False,alphagray=.8,alpha1=1,color1=darkblue,lcsflag=True)
var1 = z2.tolist()+z3.tolist()
var2 = dsfrs[0].tolist()+dsfrs[1].tolist()
t = lcscommon.spearmanr(var1,var2)
print(t)
# add line to show sb limit from LCS size measurements
size=np.linspace(0,2)
sb = .022
sfr = sb*(4*np.pi*size**2)
ax1.plot(size,np.log10(sfr),'k--',label='SB limit')
ax1.axhline(y=.3,ls=':',color='0.5')
ax1.axhline(y=-.3,ls=':',color='0.5')
# plot all galfit results
baseflag = self.lcs_mass_sfr_flag & ~sampleflag #& ~self.lcs.cat['agnflag']
flag4 = self.lcs.cat['membflag'] & baseflag
x4 = self.lcs.cat['logMstar'][flag4]
y4 = self.lcs.cat['logSFR'][flag4]
z4 = self.lcs.sizeratio[flag4]
#dsfr4 = y4-get_BV_MS(x4)
dsfr4 = y4-self.gsw.get_MS(x4)
ax1.plot(z4,dsfr4,'kx',c=darkblue,markersize=10)
# plot all galfit results
flag4 = self.lcs.infallflag& self.lcs_mass_sfr_flag & ~sampleflag #& ~self.lcs.cat['agnflag']
x4 = self.lcs.cat['logMstar'][flag4]
y4 = self.lcs.cat['logSFR'][flag4]
z4 = self.lcs.sizeratio[flag4]
#dsfr4 = y4-get_BV_MS(x4)
dsfr4 = y4-self.gsw.get_MS(x4)
ax1.plot(z4,dsfr4,'kx',markersize=10,c=lightblue)
if outfile1 is not None:
plt.savefig(outfile1)
if outfile2 is not None:
plt.savefig(outfile2)
### COMPARE MEAN SKEW AND KURTOSIS OF THE SIZE and SFR DISTRIBUTIONS
print('#################################################')
print('size distributions')
print('\tMean Core = {:.2f}; Infall = {:.2f}'.format(np.mean(z2),np.mean(z3)))
print('\tSkew Core = {:.2f}; Infall = {:.2f}'.format(st.skew(z2),st.skew(z3)))
        # get confidence interval from bootstrap resampling
coreskew_low,coreskew_up = get_bootstrap_confint(z2,bootfunc=st.skew)
infallskew_low,infallskew_up = get_bootstrap_confint(z3,bootfunc=st.skew)
print('Core skew:',coreskew_low,coreskew_up)
print('\tSkew Core = {:.2f}+{:.2f}-{:.2f}; Infall = {:.2f}+{:.2f}-{:.2f}'.format(st.skew(z2),coreskew_up-st.skew(z2),st.skew(z2)-coreskew_low,st.skew(z3),infallskew_up-st.skew(z3),st.skew(z3)-infallskew_low))
print('\tKurtosis Core = {:.2f}; Infall = {:.2f}'.format(st.kurtosis(z2),st.kurtosis(z3)))
# x2 and x3 are the core and infall mass
print('#################################################')
print('dSFR distributions')
print('\tMean Core = {:.2f}; Infall = {:.2f}'.format(np.mean(dsfr2),np.mean(dsfr3)))
print('\tSTD Core = {:.2f}; Infall = {:.2f}'.format(np.std(dsfr2),np.std(dsfr3)))
print('\tERR MEAN Core = {:.3f}; Infall = {:.3f}'.format(np.std(dsfr2)/np.sqrt(len(dsfr2)),np.std(dsfr3)/np.sqrt(len(dsfr3))))
print('\tSkew Core = {:.2f}; Infall = {:.2f}'.format(st.skew(dsfr2),st.skew(dsfr3)))
coreskew_low,coreskew_up = get_bootstrap_confint(dsfr2,bootfunc=st.skew)
infallskew_low,infallskew_up = get_bootstrap_confint(dsfr3,bootfunc=st.skew)
print('Core skew:',coreskew_low,coreskew_up)
print('\tSkew Core = {:.2f}+{:.2f}-{:.2f}; Infall = {:.2f}+{:.2f}-{:.2f}'.format(st.skew(dsfr2),coreskew_up-st.skew(dsfr2),st.skew(dsfr2)-coreskew_low,st.skew(dsfr3),infallskew_up-st.skew(dsfr3),st.skew(dsfr3)-infallskew_low))
print('\tKurtosis Core = {:.2f}; Infall = {:.2f}'.format(st.kurtosis(dsfr2),st.kurtosis(dsfr3)))
return ax1,ax2,ax3
def plot_dsfr_HIdef(self,nbins=15,outfile1=None,outfile2=None):
lcsflag = self.lcs.cat['membflag'] & self.lcs.sampleflag & self.lcs.cat['HIflag']
flag2 = lcsflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
#LCS core
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.cat['logSFR'][flag2]
z2 = self.lcs.cat['HIDef'][flag2]
#dsfr2 = y2-get_BV_MS(x2)
dsfr2 = y2-self.gsw.get_MS(x2)
#LCS infall
lcsflag = self.lcs.sampleflag & self.lcs.cat['HIflag'] & ~self.lcs.cat['membflag'] & (self.lcs.cat['DELTA_V'] < 3.)
flag3 = lcsflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
x3 = self.lcs.cat['logMstar'][flag3]
y3 = self.lcs.cat['logSFR'][flag3]
z3 = self.lcs.cat['HIDef'][flag3]
#dsfr3 = y3-get_BV_MS(x3)
dsfr3 = y3-self.gsw.get_MS(x3)
# make figure
#plt.figure(figsize=(8,6))
sizes = [z2,z3]
dsfrs = [dsfr2,dsfr3]
colors = [darkblue,lightblue]
labels = ['LCS Core ({})'.format(sum(flag2)),'LCS Infall ({})'.format(sum(flag3))]
hatches = ['/','\\','|']
#for i in range(len(dsfrs)):
# plt.plot(sizes[i],dsfrs[i],'bo',c=colors[i],label=labels[i])
#plt.ylabel('$ SFR - SFR_{MS}(M_\star) \ (M_\odot/yr) $',fontsize=20)
#plt.xlabel('$R_{24}/R_d$',fontsize=20)
#plt.legend()
#plt.axhline(y=0,color='k')
plt.figure()
nbins=12
ax1,ax2,ax3 = colormass(z2,dsfr2,z3,dsfr3,'LCS core','LCS infall','temp.pdf',ymin=-1,ymax=1.,xmin=-.05,xmax=2,nhistbin=nbins,xlabel='$HI \ Deficiency$',ylabel='$\log_{10}(SFR)-\log_{10}(SFR_{MS}) \ (M_\odot/yr)$',contourflag=False,alphagray=.8,alpha1=1,color1=darkblue,lcsflag=True)
var1 = z2.tolist()+z3.tolist()
var2 = dsfrs[0].tolist()+dsfrs[1].tolist()
t = lcscommon.spearmanr(var1,var2)
print(t)
if outfile1 is not None:
plt.savefig(outfile1)
if outfile2 is not None:
plt.savefig(outfile2)
def plot_phasespace_dsfr(self):
''' plot phase space diagram, and mark galaxies with SFR < (MS - 1.5sigma) '''
# maybe try size that scales with size ratio
pass
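    # --- illustrative sketch of what the stub above could do ---
    # (hedged; plot_phasespace_dsfr itself is still unimplemented):
    def _sketch_phasespace_dsfr(self):
        '''hedged sketch: phase-space diagram with suppressed-SFR galaxies marked'''
        x = self.lcs.cat['DR_R200']
        y = abs(self.lcs.cat['DELTA_V'])
        plt.figure()
        plt.plot(x, y, 'k.', alpha=.2)
        low = self.lcs_lowsfr_flag & self.lcs_mass_sfr_flag
        plt.plot(x[low], y[low], 'ro', mfc='none', label='suppressed SFR')
        xl = np.linspace(0, 1.5, 50)
        plt.plot(xl, -4. / 3. * xl + 2, 'b-')   # core/infall boundary
        plt.xlabel('$\\Delta R/R_{200}$')
        plt.ylabel('$\\Delta v/\\sigma$')
        plt.legend()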
    def print_lowssfr_nsaids(self,lcsflag=None,ssfrmin=None,ssfrmax=-11):
        if ssfrmin is None:
            ssfrmin = -11.5
lowssfr_flag = (self.lcs.cat['logMstar']> self.masscut) &\
(self.lcs.ssfr > ssfrmin) & (self.lcs.ssfr < ssfrmax)
if lcsflag is None:
lcsflag = self.lcs.cat['membflag']
flag = lcsflag & lowssfr_flag
nsaids = self.lcs.cat['NSAID'][flag]
for n in nsaids:
print(n)
    def get_legacy_images(self,lcsflag=None,ssfrmax=-11,ssfrmin=-11.5):
        lowssfr_flag = (self.lcs.cat['logMstar']> self.masscut) &\
                       (self.lcs.ssfr > ssfrmin) & (self.lcs.ssfr < ssfrmax)
        if lcsflag is None:
            lcsflag = self.lcs.cat['membflag']
        flag = lowssfr_flag & lcsflag
        ids = np.arange(len(self.lcs.cat))[flag]
        for i in ids:
            # open figure
            plt.figure()
            # get legacy image
            d, w = getlegacy(self.lcs.cat['RA_1'][i],self.lcs.cat['DEC_2'][i])
            plt.title("NSAID {0}, dSFR={1:.1f}, BT={2:.1f}".format(self.lcs.cat['NSAID'][i],self.lcs_dsfr[i],self.lcs.cat[BTkey][i]))
def compare_BT(self,nbins=15,xmax=.3):
flag1 = self.lcs.membflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
coreBT = self.lcs.cat[BTkey][flag1]
flag2 = self.lcs.infallflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
infallBT = self.lcs.cat[BTkey][flag2]
flag3 = (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
gswBT = self.gsw.cat[BTkey][flag3]
plt.figure()
mybins = np.linspace(0,xmax,nbins)
delta_bin = mybins[1]-mybins[0]
mybins = mybins + 0.5*delta_bin
xvars = [gswBT,coreBT,infallBT]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
orders = [1,3,2]
lws = [3,4,3]
alphas = [.4,0,.5]
hatches = ['/','\\','|']
for i in range(len(xvars)):
plt.hist(xvars[i],bins=mybins,color=colors[i],density=True,\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=orders[i])#hatch=hatches[i])
plt.hist(xvars[i],bins=mybins,color=colors[i],density=True,\
histtype='step',lw=lws[i],zorder=orders[i],label=labels[i])#hatch=hatches[i])
plt.axvline(x=np.median(xvars[i]),color=colors[i],ls='--')
plt.xlabel('$B/T$',fontsize=20)
plt.ylabel('$Normalized \ Distribution$',fontsize=20)
plt.legend()
print('comparing BT: Core vs Infall')
t = ks_2samp(coreBT,infallBT)
print(t)
print()
print('comparing BT: Core vs Field')
t = ks_2samp(coreBT,gswBT)
print(t)
print()
print('comparing BT: Field vs Infall')
t = ks_2samp(gswBT,infallBT)
print(t)
print()
print('number in core and infall = {}, {}'.format(sum(flag1),sum(flag2)))
    def compare_morph(self,nbins=10,xmax=.4,coreonly=False):
        '''compare morphology (B/T, Sersic n, pSc) of normal vs suppressed-SFR galaxies in the field, core, and infall samples'''
plt.figure(figsize=(10,6))
plt.subplots_adjust(wspace=.25,hspace=.4)
# 2x2 figure
nrow=2
ncol=2
if coreonly:
lcsflag = self.lcs.membflag #| self.lcs.infallflag
labels = ['Field','LCS core']
else:
lcsflag = self.lcs.membflag | self.lcs.infallflag
labels = ['Field','LCS all']
field_cols=[BTkey,'ng']
catalogs = [self.gsw.cat,self.lcs.cat,self.lcs.cat]
sampleflags = [self.gsw_mass_sfr_flag,self.lcs_mass_sfr_flag & self.lcs.membflag,self.lcs_mass_sfr_flag & self.lcs.infallflag]
lowflags = [self.gsw.lowsfr_flag, self.lcs.lowsfr_flag,self.lcs.lowsfr_flag]
#colors = ['0.5',darkblue,lightblue] # color for field and LCS
#alphas = [.8,.5,.5]
#lws = [2,2]
#zorders = [2,2]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
zorders = [1,3,2]
lws = [3,4,3]
alphas = [.4,0,.5]
#for i,col in enumerate(field_cols):
        xlabels = ['$B/T$','Sersic n','prob Sc','$\log_{10}(M_\star/M_\odot)$','$\Delta SFR$']
titles = ['Normal SFR','Suppressed SFR']
ylabels=['Norm. Counts','Norm. Counts']
# BT
        allbins = [np.linspace(0,xmax,nbins),np.linspace(1,6,nbins),\
                   np.linspace(0,1,nbins),\
                   np.linspace(9.7,11,nbins),np.linspace(-1,1,nbins)]
plotcols = [0,1,2]
ncol=len(plotcols)
colnumber = 0
for col in plotcols:
for i in range(len(catalogs)):
                if col == 0:
                    xvar = catalogs[i][BTkey]
elif col == 1:
try:
# single component fit in GSWLC is ng
xvar = catalogs[i]['ng']
except KeyError:
xvar = catalogs[i]['ng_2']
#elif col == 2:
# xvar = catalogs[i]['gmag'] - catalogs[i]['imag']
elif col == 2:
xvar = catalogs[i]['pSc'] #+ catalogs[i]['pSa']
elif col == 3: # stellar mass
xvar = catalogs[i]['logMstar'] #+ catalogs[i]['pSa']
elif col == 5: # delta SFR
x1 = catalogs[i]['logMstar']
y1 = catalogs[i]['logSFR']
#xvar = y1-get_BV_MS(x1)
xvar = y1-self.gsw.get_MS(x1)
# plot on top row normal SF galaxies
plt.subplot(nrow,ncol,1+colnumber)
flag1 = sampleflags[i] & ~lowflags[i]
print(labels[i],': number of galaxies in normal sf subsample = ',sum(flag1))
plt.hist(xvar[flag1],color=colors[i],density=True,bins=allbins[col],\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=zorders[i])
plt.hist(xvar[flag1],color=colors[i],density=True,bins=allbins[col],\
histtype='step',lw=lws[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
if col == 0:
plt.ylabel(ylabels[0],fontsize=16)
plt.legend()
plt.axvline(x=0.4,ls='--',color='k')
if colnumber == 1:
plt.text(-.1,1.05,titles[0],transform=plt.gca().transAxes,horizontalalignment='center')
# plot on bottom row those with low delta SFR
                print(nrow,ncol,ncol+1+colnumber)
plt.subplot(nrow,ncol,ncol+1+colnumber)
flag2 = sampleflags[i] & lowflags[i]
print(labels[i],': number of galaxies in suppressed sf subsample = ',sum(flag2))
print()
if col == 0:
plt.ylabel(ylabels[1],fontsize=16)
plt.axvline(x=0.4,ls='--',color='k')
if colnumber == 1:
plt.text(-.1,1.05,titles[1],transform=plt.gca().transAxes,horizontalalignment='center')
plt.hist(xvar[flag2],color=colors[i],density=True,bins=allbins[col],\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
plt.hist(xvar[flag2],color=colors[i],density=True,bins=allbins[col],\
histtype='step',lw=lws[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
plt.xlabel(xlabels[col],fontsize=16)
plt.subplot(nrow,ncol,1+colnumber)
colnumber += 1
# compare mass distributions
# compare normal SF galaxies
flag1 = sampleflags[0] & ~lowflags[0]
flag2 = sampleflags[0] & lowflags[0]
gswmass_normal = self.gsw.cat['logMstar'][flag1]
gswmass_lowsfr = self.gsw.cat['logMstar'][flag2]
gswBT_normal = self.gsw.cat[BTkey][flag1]
gswBT_lowsfr = self.gsw.cat[BTkey][flag2]
# core
flag1 = sampleflags[1] & ~lowflags[1]
flag2 = sampleflags[1] & lowflags[1]
lcsmass_normal = self.lcs.cat['logMstar'][flag1]
lcsmass_lowsfr = self.lcs.cat['logMstar'][flag2]
lcsBT_normal = self.lcs.cat[BTkey][flag1]
lcsBT_lowsfr = self.lcs.cat[BTkey][flag2]
#infall
        flag1 = sampleflags[2] & ~lowflags[2]
        flag2 = sampleflags[2] & lowflags[2]
infallmass_normal = self.lcs.cat['logMstar'][flag1]
infallmass_lowsfr = self.lcs.cat['logMstar'][flag2]
infallBT_normal = self.lcs.cat[BTkey][flag1]
infallBT_lowsfr = self.lcs.cat[BTkey][flag2]
print('######################################################')
print('comparing mass distribution of normal SFR galaxies, field vs core')
#print('######################################################')
t = ks_2samp(gswmass_normal,lcsmass_normal)
print(t)
print('######################################################')
print('comparing mass distribution of low SFR galaxies, field vs core')
#print('######################################################')
t = ks_2samp(gswmass_lowsfr,lcsmass_lowsfr)
print(t)
print('######################################################')
print('comparing mass distribution of low SFR galaxies, field vs infall')
#print('######################################################')
t = ks_2samp(gswmass_lowsfr,infallmass_lowsfr)
print(t)
print()
print('######################################################')
print('comparing mass distribution of normal vs low SFR galaxies, field')
#print('######################################################')
t = ks_2samp(gswmass_normal,gswmass_lowsfr)
print(t)
print('######################################################')
print('comparing mass distribution of normal vs low SFR galaxies, core')
#print('######################################################')
t = ks_2samp(lcsmass_normal,lcsmass_lowsfr)
print(t)
print('######################################################')
print('comparing mass distribution of normal vs low SFR galaxies, infall')
#print('######################################################')
t = ks_2samp(infallmass_normal,infallmass_lowsfr)
print(t)
print('######################################################')
print('comparing BT distribution of normal vs low SFR galaxies, field')
#print('######################################################')
t = ks_2samp(gswBT_normal,gswBT_lowsfr)
print(t)
print('######################################################')
print('comparing BT distribution of normal vs low SFR galaxies, core')
#print('######################################################')
t = ks_2samp(lcsBT_normal,lcsBT_lowsfr)
print(t)
print('######################################################')
print('comparing BT distribution of normal vs low SFR galaxies, infall')
#print('######################################################')
t = ks_2samp(infallBT_normal,infallBT_lowsfr)
print(t)
    def BT_mstar_normal_low_sfr(self,nbins=10,xmax=.3,coreonly=False):
        ''' plot B/T and Mstar distributions for normal and low sfr galaxies '''
plt.figure(figsize=(10,6))
plt.subplots_adjust(wspace=.25,hspace=.4)
# 2x2 figure
nrow=2
ncol=2
if coreonly:
lcsflag = self.lcs.membflag #| self.lcs.infallflag
labels = ['Field','LCS core']
else:
lcsflag = self.lcs.membflag | self.lcs.infallflag
labels = ['Field','LCS all']
field_cols=[BTkey,'ng']
catalogs = [self.gsw.cat,self.lcs.cat,self.lcs.cat]
sampleflags = [self.gsw_mass_sfr_flag,self.lcs_mass_sfr_flag & self.lcs.membflag,self.lcs_mass_sfr_flag & self.lcs.infallflag]
lowflags = [self.gsw.lowsfr_flag, self.lcs.lowsfr_flag,self.lcs.lowsfr_flag]
#colors = ['0.5',darkblue,lightblue] # color for field and LCS
#alphas = [.8,.5,.5]
#lws = [2,2]
#zorders = [2,2]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
zorders = [1,3,2]
lws = [3,4,3]
alphas = [.4,0,.5]
#for i,col in enumerate(field_cols):
        xlabels = ['B/T','Sersic n','prob Sc','logMstar','$\Delta SFR$']
titles = ['Normal SFR','Low SFR']
ylabels=['Norm. Counts','Norm. Counts']
        # bins for B/T, sersic n, pSc, logMstar, delta-SFR (same as
        # compare_morph; allbins was missing here, causing a NameError)
        allbins = [np.linspace(0,xmax,nbins),np.linspace(1,6,nbins),\
                   np.linspace(0,1,nbins),\
                   np.linspace(9.7,11,nbins),np.linspace(-1,1,nbins)]
        plotcols = [0,3]
ncol=len(plotcols)
colnumber = 0
for col in plotcols:
for i in range(len(catalogs)):
                if col == 0:
                    xvar = catalogs[i][BTkey]
elif col == 1:
try:
# single component fit in GSWLC is ng
xvar = catalogs[i]['ng']
except KeyError:
xvar = catalogs[i]['ng_2']
#elif col == 2:
# xvar = catalogs[i]['gmag'] - catalogs[i]['imag']
elif col == 2:
xvar = catalogs[i]['pSc'] #+ catalogs[i]['pSa']
elif col == 3: # stellar mass
xvar = catalogs[i]['logMstar'] #+ catalogs[i]['pSa']
elif col == 5: # delta SFR
x1 = catalogs[i]['logMstar']
y1 = catalogs[i]['logSFR']
#xvar = y1-get_BV_MS(x1)
xvar = y1-self.gsw.get_MS(x1)
# plot on top row normal SF galaxies
plt.subplot(nrow,ncol,1+colnumber)
flag1 = sampleflags[i] & ~lowflags[i]
print(labels[i],': number of galaxies in subsample = ',sum(flag1))
plt.hist(xvar[flag1],color=colors[i],density=True,bins=allbins[col],\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=zorders[i])
plt.hist(xvar[flag1],color=colors[i],density=True,bins=allbins[col],\
histtype='step',lw=lws[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
if col == 0:
plt.ylabel(ylabels[0],fontsize=16)
plt.legend()
plt.axvline(x=0.4,ls='--',color='k')
if colnumber == 1:
plt.text(-.1,1.05,titles[0],transform=plt.gca().transAxes,horizontalalignment='center')
# plot on bottom row those with low delta SFR
                print(nrow,ncol,ncol+1+colnumber)
plt.subplot(nrow,ncol,ncol+1+colnumber)
flag2 = sampleflags[i] & lowflags[i]
if col == 0:
plt.ylabel(ylabels[1],fontsize=16)
plt.axvline(x=0.4,ls='--',color='k')
if colnumber == 1:
plt.text(-.1,1.05,titles[1],transform=plt.gca().transAxes,horizontalalignment='center')
plt.hist(xvar[flag2],color=colors[i],density=True,bins=allbins[col],\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
plt.hist(xvar[flag2],color=colors[i],density=True,bins=allbins[col],\
histtype='step',lw=lws[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
plt.xlabel(xlabels[col],fontsize=16)
plt.subplot(nrow,ncol,1+colnumber)
colnumber += 1
# compare mass distributions
# compare normal SF galaxies
flag1 = sampleflags[0] & ~lowflags[0]
flag2 = sampleflags[0] & lowflags[0]
gswmass_normal = self.gsw.cat['logMstar'][flag1]
gswmass_lowsfr = self.gsw.cat['logMstar'][flag2]
flag1 = sampleflags[1] & ~lowflags[1]
flag2 = sampleflags[1] & lowflags[1]
lcsmass_normal = self.lcs.cat['logMstar'][flag1]
lcsmass_lowsfr = self.lcs.cat['logMstar'][flag2]
print('######################################################')
print('comparing mass distribution of normal SFR galaxies')
#print('######################################################')
t = ks_2samp(gswmass_normal,lcsmass_normal)
print(t)
print('######################################################')
print('comparing mass distribution of low SFR galaxies')
#print('######################################################')
t = ks_2samp(gswmass_lowsfr,lcsmass_lowsfr)
print(t)
    def compare_field2others(self,nbins=10,xmax=.3,coreonly=False):
        '''compare LCS normal-SFR, LCS low-SFR, and field low-SFR galaxies against the normal-SFR field sample '''
### properties to compare
# BT
# sersic n
# pSc
# B/A - b/c it seems like we have more edge on among low sfr
# want to make sure it's not just extinction!
plt.figure(figsize=(14,6))
plt.subplots_adjust(wspace=.25,hspace=.4)
# 1x4 figure
nrow=1
ncol=4
        labels = ['LCS normal','LCS low', 'Field low']
        # (lcsflag was used before it was defined, and the self. prefixes
        # were missing on membflag/infallflag)
        if coreonly:
            lcsflag = self.lcs.membflag
        else:
            lcsflag = self.lcs.membflag | self.lcs.infallflag
        catalogs = [self.lcs.cat[self.lcs_mass_sfr_flag & lcsflag & ~self.lcs.lowsfr_flag],\
                    self.lcs.cat[self.lcs_mass_sfr_flag & lcsflag & self.lcs.lowsfr_flag],\
                    self.gsw.cat[self.gsw_mass_sfr_flag & self.gsw.lowsfr_flag]]
        # comparison catalog: field galaxies with normal SFR
        compcat = self.gsw.cat[self.gsw_mass_sfr_flag & ~self.gsw.lowsfr_flag]
        colors = ['b','r','0.5'] # colors for LCS normal, LCS low, field low
        alphas = [.5,.5,.8]
        lws = [2,2,2]
        zorders = [2,2,2]
        xlabels = ['B/T','Sersic n','prob Sc','logMstar']
        keys = [BTkey,'ng','pSc','logMstar']
        ylabel = 'Norm. Counts'
        # bins for B/T, sersic n, pSc, logMstar
        allbins = [np.linspace(0,xmax,nbins),np.linspace(1,6,nbins),\
                   np.linspace(0,1,nbins),\
                   np.linspace(9.7,11,nbins)]
        # one row of panels; each overlays the three samples against the
        # normal-SFR field sample
        for col in range(ncol):
            plt.subplot(nrow,ncol,col+1)
            plt.hist(compcat[keys[col]],color='k',density=True,bins=allbins[col],\
                     histtype='step',lw=2,label='Field normal')
            for i in range(len(catalogs)):
                try:
                    xvar = catalogs[i][keys[col]]
                except KeyError:
                    # the LCS table stores the single-component sersic index as ng_2
                    xvar = catalogs[i][keys[col]+'_2']
                plt.hist(xvar,color=colors[i],density=True,bins=allbins[col],\
                         histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=zorders[i],label=labels[i])
            plt.xlabel(xlabels[col],fontsize=16)
            if col == 0:
                plt.ylabel(ylabel,fontsize=16)
                plt.legend()
def compare_BT_lowsfr_field_core(self,nbins=12,coreonly=False,infallonly=False,BTmax=1):
''' compare the B/T distribution of LCS low SFR galaxies with mass-matched sample drawn from low SFR field galaxies '''
if coreonly:
lcsflag = self.lcs.membflag #| self.lcs.infallflag
labels = ['Field','LCS core']
elif infallonly:
lcsflag = self.lcs.infallflag #| self.lcs.infallflag
labels = ['Field','LCS infall']
else:
lcsflag = self.lcs.membflag | self.lcs.infallflag
labels = ['Field','LCS all']
# compare B/T distribution of LCS low SFR galaxies with
# mass-matched sample drawn from low SFR field galaxies
# select low SFR galaxies from field
sampleflags = [self.gsw_mass_sfr_flag,self.lcs_mass_sfr_flag & lcsflag]
lowflags = [self.gsw.lowsfr_flag, self.lcs.lowsfr_flag]
lcsflag2 = sampleflags[1] & lowflags[1]
lcsmass_lowsfr = self.lcs.cat['logMstar'][lcsflag2]
lcsBT_lowsfr = self.lcs.cat[BTkey][lcsflag2]
flag2 = sampleflags[0] & lowflags[0]
gswmass_lowsfr = self.gsw.cat['logMstar'][flag2]
gswBT_lowsfr = self.gsw.cat[BTkey][flag2]
flags2=[flag2,lcsflag2]
        # create a mass-matched sample of field galaxies with low SFR
        keep_indices = mass_match(lcsmass_lowsfr,gswmass_lowsfr,nmatch=10,seed=379)
massmatched_gswBT_lowsfr = gswBT_lowsfr[keep_indices]
# compare the B/T distributions
# does low SFR sample have higher B/T than normal field galaxies
plt.figure(figsize=(9,3))
plt.subplots_adjust(wspace=.3,bottom=.2)
colors = ['0.5',darkblue,'k'] # color for field and LCS
xvars = [massmatched_gswBT_lowsfr, lcsBT_lowsfr]
alphas = [.8,.5,1.]
lws = [2,2,2]
zorders = [2,2,2]
histtypes = ['stepfilled','stepfilled','stepfilled']
#for i,col in enumerate(field_cols):
xlabels = ['B/T','Sersic n','g-i','prob Sc','logMstar','$\Delta SFR$']
#xlabels = ['B/T','Sersic n','prob Sc','logMstar','$\Delta SFR$']
labels = ['Field','LCS','Low SFR Field']
if coreonly:
labels = ['Field','LCS Core','Low SFR Field']
allbins = [np.linspace(0,BTmax,nbins),np.linspace(1,6,nbins),\
np.linspace(-2,2,nbins),\
np.linspace(0,1,nbins),\
np.linspace(9.7,11,nbins),np.linspace(-1,1,nbins)]
        # B/T, Sersic n, pSc (the g-i panel is disabled; with the old
        # range(4) loop, col=2 reused stale xvars with mismatched bins)
        plotcols = [0,1,3]
        ncols = len(plotcols)
        for panel,col in enumerate(plotcols):
            if col == 0:
                xvars = [self.gsw.cat[BTkey],self.lcs.cat[BTkey]]
            elif col == 1:
                xvars = [self.gsw.cat['ng'],self.lcs.cat['ng_2']]
            #elif col == 2:
            #    xvars = [self.gsw.cat['gmag'] - self.gsw.cat['imag'],\
            #             self.lcs.cat['gmag']- self.lcs.cat['imag']]
            elif col == 3:
                xvars = [self.gsw.cat['pSc'],self.lcs.cat['pSc']] #+ catalogs[i]['pSa']
            plt.subplot(1,ncols,panel+1)
for i in range(len(xvars)):
if i == 0:
xvar = xvars[i][flags2[i]][keep_indices]
else:
xvar = xvars[i][flags2[i]]
plt.hist(xvar,color=colors[i],density=True,bins=allbins[col],\
histtype=histtypes[i],lw=lws[i],alpha=alphas[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
if col == 0:
#plt.text(-.2,-.1,'Normalized Distribution',transform=plt.gca().transAxes,rotation=90,horizontalalignment='center',verticalalignment='center',fontsize=24)
plt.ylabel('Norm. Distribution',fontsize=16)
print('###################################################################')
print('comparing LCS low SFR and mass-matched field low SFR:',xlabels[col])
t = ks_2samp(xvars[0][flags2[0]][keep_indices],xvars[1][flags2[1]])
print('statistic={:.2f}, pvalue={:.2e}'.format(t[0],t[1]))
print('')
print('anderson-darling test')
print(anderson_ksamp([xvars[0][flags2[0]][keep_indices],xvars[1][flags2[1]]]))
plt.xlabel(xlabels[col],fontsize=16)
plt.legend()
def compare_BT_lowsfr_lcs_field_mmatch(self,nbins=12,coreonly=False,infallonly=False,BTmax=1,nrandom=100,nmatch=1):
''' compare the B/T distribution of LCS low SFR galaxies with mass-matched sample drawn from low SFR field galaxies '''
if coreonly:
lcsflag = self.lcs.membflag #| self.lcs.infallflag
labels = ['Field','LCS core']
elif infallonly:
lcsflag = self.lcs.infallflag #| self.lcs.infallflag
labels = ['Field','LCS infall']
else:
lcsflag = self.lcs.membflag | self.lcs.infallflag
labels = ['Field','LCS all']
# compare B/T distribution of LCS low SFR galaxies with
# mass-matched sample drawn from low SFR field galaxies
# select low SFR galaxies from field
sampleflags = [self.gsw_mass_sfr_flag,self.lcs_mass_sfr_flag & self.lcs.membflag,self.lcs_mass_sfr_flag & self.lcs.infallflag]
lowflags = [self.gsw.lowsfr_flag, self.lcs.lowsfr_flag,self.lcs.lowsfr_flag]
lcsflag2 = sampleflags[1] & lowflags[1]
        # NB: the mass-match reference sample combines core+infall low-SFR galaxies
        lcsmass_sample = self.lcs_mass_sfr_flag & (self.lcs.membflag | self.lcs.infallflag) & self.lcs.lowsfr_flag
coremass_lowsfr = self.lcs.cat['logMstar'][lcsmass_sample]
coreBT_lowsfr = self.lcs.cat[BTkey][lcsflag2]
flag2 = sampleflags[0] & lowflags[0]
gswmass_lowsfr = self.gsw.cat['logMstar'][flag2]
gswBT_lowsfr = self.gsw.cat[BTkey][flag2]
lcsflag2_infall = sampleflags[2] & lowflags[2]
flags2=[flag2,lcsflag2,lcsflag2_infall]
# repeat mass matching many times, but select one field galaxy for each core
# create a mass-matched sample of field galaxies with normal SFR
##########################################################
### MAKE A LOOP TO RUN MASS MATCHING MANY TIMES
##########################################################
# store all the realization
# store p values and distributions for all realizations
all_keep_indices = []
for i in range(nrandom):
keep_indices = mass_match(coremass_lowsfr,gswmass_lowsfr,nmatch=nmatch)
all_keep_indices.append(keep_indices)
# compare the B/T distributions
# does low SFR sample have higher B/T than normal field galaxies
plt.figure(figsize=(10,4))
plt.subplots_adjust(wspace=.3,bottom=.2)
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
zorders = [1,3,2]
lws = [3,4,3]
alphas = [.4,0,.5]
histtypes = ['stepfilled','stepfilled','stepfilled']
#for i,col in enumerate(field_cols):
        xlabels = ['B/T','Sersic n','prob Sc','logMstar','$\Delta SFR$']
labels = ['Field','LCS','Low SFR Field']
if coreonly:
labels = ['Field','LCS Core','Low SFR Field']
allbins = [np.linspace(0,BTmax,nbins),np.linspace(1,6,nbins),\
np.linspace(0,1,nbins),\
np.linspace(9.7,11,nbins),np.linspace(-1,1,nbins)]
# BT
ncols=3
for col in range((ncols)):
all_KS_pvalue = np.zeros(nrandom)
all_AD_pvalue = np.zeros(nrandom)
all_hist_yvalues = np.zeros((nrandom,len(allbins[col])-1))
if col == 0:
xvars = [self.gsw.cat[BTkey],self.lcs.cat[BTkey],self.lcs.cat[BTkey]]
elif col == 1:
xvars = [self.gsw.cat['ng'],self.lcs.cat['ng_2'],self.lcs.cat['ng_2']]
#elif col == 2:
# xvars = [self.gsw.cat['gmag'] - self.gsw.cat['imag'],\
# self.lcs.cat['gmag']- self.lcs.cat['imag']]
elif col == 2:
xvars = [self.gsw.cat['pSc'],self.lcs.cat['pSc'],self.lcs.cat['pSc']] #+ catalogs[i]['pSa']
plt.subplot(1,3,col+1)
for i in range(len(xvars)):
if i == 0:
for k in range(nrandom):
keep_indices = all_keep_indices[k]
xvar = xvars[i][flags2[i]][keep_indices]
t = plt.hist(xvar,color=colors[i],density=True,bins=allbins[col],\
histtype='step',lw=lws[i],alpha=.05,zorder=zorders[i])#hatch=hatches[i])
# get median of hist and plot that
all_hist_yvalues[k] = t[0]
a = ks_2samp(xvars[0][flags2[0]][keep_indices],xvars[1][flags2[1]])
all_KS_pvalue[k] = a[1]
#a = anderson_ksamp(xvars[0][flags2[0]][keep_indices],xvars[1][flags2[1]])
#all_AD_pvalue[k] = a[1]
# calc the median of all the distributions
med_hist = np.median(all_hist_yvalues,axis=0)
#t = plt.hist(med_hist,color=colors[i],density=True,bins=allbins[col],\
# histtype=histtypes[i],lw=lws[i],alpha=.05,zorder=zorders[i],label=labels[i])#
x = (allbins[col][0:-1]+allbins[col][1:])/2.
y = med_hist
#print(x,y)
plt.plot(x,y,'ko',color=colors[i])#
percentiles = [16,50,84]
KSmin = np.percentile(all_KS_pvalue,percentiles[0])
KSmed = np.percentile(all_KS_pvalue,percentiles[1])
KSmax = np.percentile(all_KS_pvalue,percentiles[2])
print('#############')
print(xlabels[col])
print('KS test')
print('pvalue={:.2e} ({:.2e},{:.2e})'.format(KSmed,KSmin,KSmax))
print('')
#print('anderson-darling test')
#print('pvalue={:.2e} ({:.2e},{:.2e}_'.format(np.percentile(all_AD_pvalue,percentiles[1]),\
# np.percentile(all_AD_pvalue,percentiles[0]),\
# np.percentile(all_AD_pvalue,percentiles[2])))
else:
xvar = xvars[i][flags2[i]]
plt.hist(xvar,color=colors[i],density=True,bins=allbins[col],\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=zorders[i])#hatch=hatches[i])
plt.hist(xvar,color=colors[i],density=True,bins=allbins[col],\
histtype='step',lw=lws[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
if col == 0:
#plt.text(-.2,-.1,'Normalized Distribution',transform=plt.gca().transAxes,rotation=90,horizontalalignment='center',verticalalignment='center',fontsize=24)
plt.ylabel('Norm. Distribution',fontsize=16)
plt.xlabel(xlabels[col],fontsize=16)
plt.legend()
def compare_BT_lowsfr_field(self,nbins=12,coreonly=False,BTmax=0.4):
''' compare the B/T distribution of field galaxies with low and normal SFR '''
# select low SFR galaxies from field
sampleflags = [self.gsw_mass_sfr_flag]#,self.lcs_mass_sfr_flag & lcsflag]
lowflags = [self.gsw.lowsfr_flag]#, self.lcs.lowsfr_flag]
flag1 = sampleflags[0] & ~lowflags[0]
flag2 = sampleflags[0] & lowflags[0]
gswmass_normal = self.gsw.cat['logMstar'][flag1]
gswBT_normal = self.gsw.cat[BTkey][flag1]
gswmass_lowsfr = self.gsw.cat['logMstar'][flag2]
gswBT_lowsfr = self.gsw.cat[BTkey][flag2]
# create a mass-matched sample of field galaxies with normal SFR
keep_indices = mass_match(gswmass_lowsfr,gswmass_normal,nmatch=1)
massmatched_gswBT_normal = gswBT_normal[keep_indices]
# compare the B/T distributions
# does low SFR sample have higher B/T than normal field galaxies
print('#######################################')
print('comparing low SFR and mass-matched normal SFR')
t = ks_2samp(gswBT_lowsfr,massmatched_gswBT_normal)
print(t)
print('#######################################')
print('comparing normal SFR and normal SFR mass-matched to low SFR')
t = ks_2samp(gswBT_normal,massmatched_gswBT_normal)
print(t)
plt.figure(figsize=(8,6))
colors = ['k','0.5','c'] # color for field and LCS
xvars = [gswBT_normal, massmatched_gswBT_normal,gswBT_lowsfr]
alphas = [1.,.8,.5]
lws = [2,2,2]
zorders = [2,2,2]
histtypes = ['step','stepfilled','stepfilled']
#for i,col in enumerate(field_cols):
xlabels = ['B/T','Sersic n','g-i','prob Sc','logMstar','$\Delta SFR$']
labels = ['Normal Field','Normal Field, MM to Low SFR Field','Low SFR Field']
# BT
for i in range(len(xvars)):
plt.hist(xvars[i],color=colors[i],density=True,bins=np.linspace(0,BTmax,nbins),\
histtype=histtypes[i],lw=lws[i],alpha=alphas[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
#plt.legend()
plt.legend(fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.axis([-.01,BTmax+.01,0,9])
plt.xlabel('B/T')
plt.ylabel('Normalized Distribution')
def compare_BT_lowsfr_lcs_massmatch(self,nbins=12,infallonly=False,coreonly=False,lcsflag=None,BTmax=0.4):
''' compare the B/T distribution of field galaxies with low and normal SFR '''
# select low SFR galaxies from field
if coreonly:
lcsflag = self.lcs.membflag #| self.lcs.infallflag
elif infallonly:
lcsflag = self.lcs.infallflag #| self.lcs.infallflag
else:
lcsflag = self.lcs.membflag | self.lcs.infallflag
# compare B/T distribution of LCS low SFR galaxies with
# mass-matched sample drawn from low SFR field galaxies
# select low SFR galaxies from field
sampleflags = [self.gsw_mass_sfr_flag,self.lcs_mass_sfr_flag & self.lcs.membflag, self.lcs_mass_sfr_flag & self.lcs.infallflag]
lowflags = [self.gsw.lowsfr_flag, self.lcs.lowsfr_flag, self.lcs.lowsfr_flag]
if coreonly:
lcsflag2 = sampleflags[1] & lowflags[1]
elif infallonly:
lcsflag2 = sampleflags[2] & lowflags[2]
else:
lcsflag2 = (sampleflags[1] | sampleflags[2]) & lowflags[1]
lcsmass_lowsfr = self.lcs.cat['logMstar'][lcsflag2]
lcsBT_lowsfr = self.lcs.cat[BTkey][lcsflag2]
flag1 = sampleflags[0] & ~lowflags[0]
gswmass_normal = self.gsw.cat['logMstar'][flag1]
gswBT_normal = self.gsw.cat[BTkey][flag1]
flag2 = sampleflags[0] & lowflags[0]
gswmass_lowsfr = self.gsw.cat['logMstar'][flag2]
gswBT_lowsfr = self.gsw.cat[BTkey][flag2]
# create a mass-matched sample of field galaxies with normal SFR
keep_indices = mass_match(lcsmass_lowsfr,gswmass_normal,nmatch=20)
massmatched_gswBT_normal = gswBT_normal[keep_indices]
# compare the B/T distributions
# does low SFR sample have higher B/T than normal field galaxies
print('#######################################')
print('comparing low SFR and mass-matched normal SFR')
t = ks_2samp(lcsBT_lowsfr,massmatched_gswBT_normal)
print(t)
#print('#######################################')
#print('comparing normal SFR and normal SFR mass-matched to low SFR')
#t = ks_2samp(gswBT_normal,massmatched_gswBT_normal)
#print(t)
plt.figure(figsize=(8,6))
colors = ['k','0.5',darkblue] # color for field and LCS
if infallonly:
colors = ['k','0.5',lightblue] # color for field and LCS
xvars = [gswBT_normal, massmatched_gswBT_normal,lcsBT_lowsfr]
alphas = [1.,.8,.5]
lws = [2,2,2]
zorders = [2,2,2]
histtypes = ['step','stepfilled','stepfilled']
#for i,col in enumerate(field_cols):
xlabels = ['B/T','Sersic n','g-i','prob Sc','logMstar','$\Delta SFR$']
        labels = ['Normal Field','Normal Field, MM to low SFR LCS','Low SFR LCS']
        if coreonly:
            labels = ['Normal Field','Normal Field, MM to low SFR Core','Low SFR Core']
        if infallonly:
            labels = ['Normal Field','Normal Field, MM to low SFR Infall','Low SFR Infall']
# BT
for i in range(len(xvars)):
plt.hist(xvars[i],color=colors[i],density=True,bins=np.linspace(0,BTmax,nbins),\
histtype=histtypes[i],lw=lws[i],alpha=alphas[i],zorder=zorders[i],label=labels[i])#hatch=hatches[i])
plt.legend(fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.axis([-.01,BTmax+.01,0,9.])
plt.xlabel('B/T')
plt.ylabel('Normalized Distribution')
def plot_dsfr_BT(self,nbins=15,xmax=.3,writefiles=False,nsersic_cut=10,ecut=1,BTline=None):
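        '''plot Delta SFR (offset from the star-forming main sequence) vs B/T for the field, LCS core, and LCS infall samples, with binned means'''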
nflag = (self.lcs.cat['ng_2'] < nsersic_cut)
sflag = (self.lcs.cat['p_el'] < ecut)
flag1 = sflag & nflag & self.lcs.membflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
coreBT = self.lcs.cat[BTkey][flag1]
x1 = self.lcs.cat['logMstar'][flag1]
y1 = self.lcs.cat['logSFR'][flag1]
#core_dsfr = y1-get_BV_MS(x1)
core_dsfr = y1-self.gsw.get_MS(x1)
flag2 = sflag & nflag & self.lcs.infallflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
infallBT = self.lcs.cat[BTkey][flag2]
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.cat['logSFR'][flag2]
#infall_dsfr = y2-get_BV_MS(x2)
infall_dsfr = y2-self.gsw.get_MS(x2)
nflag = (self.gsw.cat['ng'] < nsersic_cut)
flag3 = nflag & (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
fieldBT = self.gsw.cat[BTkey][flag3]
x3 = self.gsw.cat['logMstar'][flag3]
y3 = self.gsw.cat['logSFR'][flag3]
#field_dsfr = y3-get_BV_MS(x3)
field_dsfr = y3-self.gsw.get_MS(x3)
plt.figure()
mybins = np.linspace(0,xmax,nbins)
xvars = [fieldBT,coreBT,infallBT]
yvars = [field_dsfr,core_dsfr,infall_dsfr]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
orders = [1,3,2]
lws = [3,4,3]
alphas = [.5,.6,.6]
markers = ['o','s','o']
markersizes = [1,8,8]
hatches = ['/','\\','|']
allBT=[]
alldsfr=[]
for i in range(len(xvars)):
if i > 0:
plt.plot(xvars[i],yvars[i],'k.',color=colors[i],alpha=alphas[i],zorder=orders[i],markersize=markersizes[i],\
marker=markers[i],label=labels[i])
t = lcscommon.spearmanr(xvars[i],yvars[i])
ybin,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='mean',bins=mybins)
yerr,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='std',bins=mybins)
nyerr,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='count',bins=mybins)
yerr = yerr/np.sqrt(nyerr)
dbin = xbin[1]-xbin[0]
if i == 0:
plt.plot(xbin[:-1]+0.5*dbin,ybin,color=colors[i],lw=3,label=labels[i])
else:
plt.plot(xbin[:-1]+0.5*dbin,ybin,color=colors[i],lw=3)
plt.fill_between(xbin[:-1]+0.5*dbin,ybin+yerr,ybin-yerr,color=colors[i],alpha=.4)
print('\n'+labels[i])
print('r={:.4f}, pvalue={:.3e}'.format(t[0],t[1]))
allBT.append(xvars[i].tolist())
alldsfr.append(yvars[i].tolist())
plt.xlabel('$B/T$',fontsize=20)
plt.ylabel('$\Delta SFR \ (M_\odot/yr)$',fontsize=20)
plt.axhline(y=.45,ls='--',color='k',label='$1.5\sigma_{MS}$')
plt.axhline(y=-.45,ls='--',color='k')
plt.legend()
if BTline is not None:
plt.axvline(x=BTline,ls=':',color='k')
print('\n Combined Samples: Spearman Rank')
#t = lcscommon.spearmanr(allBT,alldsfr)
#print(t)
if writefiles:
coreflag = (core_dsfr < -0.45) & (coreBT > 0.3)
outtab = Table([self.lcs.cat['NSAID'][flag1][coreflag],self.lcs.cat['RA_1'][flag1][coreflag],self.lcs.cat['DEC_1'][flag1][coreflag]],names=['NSAID','RA','DEC'])
outtab.write('core-btgt03-dsfrlt045.fits',overwrite=True)
# write out infall
flag = (infall_dsfr < -0.45) & (infallBT > 0.3)
outtab = Table([self.lcs.cat['NSAID'][flag2][flag],self.lcs.cat['RA_1'][flag2][flag],self.lcs.cat['DEC_1'][flag2][flag]],names=['NSAID','RA','DEC'])
outtab.write('infall-btgt03-dsfrlt045.fits',overwrite=True)
return xvars,yvars
def plot_dsfr_mstar(self,nbins=15,xmax=.3,writefiles=False,nsersic_cut=10,ecut=1,BTline=None):
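        '''plot Delta SFR (offset from the star-forming main sequence) vs logMstar for the field, LCS core, and LCS infall samples, with binned means'''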
nflag = (self.lcs.cat['ng_2'] < nsersic_cut)
sflag = (self.lcs.cat['p_el'] < ecut)
flag1 = sflag & nflag & self.lcs.membflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
coreBT = self.lcs.cat[BTkey][flag1]
x1 = self.lcs.cat['logMstar'][flag1]
y1 = self.lcs.cat['logSFR'][flag1]
#core_dsfr = y1-get_BV_MS(x1)
core_dsfr = y1-self.gsw.get_MS(x1)
flag2 = sflag & nflag & self.lcs.infallflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
infallBT = self.lcs.cat[BTkey][flag2]
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.cat['logSFR'][flag2]
#infall_dsfr = y2-get_BV_MS(x2)
infall_dsfr = y2-self.gsw.get_MS(x2)
nflag = (self.gsw.cat['ng'] < nsersic_cut)
flag3 = nflag & (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
fieldBT = self.gsw.cat[BTkey][flag3]
x3 = self.gsw.cat['logMstar'][flag3]
y3 = self.gsw.cat['logSFR'][flag3]
#field_dsfr = y3-get_BV_MS(x3)
field_dsfr = y3-self.gsw.get_MS(x3)
plt.figure()
mybins = np.linspace(9.7,11.25,nbins)
xvars = [x3,x1,x2]
yvars = [field_dsfr,core_dsfr,infall_dsfr]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
orders = [1,3,2]
lws = [3,4,3]
alphas = [.5,.6,.6]
markers = ['o','s','o']
markersizes = [1,8,8]
hatches = ['/','\\','|']
allBT=[]
alldsfr=[]
for i in range(len(xvars)):
if i > 0:
plt.plot(xvars[i],yvars[i],'k.',color=colors[i],alpha=alphas[i],zorder=orders[i],markersize=markersizes[i],\
marker=markers[i],label=labels[i])
t = lcscommon.spearmanr(xvars[i],yvars[i])
ybin,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='mean',bins=mybins)
yerr,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='std',bins=mybins)
nyerr,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='count',bins=mybins)
yerr = yerr/np.sqrt(nyerr)
dbin = xbin[1]-xbin[0]
if i == 0:
plt.plot(xbin[:-1]+0.5*dbin,ybin,color=colors[i],lw=3,label=labels[i])
else:
plt.plot(xbin[:-1]+0.5*dbin,ybin,color=colors[i],lw=3)
plt.fill_between(xbin[:-1]+0.5*dbin,ybin+yerr,ybin-yerr,color=colors[i],alpha=.4)
print('\n'+labels[i])
print('r={:.4f}, pvalue={:.3e}'.format(t[0],t[1]))
allBT.append(xvars[i].tolist())
alldsfr.append(yvars[i].tolist())
plt.xlabel('$\log_{10}(M_\star)$',fontsize=20)
plt.ylabel('$\Delta \log_{10}(SFR \ (M_\odot/yr))$',fontsize=20)
plt.axhline(y=.45,ls='--',color='k',label='$1.5\sigma_{MS}$')
plt.axhline(y=-.45,ls='--',color='k')
plt.legend()
if BTline is not None:
plt.axvline(x=BTline,ls=':',color='k')
print('\n Combined Samples: Spearman Rank')
#t = lcscommon.spearmanr(allBT,alldsfr)
#print(t)
if writefiles:
coreflag = (core_dsfr < -0.45) & (coreBT > 0.3)
outtab = Table([self.lcs.cat['NSAID'][flag1][coreflag],self.lcs.cat['RA_1'][flag1][coreflag],self.lcs.cat['DEC_1'][flag1][coreflag]],names=['NSAID','RA','DEC'])
outtab.write('core-btgt03-dsfrlt045.fits',overwrite=True)
# write out infall
flag = (infall_dsfr < -0.45) & (infallBT > 0.3)
outtab = Table([self.lcs.cat['NSAID'][flag2][flag],self.lcs.cat['RA_1'][flag2][flag],self.lcs.cat['DEC_1'][flag2][flag]],names=['NSAID','RA','DEC'])
outtab.write('infall-btgt03-dsfrlt045.fits',overwrite=True)
def plot_BThist_env(self,nbins=10,xmax=.3,writefiles=False):
''' Plot normalized hist of BT for different environments '''
flag1 = self.lcs.membflag & self.lcs.sampleflag
coreBT = self.lcs.cat[BTkey][flag1]
flag2 = self.lcs.infallflag & self.lcs.sampleflag
infallBT = self.lcs.cat[BTkey][flag2]
flag3 = (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut)
fieldBT = self.gsw.cat[BTkey][flag3]
plt.figure()
mybins = np.linspace(0,xmax,nbins)
delta_bin = mybins[1]-mybins[0]
mybins = mybins + 0.5*delta_bin
xvars = [fieldBT,coreBT,infallBT]
colors = ['.5',darkblue,lightblue]
labels = ['Field','LCS Core','LCS Infall']
orders = [1,3,2]
lws = [3,4,3]
markers = ['o','s','o']
markersizes = [1,8,8]
hatches = ['/','\\','|']
alphas = [.4,0,.5]
hatches = ['/','\\','|']
for i in range(len(xvars)):
npoints = len(xvars[i])
w = np.ones(npoints)/npoints
#print(w)
plt.hist(xvars[i],bins=mybins,color=colors[i],weights=w,\
histtype='stepfilled',lw=lws[i],alpha=alphas[i],zorder=orders[i])#hatch=hatches[i])
plt.hist(xvars[i],bins=mybins,color=colors[i],weights=w,\
histtype='step',lw=lws[i],zorder=orders[i],label=labels[i])#hatch=hatches[i])
#ybin,xbin = np.histogram(xvars[i],bins=mybins)
#yerr,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='std',bins=mybins)
#nyerr,xbin,binnumb = binned_statistic(xvars[i],yvars[i],statistic='count',bins=mybins)
#yerr = yerr/np.sqrt(nyerr)
#dbin = xbin[1]-xbin[0]
#plt.fill_between(xbin[:-1]+0.5*dbin,ybin+yerr,ybin-yerr,color=colors[i],alpha=.4)
#print('\n'+labels[i])
#print('r={:.4f}, pvalue={:.3e}'.format(t[0],t[1]))
#plt.plot(xbin,ybin/len(xvars[i]),'ko',color=colors[i],markersize=14)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel('$B/T$',fontsize=20)
plt.ylabel('$Fraction$',fontsize=20)
plt.legend(fontsize=14)
def get_legacy_images_BT(self,lcsflag=None,ncol=4,nrow=4,fignameroot='dsfr-bt-',figsize=(12,10)):
        '''
        PURPOSE:
        * display Legacy Survey cutout images for the selected LCS galaxies,
          tiling nrow x ncol panels per figure and saving each page as
          fignameroot<nfig>.png
        '''
if lcsflag is None:
lcsflag = self.lcs.cat['membflag']
ids = np.arange(len(self.lcs.cat))[lcsflag]
plt.figure(figsize=figsize)
nfig=0
nplot=0
for i in ids:
# open figure
plt.subplot(nrow,ncol,np.remainder(nplot+1,nrow*ncol)+1)
# get legacy image
d, w = getlegacy(self.lcs.cat['RA_1'][i],self.lcs.cat['DEC_2'][i])
plt.xticks([],[])
plt.yticks([],[])
plt.title("sSFR={0:.1f},dSFR={1:.1f},BT={2:.2f},n={3:.1f}".format(self.lcs.ssfr[i],self.lcs.dsfr[i],self.lcs.cat[BTkey][i],self.lcs.cat['SERSIC_N'][i]),fontsize=10)
if np.remainder(nplot+1,nrow*ncol) == 0:
# write out file and start a new one
plt.savefig(fignameroot+str(nfig)+'.png')
nfig+=1
plt.figure(figsize=figsize)
nplot += 1
plt.savefig(fignameroot+str(nfig)+'.png')
def core_getlegacy(self,figsize=(12,10),extraflag=None,dsfrcut=-0.45):
'''
PURPOSE:
* get legacy images for core galaxies with low dsfr and high B/T
INPUT
        * extraflag = any cuts beyond membership, mass, ssfrcut, and dist from MS
* dsfrcut = distance from MS, default = 1.5sigma = -0.45
'''
flag = self.lcs.membflag & (self.lcs.cat['logMstar']> self.masscut) \
& (self.lcs.ssfr > self.ssfrcut) \
& (self.lcs_dsfr < dsfrcut) #& (self.lcs.cat[BTkey] > 0.3)
if extraflag is not None:
flag = flag & extraflag
print('getting legacy images for ',sum(flag),' galaxies')
self.get_legacy_images_BT(flag,fignameroot='core-dsfr-bt-',figsize=figsize)
def infall_getlegacy(self,figsize=(12,10),extraflag=None):
        '''get legacy images for infall galaxies with low dsfr and high B/T '''
flag = self.lcs.infallflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut) &\
(self.lcs_dsfr < -0.45) #& (self.lcs.cat[BTkey] > 0.3)
if extraflag is not None:
flag = flag & extraflag
print('getting legacy images for ',sum(flag),' galaxies')
self.get_legacy_images_BT(flag,fignameroot='infall-dsfr-bt-',figsize=figsize)
def gsw_nobt_getlegacy(self,figsize=(12,10),ncol=4,nrow=4,extraflag=None):
        '''get legacy images for GSWLC galaxies that lack a valid B/T measurement '''
flag = self.gsw_mass_sfr_flag & ~(self.gsw.cat[BTkey] < 1.1)
if extraflag is not None:
flag = flag & extraflag
print('getting legacy images for ',sum(flag),' galaxies')
ids = np.arange(len(self.gsw.cat))[flag]
fignameroot='gsw-no-bt-'
plt.figure(figsize=figsize)
nfig=0
nplot=0
for i in ids:
# open figure
plt.subplot(nrow,ncol,np.remainder(nplot+1,nrow*ncol)+1)
# get legacy image
d, w = getlegacy(self.gsw.cat['RA'][i],self.gsw.cat['DEC'][i])
plt.xticks([],[])
plt.yticks([],[])
#plt.title("sSFR={0:.1f},dSFR={1:.1f},BT={2:.2f},n={3:.1f}".format(self.gsw.ssfr[i],self.gsw.dsfr[i],self.lcs.cat[BTkey][i],self.lcs.cat['SERSIC_N'][i]),fontsize=10)
if np.remainder(nplot+1,nrow*ncol) == 0:
# write out file and start a new one
plt.savefig(fignameroot+str(nfig)+'.png')
nfig+=1
plt.figure(figsize=figsize)
nplot += 1
plt.savefig(fignameroot+str(nfig)+'.png')
def plot_dvdr_sfgals(self, figname1=None,figname2=None,coreflag=False):
# log10(chabrier) = log10(Salpeter) - .25 (SFR estimate)
# log10(chabrier) = log10(diet Salpeter) - 0.1 (Stellar mass estimates)
xmin,xmax,ymin,ymax = 0,3.5,0,3.5
#if plotsingle:
# plt.figure(figsize=(8,6))
# ax=plt.gca()
# plt.subplots_adjust(left=.1,bottom=.15,top=.9,right=.9)
# plt.ylabel('$ \Delta v/\sigma $',fontsize=26)
# plt.xlabel('$ \Delta R/R_{200} $',fontsize=26)
# plt.legend(loc='upper left',numpoints=1)
# plot low sfr galaxies
#lowsfrcoreflag = self.lowsfr_flag & self.membflag
#lowsfrinfallflag = self.lowsfr_flag & self.infallflag
if coreflag:
lcsflag = self.lcs.membflag
else:
lcsflag = (self.lcs.membflag | self.lcs.infallflag)
parentflag = self.lcs_mass_sfr_flag & lcsflag
lowsfrflag = parentflag & self.lcs.lowsfr_flag #& self.infallflag
normalsfrflag = parentflag & ~self.lcs.lowsfr_flag
#####
## CHECK NUMBERS
#####
print('number in parent sample = ',sum(parentflag))
print('number in low SFR sample = ',sum(lowsfrflag))
print('number in normal SFR sample = ',sum(normalsfrflag))
x=(self.lcs.cat['DR_R200'])
y=abs(self.lcs.cat['DELTA_V'])
x1 = x[normalsfrflag]
y1 = y[normalsfrflag]
x2 = x[lowsfrflag]
y2 = y[lowsfrflag]
label='low SFR LCS'
ax1,ax2,ax3 = colormass(x1,y1,x2,y2,'Normal SFR LCS',label,'temp.pdf',ymin=-.02,ymax=3.02,xmin=0,xmax=3,nhistbin=10,ylabel='$\Delta v/\sigma$',xlabel='$\Delta R/R_{200}$',contourflag=False,alphagray=.7,hexbinflag=False,color2='k',color1='0.5',alpha1=1,ssfrlimit=-11.5,cumulativeFlag=True)
## DIVISION BETWEEN CORE/INFALL
xl=np.arange(0,2,.1)
ax1.plot(xl,-4./3.*xl+2,'k-',lw=3,color=darkblue)
## LABELS FOR CORE/INFALL
props = dict(boxstyle='square', facecolor=darkblue, alpha=0.9)
ax1.text(.03,.32,'CORE',transform=ax1.transAxes,fontsize=18,color='k',bbox=props)
props = dict(boxstyle='square', facecolor=lightblue, alpha=0.6)
ax1.text(.6,.6,'INFALL',transform=ax1.transAxes,fontsize=18,color='k',bbox=props)
## POINTS FOR LOW SFR CORE/INFALL
ax1.plot(x2,y2,'ko',mec='k',markersize=10)
if figname1 is not None:
plt.savefig(figname1)
if figname2 is not None:
plt.savefig(figname2)
def plot_dvdr_sfgals_2panel(self, figname1=None,figname2=None,coreflag=False,HIflag=False):
# log10(chabrier) = log10(Salpeter) - .25 (SFR estimate)
# log10(chabrier) = log10(diet Salpeter) - 0.1 (Stellar mass estimates)
xmin,xmax,ymin,ymax = 0,3.5,0,3.5
#if plotsingle:
# plt.figure(figsize=(8,6))
# ax=plt.gca()
# plt.subplots_adjust(left=.1,bottom=.15,top=.9,right=.9)
# plt.ylabel('$ \Delta v/\sigma $',fontsize=26)
# plt.xlabel('$ \Delta R/R_{200} $',fontsize=26)
# plt.legend(loc='upper left',numpoints=1)
# plot low sfr galaxies
#lowsfrcoreflag = self.lowsfr_flag & self.membflag
#lowsfrinfallflag = self.lowsfr_flag & self.infallflag
if coreflag:
lcsflag = self.lcs.membflag
else:
lcsflag = (self.lcs.membflag | self.lcs.infallflag)
parentflag = self.lcs_mass_sfr_flag & lcsflag
lowsfrflag = parentflag & self.lcs.lowsfr_flag #& self.infallflag
normalsfrflag = parentflag & ~self.lcs.lowsfr_flag
#####
## CHECK NUMBERS
#####
print('number in parent sample = ',sum(parentflag))
print('number in low SFR sample = ',sum(lowsfrflag))
print('number in normal SFR sample = ',sum(normalsfrflag))
x=(self.lcs.cat['DR_R200'])
y=abs(self.lcs.cat['DELTA_V'])
x1 = x[normalsfrflag]
y1 = y[normalsfrflag]
x2 = x[lowsfrflag]
y2 = y[lowsfrflag]
label2='Suppressed SFR'
## COMPARE POPULATIONS
print("")
print("delta R: normal vs suppressed")
t = anderson_ksamp([x1,x2])
print('Anderson-Darling: ',t)
pvalue_x = t[2]
print('pvalue = {:.3e}'.format(pvalue_x))
print("")
print("delta v/sigma: normal vs suppressed")
t = anderson_ksamp([y1,y2])
print('Anderson-Darling: ',t)
pvalue_y = t[2]
print('pvalue = {:.3e}'.format(pvalue_y))
#ax1,ax2,ax3 = colormass(x1,y1,x2,y2,'Normal SFR LCS',label,'temp.pdf',ymin=-.02,ymax=3.02,xmin=0,xmax=3,nhistbin=10,ylabel='$\Delta v/\sigma$',xlabel='$\Delta R/R_{200}$',contourflag=False,alphagray=.7,hexbinflag=False,color2='k',color1='0.5',alpha1=1,ssfrlimit=-11.5,cumulativeFlag=True)
## USING SUBPLOT2GRID INSTEAD
label1 = 'Normal SFR'
ymin=-.02
ymax=3.02
xmin=0
xmax=3
        nhistbin = 10
ylabel='$\Delta v/\sigma$'
xlabel='$\Delta R/R_{200}$'
color1='0.5'
color2='k'
nplotx=4
nploty=4
plt.figure(figsize=(12,5))
gs = gridspec.GridSpec(2,2)
ax1 = plt.subplot(gs[:,0])
ax2 = plt.subplot(gs[0,1])
ax3 = plt.subplot(gs[1,1])
plt.subplots_adjust(wspace=.25,hspace=.5)
#ax1 = plt.subplot2grid((nplotx,nploty),(0,0),rowspan=4,colspan=2)
#ax2 = plt.subplot2grid((nplotx,nploty),(0,2),colspan=2,rowspan=2)
#ax3 = plt.subplot2grid((nplotx,nploty),(2,2),colspan=2,rowspan=2)
ax1.plot(x1,y1,'ko',markersize=5,label=label1,color=color1)
## POINTS FOR LOW SFR CORE/INFALL
ax1.plot(x2,y2,'ko',markersize=10,label=label2,color=color2)
ax1.axis([xmin,xmax,ymin,ymax])
ax1.set_xlabel(xlabel,fontsize=20)
ax1.set_ylabel(ylabel,fontsize=20)
## DIVISION BETWEEN CORE/INFALL
xl=np.arange(0,2,.1)
ax1.plot(xl,-4./3.*xl+2,'k-',lw=3,color=darkblue)
## LABELS FOR CORE/INFALL
props = dict(boxstyle='square', facecolor=darkblue, alpha=0.9)
ax1.text(.03,.32,'CORE',transform=ax1.transAxes,fontsize=18,color='k',bbox=props)
props = dict(boxstyle='square', facecolor=lightblue, alpha=0.6)
ax1.text(.6,.6,'INFALL',transform=ax1.transAxes,fontsize=18,color='k',bbox=props)
## ADD HISTOGRAMS ON THE RIGHT
        # HISTOGRAM OF DELTA R/R200
        # (density= replaces the normed= keyword, which newer matplotlib removed)
        ax2.hist(x1,bins=len(x1),cumulative=True,density=True,label=label1,histtype='step',color=color1,lw=2)
        ax2.hist(x2,bins=len(x2),cumulative=True,density=True,label=label2,histtype='step',color=color2,lw=3)
ax2.set_xlabel(xlabel,fontsize=20)
ax2.text(0,.9,'AD pvalue={:.2f}'.format(pvalue_x),fontsize=16)
        # HISTOGRAM OF DELTA V/SIGMA
        ax3.hist(y1,bins=len(y1),cumulative=True,density=True,label=label1,histtype='step',color=color1,lw=2)
        ax3.hist(y2,bins=len(y2),cumulative=True,density=True,label=label2,histtype='step',color=color2,lw=3)
ax3.set_xlabel(ylabel,fontsize=20)
ax3.text(0,.9,'AD pvalue={:.2f}'.format(pvalue_y),fontsize=16)
## ADD THE HI SOURCES
if HIflag:
lcsHIFlag = self.lcs.cat['HIdef_flag'] & self.lcs_mass_sfr_flag & (self.lcs.membflag | self.lcs.infallflag)
ax1.plot(x[lcsHIFlag],y[lcsHIFlag],'bo',markerfacecolor='None',markersize=14,label='HI')
            ax2.hist(x[lcsHIFlag],bins=sum(lcsHIFlag),cumulative=True,density=True,label='HI',histtype='step',color='b')
            ax3.hist(y[lcsHIFlag],bins=sum(lcsHIFlag),cumulative=True,density=True,label='HI',histtype='step',color='b')
# fraction of normal SF galaxies with HI detections
Nnormal = np.sum(normalsfrflag)
NnormalHI = np.sum(normalsfrflag & lcsHIFlag)
Nsuppressed = np.sum(lowsfrflag)
NsuppressedHI = np.sum(lowsfrflag & lcsHIFlag)
binom_err = binom_conf_interval(NnormalHI,Nnormal)#lower, upper
frac = NnormalHI/Nnormal
print('fraction of normal galaxies with HI detections = {:.2f}-{:.2f}+{:.2f}'.format(frac,frac-binom_err[0],binom_err[1]-frac))
print(binom_err)
binom_err = binom_conf_interval(NsuppressedHI,Nsuppressed)#lower, upper
frac = NsuppressedHI/Nsuppressed
print('fraction of suppressed galaxies with HI detections = {:.2f}-{:.2f}+{:.2f}'.format(frac,frac-binom_err[0],binom_err[1]-frac))
# fraction of suppressed SF galaxies with HI detections
## ADD LEGENDS AFTER HI
ax1.legend(loc='upper right')
ax2.legend(loc='lower right')
ax3.legend(loc='lower right')
if figname1 is not None:
plt.savefig(figname1)
if figname2 is not None:
plt.savefig(figname2)
def plot_frac_suppressed(self,uvirflag=False,BTcut=None,plotsingle=True):
'''fraction of suppressed galaxies vs environment'''
slimit = -0.45
flag1 = self.lcs.membflag & (self.lcs_mass_sfr_flag)
if BTcut is not None:
flag1 = flag1 & (self.lcs.cat[BTkey] < BTcut)
if uvirflag:
flag1 = flag1 & (self.lcs_uvir_flag)
coreBT = self.lcs.cat[BTkey][flag1]
x1 = self.lcs.cat['logMstar'][flag1]
y1 = self.lcs.cat['logSFR'][flag1]
if BTcut is not None:
core_dsfr = y1-get_MS_BTcut(x1)
else:
core_dsfr = y1-self.gsw.get_MS(x1)
core_fsup = sum(core_dsfr < slimit)/len(core_dsfr)
core_err = binom_conf_interval(sum(core_dsfr < slimit),len(core_dsfr))#lower, upper
print('CORE')
print('frac suppressed = {:.2f}, {:.2f},{:.2f}'.format(core_fsup,core_err[0]-core_fsup,core_err[1]-core_fsup))
flag2 = self.lcs.infallflag & (self.lcs_mass_sfr_flag)
if BTcut is not None:
flag2 = flag2 & (self.lcs.cat[BTkey] < BTcut)
if uvirflag:
flag2 = flag2 & self.lcs_uvir_flag
infallBT = self.lcs.cat[BTkey][flag2]
x2 = self.lcs.cat['logMstar'][flag2]
y2 = self.lcs.cat['logSFR'][flag2]
#infall_dsfr = y2-get_BV_MS(x2)
if BTcut is not None:
infall_dsfr = y2-get_MS_BTcut(x2)
else:
infall_dsfr = y2-self.gsw.get_MS(x2)
infall_fsup = sum(infall_dsfr < slimit)/len(infall_dsfr)
infall_err = binom_conf_interval(sum(infall_dsfr < slimit),len(infall_dsfr))#lower, upper
print('INFALL')
print(infall_fsup,infall_err-infall_fsup)
flag3 = (self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut) #& self.gsw.field1
if BTcut is not None:
flag3 = flag3 & (self.gsw.cat[BTkey] < BTcut)
fieldBT = self.gsw.cat[BTkey][flag3]
x3 = self.gsw.cat['logMstar'][flag3]
y3 = self.gsw.cat['logSFR'][flag3]
#field_dsfr = y3-get_BV_MS(x3)
if BTcut is not None:
field_dsfr = y3-get_MS_BTcut(x3)
else:
field_dsfr = y3-self.gsw.get_MS(x3)
field_fsup = sum(field_dsfr < slimit)/len(field_dsfr)
field_err = binom_conf_interval(sum(field_dsfr < slimit),len(field_dsfr))#lower, upper
print('FIELD')
print(field_fsup,field_err-field_fsup)
print(np.array(field_err).shape)
if plotsingle:
plt.figure()
xvars = [0,2,1]
yvars = [field_fsup,core_fsup,infall_fsup]
yerrs = [field_err,core_err,infall_err]
colors = ['.5',darkblue,lightblue]
labels = ['$Field$','$LCS \ Core$','$LCS \ Infall$']
orders = [1,3,2]
lws = [3,4,3]
alphas = [.8,.8,.8]
markers = ['o','s','o']
markersizes = 20*np.ones(3)
for i in [0,2,1]: # change order so we go field, infall, core
yerr = np.zeros((2,1))
yerr[0] = yvars[i]-yerrs[i][0]
yerr[1] = yerrs[i][1]-yvars[i]
if plotsingle:
markerfacecolor = colors[i]
marker = 'o'
else:
markerfacecolor = colors[i]
marker = '^'
plt.errorbar(np.array(xvars[i]),np.array(yvars[i]),\
yerr=yerr,\
color=colors[i],alpha=alphas[i],zorder=orders[i],\
markersize=markersizes[i],\
fmt=marker,label=labels[i],markerfacecolor=markerfacecolor)
if plotsingle:
            plt.xticks(np.arange(0,3),['$Field$','$LCS \ Infall$','$LCS \ Core$'],fontsize=18)
#plt.xlabel('$Environment$',fontsize=20)
plt.ylabel('$Suppressed \ Fraction $',fontsize=20)
plt.xlim(-0.4,2.4)
def compare_lcs_sfr(self,core=True,infall=True):
'''hist of sfrs of core vs infall lcs galaxies'''
plt.figure(figsize=(8,5))
plt.subplots_adjust(bottom=.2,left=.05)
mybins=np.linspace(-2.5,2,10)
ssfr = self.lcs.cat['logSFR'] - self.lcs.cat['logMstar']
flag1 = (self.lcs.cat['logSFR'] > -50) & (ssfr > self.ssfrcut) & (self.lcs.cat['logMstar'] > self.masscut)
x1 = self.lcs.cat['logSFR'][self.lcs.cat['membflag'] & flag1 ]
x2 = self.lcs.cat['logSFR'][~self.lcs.cat['membflag'] &( self.lcs.cat['DELTA_V'] < 3.) & flag1]
print('KS test comparing SFRs:')
t = ks_2samp(x1,x2)
print(t)
vars = [x1,x2]
labels = ['LCS Core','LCS Infall']
mycolors = ['r','b']
myhatch=['/','\\']
for i in [0,1]:
#plt.subplot(1,2,i+1)
if (i == 0) and not(core):
continue
if (i == 1) and not(infall):
continue
t = plt.hist(vars[i],label=labels[i],lw=2,bins=mybins,hatch=myhatch[i],color=mycolors[i],histtype='step')
plt.xlabel('$log_{10}(SFR/(M_\odot/yr))$',fontsize=24)
#plt.axis([-2.5,2,0,50])
plt.legend(loc='upper left')
if not(core):
outfile=homedir+'/research/LCS/plots/lcsinfall-sfrs'
elif not(infall):
outfile=homedir+'/research/LCS/plots/lcscore-sfrs'
else:
outfile=homedir+'/research/LCS/plots/lcscore-infall-sfrs'
if self.cutBT:
plt.savefig(outfile+'-BTcut.png')
plt.savefig(outfile+'-BTcut.pdf')
else:
plt.savefig(outfile+'.png')
plt.savefig(outfile+'.pdf')
def ks_stats(self,massmatch=False):
'''
GOAL:
* compute KS statistics for table in paper comparing
- LCS core/field vs GSWLC
- LCS core vs field
- B/T cut is set when calling program
- with and without mass matching
'''
#flag3 = lcsflag & (self.lcs.cat['logMstar']> self.masscut) & (self.lcs.ssfr > self.ssfrcut)
        # store all KS test results, for writing latex table
all_results = []
lcsmemb = self.lcs.membflag & self.lcs_mass_sfr_flag
lcsinfall = self.lcs.infallflag & self.lcs_mass_sfr_flag
mc = self.lcs.cat['logMstar'][lcsmemb]
sfrc = self.lcs.cat['logSFR'][lcsmemb]
#dsfrc = sfrc - get_BV_MS(mc)
dsfrc = sfrc - self.gsw.get_MS(mc)
zc = self.lcs.cat['Z_1'][lcsmemb]
BTc = self.lcs.cat[BTkey][lcsmemb]
mi = self.lcs.cat['logMstar'][lcsinfall]
sfri = self.lcs.cat['logSFR'][lcsinfall]
#dsfri = sfri - get_BV_MS(mi)
dsfri = sfri - self.gsw.get_MS(mi)
zi = self.lcs.cat['Z_1'][lcsinfall]
BTi = self.lcs.cat[BTkey][lcsinfall]
field = self.gsw_mass_sfr_flag #(self.gsw.cat['logMstar'] > self.masscut) & (self.gsw.ssfr > self.ssfrcut)
mf = self.gsw.cat['logMstar'][field]
sfrf = self.gsw.cat['logSFR'][field]
#dsfrf = sfrf - get_BV_MS(mf)
dsfrf = sfrf - self.gsw.get_MS(mf)
zf = self.gsw.cat['Z_1'][field]
BTf = self.gsw.cat[BTkey][field]
# write out file to use in latex table program
allnames = ['logmstar','logsfr','dlogsfr','z','BT']
alldata = [[mc,sfrc,dsfrc,zc,BTc],\
[mi,sfri,dsfri,zi,BTi],\
[mf,sfrf,dsfrf,zf,BTf]]
outnames = ['core','infall','field']
for i in range(len(alldata)):
stab = Table(alldata[i],names=allnames)
if massmatch:
basename = 'LCS-sfr-mstar-{}-mmatch'.format(outnames[i])
else:
basename = 'LCS-sfr-mstar-{}'.format(outnames[i])
if args.cutBT:
sfile = basename+'-BTcut.fits'
else:
                sfile = basename+'.fits'
stab.write(sfile,format='fits',overwrite=True)
def compare_HIdef(self,combineLCS=False):
'''
GOAL: compare HIdef for field and LCS core, infall
'''
# select LCS galaxies with:
# HI measurements
# in sfr and stellar mass cuts
lcsFlag = self.lcs.cat['HIdef_flag'] & self.lcs_mass_sfr_flag
lcsCoreFlag = lcsFlag & self.lcs.membflag
lcsInfallFlag = lcsFlag & self.lcs.infallflag
# select field galaxies with:
# HI measurements
# in sfr and stellar mass cuts
fieldFlag = self.gsw.HIdef['HIdef_flag'] & self.gsw_mass_sfr_flag
# plot HIdef vs dSFR
HIdef_cats = [self.lcs.cat,self.lcs.cat,self.gsw.HIdef]
HIdefKey = 'HIdef_Boselli'
dsfr_vars = [self.lcs_dsfr,self.lcs_dsfr,self.gsw_dsfr]
flags = [lcsCoreFlag,lcsInfallFlag,fieldFlag]
labels = ['Core','Infall','Field']
colors = [darkblue, lightblue,'0.5']
alphas = [1,1,.1]
msize = [8,8,4]
fig, ax = plt.subplots(figsize=(8,6))
#colors = ['.5',darkblue,lightblue]
#labels = ['Field','LCS Core','LCS Infall']
orders = [3,2,1]
lws = [4,3,3]
histalphas = [0,.5,.4]
divider = make_axes_locatable(ax)
# below height and pad are in inches
ax_histx = divider.append_axes("top", 1.2, pad=0.1, sharex=ax)
ax_histy = divider.append_axes("right", 1.2, pad=0.1, sharey=ax)
# make some labels invisible
ax_histx.xaxis.set_tick_params(labelbottom=False)
ax_histy.yaxis.set_tick_params(labelleft=False)
for i in [2,0,1]:
# plot LCS core
# plot LCS infall
# plot field
x = dsfr_vars[i][flags[i]]
y = HIdef_cats[i][HIdefKey][flags[i]]
nbins=8
ax.plot(x,y,'bo',color=colors[i],alpha=alphas[i],markersize=msize[i],label=labels[i])
# plot filled histogram, then same histogram with a heavier line
            ax_histx.hist(x,bins=nbins,cumulative=False,density=True,histtype='stepfilled',color=colors[i],lw=lws[i],alpha=histalphas[i],zorder=orders[i])
            ax_histx.hist(x,bins=nbins,cumulative=False,density=True,histtype='step',color=colors[i],lw=lws[i],zorder=orders[i])
            # plot y histograms
            ax_histy.hist(y,bins=nbins,cumulative=False,density=True,histtype='stepfilled',orientation='horizontal',color=colors[i],lw=lws[i],alpha=histalphas[i],zorder=orders[i])
            ax_histy.hist(y,bins=nbins,cumulative=False,density=True,histtype='step',orientation='horizontal',color=colors[i],lw=lws[i],zorder=orders[i])
ax.legend()
ax.set_xlabel('$\Delta SFR$',fontsize=20)
ax.set_ylabel('$HI \ Deficiency $',fontsize=20)
# add linear fit to dSFR vs HIdef for field galaxies
x = dsfr_vars[2][flags[2]]
y = HIdef_cats[2][HIdefKey][flags[2]]
popt,V = np.polyfit(x,y,1,cov=True)
xline = np.linspace(-.75,.75,100)
yline = np.polyval(popt,xline)
ax.plot(xline,yline,'k-',lw=2)
# print best-fit line slope and error
print()
        print('best-fit line for field = {:.3f}+/-{:.3f}'.format(popt[0],np.sqrt(V[0][0])))
print()
# test for correlation between HIdef and dSFR
# field
x = dsfr_vars[2][flags[2]]
y = HIdef_cats[2][HIdefKey][flags[2]]
print('Spearman Rank test between dSFR and HIdef for field:')
t = lcscommon.spearmanr(x,y)
print(t)
print()
print('number is core={}, infall={},field={}'.format(sum(flags[0]),sum(flags[1]),sum(flags[2])))
print()
# ks test comparing LCS core and field
x1 = dsfr_vars[0][flags[0]]
x2 = dsfr_vars[2][flags[2]]
print()
print('comparing LCS core vs Field: dSFR')
print(ks_2samp(x1,x2))
t = anderson_ksamp([x1,x2])
print('Anderson-Darling: ',t)
y1 = HIdef_cats[0][HIdefKey][flags[0]]
y2 = HIdef_cats[2][HIdefKey][flags[2]]
print()
print('comparing LCS core vs Field: HIdef')
print(ks_2samp(y1,y2))
t = anderson_ksamp([y1,y2])
print('Anderson-Darling: ',t)
# repeat test, but use all LCS vs field
x1 = dsfr_vars[0][lcsFlag]
x2 = dsfr_vars[2][flags[2]]
print()
print('comparing all LCS vs Field: dSFR')
print(ks_2samp(x1,x2))
t = anderson_ksamp([x1,x2])
print('Anderson-Darling: ',t)
y1 = HIdef_cats[0][HIdefKey][lcsFlag]
y2 = HIdef_cats[2][HIdefKey][flags[2]]
print()
print('comparing all LCS vs Field: HIdef')
print(ks_2samp(y1,y2))
t = anderson_ksamp([y1,y2])
print('Anderson-Darling: ',t)
plt.savefig(plotdir+'/dsfr-HIdef.png')
plt.savefig(plotdir+'/dsfr-HIdef.pdf')
if __name__ == '__main__':
###########################
##### SET UP ARGPARSE
###########################
parser = argparse.ArgumentParser(description ='Program to run analysis for LCS paper 2')
parser.add_argument('--minmass', dest = 'minmass', default = 9.7, help = 'minimum stellar mass for sample. default is log10(M*) > 9.7')
parser.add_argument('--cutBT', dest = 'cutBT', default = False, action='store_true', help = 'Set this to cut the sample by B/T < 0.3.')
parser.add_argument('--BT', dest = 'BT', default = 0.4, help = 'B/T cut to use. Default is 0.4.')
parser.add_argument('--HIdef', dest = 'HIdef', default=False,action='store_true', help = 'Include HIdef information for GSWLC')
    parser.add_argument('--ellip', dest = 'ellip', default = .75, help = 'ellipticity cut to use. Default is 0.75.')
#parser.add_argument('--cutBT', dest = 'diskonly', default = 1, help = 'True/False (enter 1 or 0). normalize by Simard+11 disk size rather than Re for single-component sersic fit. Default is true. ')
args = parser.parse_args()
trimgswlc = True
# 10 arcsec match b/w GSWLC-X2-NO-DR10-AGN-Simard2011-tab1 and Tempel_gals_below_13.fits in topcat, best,symmetric
#gsw_basefile = homedir+'/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-Tempel-13-2020Nov11'
# 10 arcsec match b/w GSWLC-X2-NO-DR10-AGN-Simard2011-tab1 and Tempel_gals_below_13_5.fits in topcat, best,symmetric
#gsw_basefile = homedir+'/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-Tempel-13.5-2020Nov11'
# 10 arcsec match b/w GSWLC-X2-NO-DR10-AGN-Simard2011-tab1 and Tempel_gals_below_12.cat in topcat, best,symmetric
#gsw_basefile = homedir+'/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-Tempel-12.5-2020Nov11'
gsw_basefile = homedir+'/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-Tempel-13-2020Nov25'
# matched to Simard table 3 so we could use the sersic index from the single component fit as
# one measure of morphology
gsw_basefile = homedir+'/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-tab3-Tempel-13-2021Jan07'
if trimgswlc:
#g = gswlc_full('/home/rfinn/research/GSWLC/GSWLC-X2.dat')
        # 10 arcsec match b/w GSWLC-X2 and Tempel-12.5_v_2 in topcat, best, symmetric
#g = gswlc_full('/home/rfinn/research/LCS/tables/GSWLC-Tempel-12.5-v2.fits')
#g = gswlc_full(homedir+'/research/GSWLC/GSWLC-Tempel-12.5-v2-Simard2011-NSAv0-unwise.fits',cutBT=args.cutBT)
g = gswlc_full(gsw_basefile+'.fits',cutBT=args.cutBT,HIdef=args.HIdef)
g.cut_redshift()
g.save_trimmed_cat()
gfull = g
#g = gswlc('/home/rfinn/research/LCS/tables/GSWLC-X2-LCS-Zoverlap.fits')
if args.cutBT:
infile=gsw_basefile+'-LCS-Zoverlap-BTcut.fits'
else:
infile = gsw_basefile+'-LCS-Zoverlap.fits'
if args.HIdef:
HIdef_file = '/home/rfinn/research/GSWLC/GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-tab3-Tempel-13-2021Jan07-HIdef-2021Aug28-trimmed.fits'
else:
HIdef_file = None
g = gswlc(infile,HIdef_file=HIdef_file)
#g.plot_ms()
#g.plot_field1()
lcsfile = homedir+'/research/LCS/tables/lcs-gswlc-x2-match.fits'
lcsfile = homedir+'/research/LCS/tables/LCS_all_size_KE_SFR_GSWLC2_X2.fits'
lcsfile = homedir+'/research/LCS/tables/LCS-KE-SFR-GSWLC-X2-NO-DR10-AGN-Simard2011-tab1.fits'
lcsfile = homedir+'/research/LCS/tables/LCS-GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-vizier-10arcsec.fits'
# added HI def measurements from A100 catalog
# this adds columns AGC, ..., logMH, logMH_err,
# HIdef_Toribio, HIdef_Boselli, HIdef_Jones
# HIdef_flag - this means it has HI, not that it is deficient!
lcsfile = homedir+'/research/LCS/tables/LCS-GSWLC-X2-NO-DR10-AGN-Simard2011-tab1-vizier-10arcsec-Tempel-Simard-tab3-2021Apr21-HIdef-2021Aug28.fits'
lcs = lcsgsw(lcsfile,cutBT=args.cutBT)
#lcs = lcsgsw('/home/rfinn/research/LCS/tables/LCS_all_size_KE_SFR_GSWLC2_X2.fits',cutBT=args.cutBT)
#lcs.compare_sfrs()
b = comp_lcs_gsw(lcs,g,minmstar=float(args.minmass),cutBT=args.cutBT)
|
rfinn/LCS
|
python/lcs_paper2.py
|
Python
|
gpl-3.0
| 156,757
|
[
"Galaxy"
] |
54a1323c9d2b7fe29b7cc02a2dbd91903fa8779f59351e556f7ab4bddaf8deec
|
# https://leetcode.com/problems/binary-tree-longest-consecutive-sequence/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# This is essentially an iterative DFS over the tree. Each stack entry
# holds a node together with the length of the longest consecutive
# sequence that ends at that node. When an entry is popped we update
# max_sofar, and for each child we push length + 1 if the child's value
# continues the sequence (child.val == node.val + 1), or 1 if the
# sequence is broken and must restart at the child.
class Solution(object):
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
max_sofar = 0
stack = [[root, 1]]
while stack:
node, length = stack.pop()
max_sofar = max(max_sofar, length)
for child in [node.left, node.right]:
if child:
if child.val == node.val + 1:
l = length + 1
else:
l = 1
stack.append([child, l])
return max_sofar
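# A quick usage sketch (illustrative; it assumes the commented-out TreeNode
# class above). The tree
#     1
#      \
#       3
#      / \
#     2   4
#          \
#           5
# contains the consecutive path 3 -> 4 -> 5, so the expected answer is 3:
#
#   root = TreeNode(1); root.right = TreeNode(3)
#   root.right.left = TreeNode(2); root.right.right = TreeNode(4)
#   root.right.right.right = TreeNode(5)
#   print(Solution().longestConsecutive(root))  # 3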
|
young-geng/leet_code
|
problems/298_binary-tree-longest-consecutive-sequence/main.py
|
Python
|
mit
| 1,474
|
[
"VisIt"
] |
bffa6bc28040f4d184b8658482f0fdfcab1ed80919f2aac8dfed4a719a3ad4cb
|
#----------------------------------
#Lab8.py
#Brian O'Dell
#October 24, 2016
#All that is needed to run this file is to type
#python Lab8.py
#the file needs to be in the current path in the terminal
#
#This program will take a truth table that the user enters
#and construct the appropriate conjunctive normal form,
#and disjunctive normal form expressions that represent
#the given values of the truth table
#
#The program will alert the user if they enter incorrect
#input, but it will not account for it or change how it runs.
#If invalid input is entered the output will not be correct.
#For the input to be considered correct it must be a 'T'/'t' or 'F'/'f'
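#
#Worked example (illustrative, not part of the lab output): for the row
#p=T, q=F, r=T the code builds the literals p, q', r. Entering s=T appends
#the DNF minterm pq'r; entering s=F appends the CNF clause (p'+q+r') instead.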
table = {}
DNF = ''
CNF = ''
for i in range(8):
test = bin(i)[2:].zfill(3)
table[i] = {'p':'T', 'q':'T', 'r':'T', 's':'T'}
x = "p"
y = "q"
z = "r"
a = "p'"
b = "q'"
c = "r'"
#this section adjusts the dictionary to have the proper truth values
#It will then prompt the user to enter the overall truth value and
#will store it in table[i]['s']. It also gets the correct strings
#needed to create the DNF and CNF forms stored in x,y,z and a,b,c respectively
if test[0] == '1':
table[i]['p'] = 'F'
x = "p'"
a = "p"
if test[1] == '1':
table[i]['q'] = 'F'
y = "q'"
b = "q"
if test[2] == '1':
table[i]['r'] ='F'
z = "r'"
c = "r"
print 'p=',table[i]['p'],'q=',table[i]['q'],'r=',table[i]['r'],'Truth value is (T/F): '
table[i]['s'] = str(raw_input())
#this section will test for CNF and DNF forms based on what the
#user previously entered, If they entered T the proper expression
#will be entered into the DNF string and if they entered F the
#proper expression will be entered into the CNF string
if(table[i]['s']=='T' or table[i]['s']=='t'):
DNF += x + y + z + '+'
elif(table[i]['s']=='F' or table[i]['s']=='f'):
CNF += '('+a+'+'+b+'+'+c+')'
else:
print('\nAn invalid input was detected\n')
#end of for
#corrects DNF for trailing '+'
DNF = DNF[:(len(DNF)-1)]
#if no T's or no F's were found one of these will not print
#if invalid input is found then the output will not be correct
#if every input was invalid it is possible nothing will print
if(DNF != ""):
print('DNF expression for the truth table is:')
print(DNF)
if(CNF != ""):
print('CNF expression for the truth table is:')
print(CNF)
|
brian-o/CS-CourseWork
|
CS278/Lab8/Lab8.py
|
Python
|
gpl-3.0
| 2,407
|
[
"Brian"
] |
607f043d28895cf94708b3221c2168babc26f833fef704ac3feaba370ee7507e
|
# -*- coding: utf-8 -*-
"""
pysteps.noise.utils
===================
Miscellaneous utility functions related to generation of stochastic perturbations.
.. autosummary::
:toctree: ../generated/
compute_noise_stddev_adjs
"""
import numpy as np
try:
import dask
dask_imported = True
except ImportError:
dask_imported = False
def compute_noise_stddev_adjs(
R,
R_thr_1,
R_thr_2,
F,
decomp_method,
noise_filter,
noise_generator,
num_iter,
conditional=True,
num_workers=1,
seed=None,
):
"""Apply a scale-dependent adjustment factor to the noise fields used in STEPS.
Simulates the effect of applying a precipitation mask to a Gaussian noise
field obtained by the nonparametric filter method. The idea is to decompose
the masked noise field into a cascade and compare the standard deviations
    of each level with those of the observed precipitation intensity field.
This gives correction factors for the standard deviations :cite:`BPS2006`.
The calculations are done for n realizations of the noise field, and the
correction factors are calculated from the average values of the standard
deviations.
Parameters
----------
R: array_like
The input precipitation field, assumed to be in logarithmic units
(dBR or reflectivity).
R_thr_1: float
Intensity threshold for precipitation/no precipitation.
R_thr_2: float
Intensity values below R_thr_1 are set to this value.
F: dict
A bandpass filter dictionary returned by a method defined in
pysteps.cascade.bandpass_filters. This defines the filter to use and
the number of cascade levels.
decomp_method: function
A function defined in pysteps.cascade.decomposition. Specifies the
method to use for decomposing the observed precipitation field and
noise field into different spatial scales.
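    noise_filter: dict
        A filter object that is passed to noise_generator, e.g. one
        returned by an initializer in pysteps.noise.fftgenerators.
    noise_generator: function
        A function that generates a field of Gaussian noise from
        noise_filter, e.g. a generator method from
        pysteps.noise.fftgenerators.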
num_iter: int
The number of noise fields to generate.
conditional: bool
If set to True, compute the statistics conditionally by excluding areas
of no precipitation.
num_workers: int
The number of workers to use for parallel computation. Applicable if
dask is installed.
seed: int
Optional seed number for the random generators.
Returns
-------
out: list
A list containing the standard deviation adjustment factor for each
cascade level.
"""
MASK = R >= R_thr_1
R = R.copy()
R[~np.isfinite(R)] = R_thr_2
R[~MASK] = R_thr_2
if not conditional:
mu, sigma = np.mean(R), np.std(R)
else:
mu, sigma = np.mean(R[MASK]), np.std(R[MASK])
R -= mu
MASK_ = MASK if conditional else None
decomp_R = decomp_method(R, F, mask=MASK_)
if dask_imported and num_workers > 1:
res = []
else:
N_stds = []
randstates = []
for k in range(num_iter):
randstates.append(np.random.RandomState(seed=seed))
seed = np.random.randint(0, high=1e9)
for k in range(num_iter):
def worker():
# generate Gaussian white noise field, filter it using the chosen
# method, multiply it with the standard deviation of the observed
# field and apply the precipitation mask
N = noise_generator(noise_filter, randstate=randstates[k], seed=seed)
N = N / np.std(N) * sigma + mu
N[~MASK] = R_thr_2
# subtract the mean and decompose the masked noise field into a
# cascade
N -= mu
decomp_N = decomp_method(N, F, mask=MASK_)
return decomp_N["stds"]
if dask_imported and num_workers > 1:
res.append(dask.delayed(worker)())
else:
N_stds.append(worker())
if dask_imported and num_workers > 1:
N_stds = dask.compute(*res, num_workers=num_workers)
# for each cascade level, compare the standard deviations between the
# observed field and the masked noise field, which gives the correction
# factors
return decomp_R["stds"] / np.mean(np.vstack(N_stds), axis=0)
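# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of this module). It
# assumes the standard pysteps helpers named below; adapt to your data:
#
#   from pysteps.cascade.bandpass_filters import filter_gaussian
#   from pysteps.cascade.decomposition import decomposition_fft
#   from pysteps.noise.fftgenerators import (
#       initialize_nonparam_2d_fft_filter,
#       generate_noise_2d_fft_filter,
#   )
#
#   # R: 2D precipitation field in dB units
#   bp_filter = filter_gaussian(R.shape, 8)          # 8 cascade levels
#   noise_filter = initialize_nonparam_2d_fft_filter(R)
#   adj = compute_noise_stddev_adjs(
#       R, R_thr_1=-10.0, R_thr_2=-15.0, F=bp_filter,
#       decomp_method=decomposition_fft,
#       noise_filter=noise_filter,
#       noise_generator=generate_noise_2d_fft_filter,
#       num_iter=30,
#   )
# ---------------------------------------------------------------------------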
|
pySTEPS/pysteps
|
pysteps/noise/utils.py
|
Python
|
bsd-3-clause
| 4,181
|
[
"Gaussian"
] |
57f5d9f9c2a856a374ea0c2e96b2074970696bdf09a6e1a839f323eb3f88a475
|
# Copyright (c) 2015 the GPy Authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .bayesian_gplvm import BayesianGPLVM
class DPBayesianGPLVM(BayesianGPLVM):
"""
    Bayesian Gaussian Process Latent Variable Model with Discriminative prior
"""
def __init__(self, Y, input_dim, X_prior, X=None, X_variance=None, init='PCA', num_inducing=10,
Z=None, kernel=None, inference_method=None, likelihood=None,
name='bayesian gplvm', mpi_comm=None, normalizer=None,
missing_data=False, stochastic=False, batchsize=1):
super(DPBayesianGPLVM,self).__init__(Y=Y, input_dim=input_dim, X=X, X_variance=X_variance, init=init, num_inducing=num_inducing, Z=Z, kernel=kernel, inference_method=inference_method, likelihood=likelihood, mpi_comm=mpi_comm, normalizer=normalizer, missing_data=missing_data, stochastic=stochastic, batchsize=batchsize, name='dp bayesian gplvm')
self.X.mean.set_prior(X_prior)
self.link_parameter(X_prior)
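# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the data are toy placeholders and
# GPy.priors.Gaussian is an assumed choice for the latent prior):
#
#   import numpy as np
#   import GPy
#   Y = np.random.randn(40, 8)
#   latent_prior = GPy.priors.Gaussian(mu=0., sigma=1.)
#   m = DPBayesianGPLVM(Y, input_dim=2, X_prior=latent_prior)
#   m.optimize(messages=False)
# ---------------------------------------------------------------------------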
|
mikecroucher/GPy
|
GPy/models/dpgplvm.py
|
Python
|
bsd-3-clause
| 1,046
|
[
"Gaussian"
] |
5c4c4e72f7fa63a9f1fe6e1c05dd2266bbaeb4a40c1107c1d9ba2b5b1f62fb80
|
#
# File      : cdk.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2017-10-16 Tanek Add CDK IDE support
#
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
def SDKAddGroup(ProjectFiles, parent, name, files, project_path):
# don't add an empty group
if len(files) == 0:
return
group = SubElement(parent, 'VirtualDirectory', attrib={'Name': name})
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
elm_attr_name = os.path.join(path, name)
file = SubElement(group, 'File', attrib={'Name': elm_attr_name})
return group
def _CDKProject(tree, target, script):
project_path = os.path.dirname(os.path.abspath(target))
root = tree.getroot()
out = open(target, 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
ProjectFiles = []
for child in root:
if child.tag == 'VirtualDirectory':
root.remove(child)
for group in script:
group_tree = SDKAddGroup(ProjectFiles, root, group['name'], group['src'], project_path)
        # get each include path
        if 'CPPPATH' in group and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
        # get each group's definitions
        if 'CPPDEFINES' in group and group['CPPDEFINES']:
            CPPDEFINES += group['CPPDEFINES']
# get each group's cc flags
if 'CCFLAGS' in group and group['CCFLAGS']:
if CCFLAGS:
CCFLAGS += ' ' + group['CCFLAGS']
else:
CCFLAGS += group['CCFLAGS']
# get each group's link flags
if 'LINKFLAGS' in group and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# todo: cdk add lib
# write include path, definitions and link flags
text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in CPPPATH])
IncludePath = tree.find('BuildConfigs/BuildConfig/Compiler/IncludePath')
IncludePath.text = text
IncludePath = tree.find('BuildConfigs/BuildConfig/Asm/IncludePath')
IncludePath.text = text
Define = tree.find('BuildConfigs/BuildConfig/Compiler/Define')
Define.text = ', '.join(set(CPPDEFINES))
CC_Misc = tree.find('BuildConfigs/BuildConfig/Compiler/OtherFlags')
CC_Misc.text = CCFLAGS
LK_Misc = tree.find('BuildConfigs/BuildConfig/Linker/OtherFlags')
LK_Misc.text = LINKFLAGS
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
def CDKProject(target, script):
template_tree = etree.parse('template.cdkproj')
_CDKProject(template_tree, target, script)
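# ---------------------------------------------------------------------------
# Sketch of the `script` structure this module expects (illustrative; in
# RT-Thread's build the 'src' entries are SCons file nodes providing
# rfile(), so this is not directly runnable outside SCons):
#
#   script = [
#       {'name': 'Kernel',
#        'src': kernel_src_nodes,           # SCons File nodes
#        'CPPPATH': ['include'],
#        'CPPDEFINES': ['RT_USING_COMPONENTS_INIT'],
#        'CCFLAGS': '',
#        'LINKFLAGS': ''},
#   ]
#   CDKProject('project.cdkproj', script)   # reads template.cdkproj from cwd
# ---------------------------------------------------------------------------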
|
weiyuliang/rt-thread
|
tools/cdk.py
|
Python
|
apache-2.0
| 4,091
|
[
"CDK"
] |
81fcfea3545077bea7893635b6163f78d67a535c4c2570410a01dbcb87ae6fdd
|
import logging
import os
import shutil
import galaxy.tools
from galaxy import util
from galaxy.datatypes import checkers
from galaxy.tools import parameters
from galaxy.util.expressions import ExpressionContext
from galaxy.web.form_builder import SelectField
from galaxy.tools.actions.upload import UploadToolAction
from tool_shed.util import basic_util
log = logging.getLogger( __name__ )
def build_shed_tool_conf_select_field( app ):
"""Build a SelectField whose options are the keys in app.toolbox.shed_tool_confs."""
options = []
for shed_tool_conf_dict in app.toolbox.shed_tool_confs:
shed_tool_conf_filename = shed_tool_conf_dict[ 'config_filename' ]
if shed_tool_conf_filename != app.config.migrated_tools_config:
if shed_tool_conf_filename.startswith( './' ):
option_label = shed_tool_conf_filename.replace( './', '', 1 )
else:
option_label = shed_tool_conf_filename
options.append( ( option_label, shed_tool_conf_filename ) )
select_field = SelectField( name='shed_tool_conf' )
for option_tup in options:
select_field.add_option( option_tup[ 0 ], option_tup[ 1 ] )
return select_field
def build_tool_panel_section_select_field( app ):
"""Build a SelectField whose options are the sections of the current in-memory toolbox."""
options = []
for k, v in app.toolbox.tool_panel.items():
if isinstance( v, galaxy.tools.ToolSection ):
options.append( ( v.name, v.id ) )
select_field = SelectField( name='tool_panel_section_id', display='radio' )
for option_tup in options:
select_field.add_option( option_tup[ 0 ], option_tup[ 1 ] )
return select_field
def copy_sample_file( app, filename, dest_path=None ):
"""
Copy xxx.sample to dest_path/xxx.sample and dest_path/xxx. The default value for dest_path
is ~/tool-data.
"""
if dest_path is None:
dest_path = os.path.abspath( app.config.tool_data_path )
sample_file_name = basic_util.strip_path( filename )
copied_file = sample_file_name.replace( '.sample', '' )
full_source_path = os.path.abspath( filename )
full_destination_path = os.path.join( dest_path, sample_file_name )
# Don't copy a file to itself - not sure how this happens, but sometimes it does...
if full_source_path != full_destination_path:
# It's ok to overwrite the .sample version of the file.
shutil.copy( full_source_path, full_destination_path )
# Only create the .loc file if it does not yet exist. We don't overwrite it in case it
# contains stuff proprietary to the local instance.
if not os.path.exists( os.path.join( dest_path, copied_file ) ):
shutil.copy( full_source_path, os.path.join( dest_path, copied_file ) )
def copy_sample_files( app, sample_files, tool_path=None, sample_files_copied=None, dest_path=None ):
"""
Copy all appropriate files to dest_path in the local Galaxy environment that have not
already been copied. Those that have been copied are contained in sample_files_copied.
The default value for dest_path is ~/tool-data. We need to be careful to copy only
appropriate files here because tool shed repositories can contain files ending in .sample
that should not be copied to the ~/tool-data directory.
"""
filenames_not_to_copy = [ 'tool_data_table_conf.xml.sample' ]
sample_files_copied = util.listify( sample_files_copied )
for filename in sample_files:
filename_sans_path = os.path.split( filename )[ 1 ]
if filename_sans_path not in filenames_not_to_copy and filename not in sample_files_copied:
if tool_path:
filename=os.path.join( tool_path, filename )
# Attempt to ensure we're copying an appropriate file.
if is_data_index_sample_file( filename ):
copy_sample_file( app, filename, dest_path=dest_path )
def generate_message_for_invalid_tools( app, invalid_file_tups, repository, metadata_dict, as_html=True,
displaying_invalid_tool=False ):
if as_html:
new_line = '<br/>'
bold_start = '<b>'
bold_end = '</b>'
else:
new_line = '\n'
bold_start = ''
bold_end = ''
message = ''
if app.name == 'galaxy':
tip_rev = str( repository.changeset_revision )
else:
tip_rev = str( repository.tip( app ) )
if not displaying_invalid_tool:
if metadata_dict:
message += "Metadata may have been defined for some items in revision '%s'. " % tip_rev
message += "Correct the following problems if necessary and reset metadata.%s" % new_line
else:
message += "Metadata cannot be defined for revision '%s' so this revision cannot be automatically " % tip_rev
message += "installed into a local Galaxy instance. Correct the following problems and reset metadata.%s" % new_line
for itc_tup in invalid_file_tups:
tool_file, exception_msg = itc_tup
if exception_msg.find( 'No such file or directory' ) >= 0:
exception_items = exception_msg.split()
missing_file_items = exception_items[ 7 ].split( '/' )
missing_file = missing_file_items[ -1 ].rstrip( '\'' )
if missing_file.endswith( '.loc' ):
sample_ext = '%s.sample' % missing_file
else:
sample_ext = missing_file
correction_msg = "This file refers to a missing file %s%s%s. " % \
( bold_start, str( missing_file ), bold_end )
correction_msg += "Upload a file named %s%s%s to the repository to correct this error." % \
( bold_start, sample_ext, bold_end )
else:
if as_html:
correction_msg = exception_msg
else:
correction_msg = exception_msg.replace( '<br/>', new_line ).replace( '<b>', bold_start ).replace( '</b>', bold_end )
message += "%s%s%s - %s%s" % ( bold_start, tool_file, bold_end, correction_msg, new_line )
return message
def get_headers( fname, sep, count=60, is_multi_byte=False ):
"""Returns a list with the first 'count' lines split by 'sep'."""
headers = []
    for idx, line in enumerate( file( fname ) ):
        line = line.rstrip( '\n\r' )
        if is_multi_byte:
            # Decode the separator as well, so unicode.split() does not rely on
            # implicit ASCII coercion of a byte string separator.
            line = unicode( line, 'utf-8' )
            sep = sep if isinstance( sep, unicode ) else unicode( sep, 'utf-8' )
        headers.append( line.split( sep ) )
        if idx + 1 == count:
            break
return headers
def get_tool_path_install_dir( partial_install_dir, shed_tool_conf_dict, tool_dict, config_elems ):
for elem in config_elems:
if elem.tag == 'tool':
if elem.get( 'guid' ) == tool_dict[ 'guid' ]:
tool_path = shed_tool_conf_dict[ 'tool_path' ]
relative_install_dir = os.path.join( tool_path, partial_install_dir )
return tool_path, relative_install_dir
elif elem.tag == 'section':
for section_elem in elem:
if section_elem.tag == 'tool':
if section_elem.get( 'guid' ) == tool_dict[ 'guid' ]:
tool_path = shed_tool_conf_dict[ 'tool_path' ]
relative_install_dir = os.path.join( tool_path, partial_install_dir )
return tool_path, relative_install_dir
return None, None
def handle_missing_index_file( app, tool_path, sample_files, repository_tools_tups, sample_files_copied ):
"""
Inspect each tool to see if it has any input parameters that are dynamically
generated select lists that depend on a .loc file. This method is not called
from the tool shed, but from Galaxy when a repository is being installed.
"""
for index, repository_tools_tup in enumerate( repository_tools_tups ):
tup_path, guid, repository_tool = repository_tools_tup
params_with_missing_index_file = repository_tool.params_with_missing_index_file
for param in params_with_missing_index_file:
options = param.options
missing_file_name = basic_util.strip_path( options.missing_index_file )
if missing_file_name not in sample_files_copied:
# The repository must contain the required xxx.loc.sample file.
for sample_file in sample_files:
sample_file_name = basic_util.strip_path( sample_file )
if sample_file_name == '%s.sample' % missing_file_name:
copy_sample_file( app, sample_file )
if options.tool_data_table and options.tool_data_table.missing_index_file:
options.tool_data_table.handle_found_index_file( options.missing_index_file )
sample_files_copied.append( options.missing_index_file )
break
# Reload the tool into the local list of repository_tools_tups.
repository_tool = app.toolbox.load_tool( os.path.join( tool_path, tup_path ), guid=guid )
repository_tools_tups[ index ] = ( tup_path, guid, repository_tool )
return repository_tools_tups, sample_files_copied
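# Hedged sketch with hypothetical file names, not Galaxy code: the matching rule
# used above, where a missing 'blast.loc' index file is satisfied by a repository
# file named 'blast.loc.sample'.
def _example_missing_index_match( missing_file_name='blast.loc',
                                  sample_files=( 'tool-data/blast.loc.sample', ) ):
    for sample_file in sample_files:
        if sample_file.rsplit( '/', 1 )[ -1 ] == '%s.sample' % missing_file_name:
            return sample_file
    return None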
def is_column_based( fname, sep='\t', skip=0, is_multi_byte=False ):
"""See if the file is column based with respect to a separator."""
headers = get_headers( fname, sep, is_multi_byte=is_multi_byte )
count = 0
if not headers:
return False
for hdr in headers[ skip: ]:
if hdr and hdr[ 0 ] and not hdr[ 0 ].startswith( '#' ):
if len( hdr ) > 1:
count = len( hdr )
break
if count < 2:
return False
for hdr in headers[ skip: ]:
if hdr and hdr[ 0 ] and not hdr[ 0 ].startswith( '#' ):
if len( hdr ) != count:
return False
return True
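# Hedged in-memory sketch of the column-consistency rule that is_column_based()
# applies to files: every non-comment row must split into the same, greater than
# one, number of columns. The example lines are hypothetical.
def _example_column_check( lines=( 'chr1\t100\t200', 'chr2\t300\t400', '# comment' ), sep='\t' ):
    rows = [ line.split( sep ) for line in lines if line and not line.startswith( '#' ) ]
    counts = set( len( row ) for row in rows )
    return len( counts ) == 1 and counts.pop() > 1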
def is_data_index_sample_file( file_path ):
"""
Attempt to determine if a .sample file is appropriate for copying to ~/tool-data when
a tool shed repository is being installed into a Galaxy instance.
"""
# Currently most data index files are tabular, so check that first. We'll assume that
# if the file is tabular, it's ok to copy.
if is_column_based( file_path ):
return True
# If the file is any of the following, don't copy it.
if checkers.check_html( file_path ):
return False
if checkers.check_image( file_path ):
return False
if checkers.check_binary( name=file_path ):
return False
if checkers.is_bz2( file_path ):
return False
if checkers.is_gzip( file_path ):
return False
if checkers.check_zip( file_path ):
return False
# Default to copying the file if none of the above are true.
return True
def new_state( trans, tool, invalid=False ):
"""Create a new `DefaultToolState` for the received tool. Only inputs on the first page will be initialized."""
state = galaxy.tools.DefaultToolState()
state.inputs = {}
if invalid:
# We're attempting to display a tool in the tool shed that has been determined to have errors, so is invalid.
return state
inputs = tool.inputs_by_page[ 0 ]
context = ExpressionContext( state.inputs, parent=None )
for input in inputs.itervalues():
try:
state.inputs[ input.name ] = input.get_initial_value( trans, context )
except:
state.inputs[ input.name ] = []
return state
def panel_entry_per_tool( tool_section_dict ):
    # Return True if tool_section_dict looks like this:
    # {<Tool guid> :
    #    [{ tool_config : <tool_config_file>,
    #       id: <ToolSection id>,
    #       version : <ToolSection version>,
    #       name : <ToolSection name>}]}
    # But not like this:
    # { id: <ToolSection id>, version : <ToolSection version>, name : <ToolSection name>}
if not tool_section_dict:
return False
if len( tool_section_dict ) != 3:
return True
    # Iterate items() - iterating the dict directly would yield bare keys and
    # fail to unpack into ( k, v ).
    for k, v in tool_section_dict.items():
if k not in [ 'id', 'version', 'name' ]:
return True
return False
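# Hedged illustration with hypothetical values: the two dictionary shapes that
# panel_entry_per_tool() distinguishes. The first returns True (an entry per tool
# guid), the second False (a bare ToolSection description).
_EXAMPLE_PER_TOOL_DICT = { '<guid>': [ { 'tool_config': 'blast.xml', 'id': 's1', 'version': '', 'name': 'NGS' } ] }
_EXAMPLE_SECTION_DICT = { 'id': 's1', 'version': '', 'name': 'NGS' }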
def reload_upload_tools( app ):
if hasattr( app, 'toolbox' ):
for tool_id in app.toolbox.tools_by_id:
tool = app.toolbox.tools_by_id[ tool_id ]
if isinstance( tool.tool_action, UploadToolAction ):
app.toolbox.reload_tool_by_id( tool_id )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/tool_shed/util/tool_util.py
|
Python
|
gpl-3.0
| 12,468
|
[
"Galaxy"
] |
2292a71b90d7e166596bdfaff66aa1935b8bee22281576f90b9c9b0a7e3e7ba2
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#******************************************************************************
# ZYNTHIAN PROJECT: Zynthian GUI
#
# Zynthian GUI Step-Sequencer Class
#
# Copyright (C) 2015-2020 Fernando Moyano <jofemodo@zynthian.org>
# Copyright (C) 2015-2020 Brian Walton <brian@riban.co.uk>
#
#******************************************************************************
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the LICENSE.txt file.
#
#******************************************************************************
import inspect
import sys
import os
import tkinter
import logging
import tkinter.font as tkFont
import json
from xml.dom import minidom
import threading
from time import sleep
from datetime import datetime
import time
import ctypes
from math import ceil
from os.path import dirname, realpath, basename
from zynlibs.zynsmf import zynsmf # Python wrapper for zynsmf (ensures initialised and wraps load() function)
from zynlibs.zynsmf.zynsmf import libsmf # Direct access to shared library
# Zynthian specific modules
from zyngui import zynthian_gui_config
from zyngui import zynthian_gui_layer
from zyngui import zynthian_gui_stepsequencer
from zyngui.zynthian_gui_fileselector import zynthian_gui_fileselector
from zynlibs.zynseq import zynseq
from zynlibs.zynseq.zynseq import libseq
#------------------------------------------------------------------------------
# Zynthian Step-Sequencer Pattern Editor GUI Class
#------------------------------------------------------------------------------
# Local constants
SELECT_BORDER = zynthian_gui_config.color_on
PLAYHEAD_CURSOR = zynthian_gui_config.color_on
CANVAS_BACKGROUND = zynthian_gui_config.color_panel_bg
CELL_BACKGROUND = zynthian_gui_config.color_panel_bd
CELL_FOREGROUND = zynthian_gui_config.color_panel_tx
GRID_LINE = zynthian_gui_config.color_tx_off
PLAYHEAD_HEIGHT = 5
CONFIG_ROOT = "/zynthian/zynthian-data/zynseq"
# Define encoder use: 0=Layer, 1=Back, 2=Snapshot, 3=Select
ENC_LAYER = 0
ENC_BACK = 1
ENC_SNAPSHOT = 2
ENC_SELECT = 3
EDIT_MODE_NONE = 0 # Edit mode disabled
EDIT_MODE_SINGLE = 1 # Edit mode enabled for selected note
EDIT_MODE_ALL = 2 # Edit mode enabled for all notes
# List of permissible steps per beat
STEPS_PER_BEAT = [1,2,3,4,6,8,12,24]
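# Hedged helper sketch, not used by the class below: map an arbitrary
# steps-per-beat value onto the index of the first allowed entry that is >= it,
# mirroring get_steps_per_beat_index() further down.
def _nearest_steps_per_beat_index(steps_per_beat):
	for index, allowed in enumerate(STEPS_PER_BEAT):
		if allowed >= steps_per_beat:
			return index # e.g. 5 -> index 4 (allowed value 6)
	return len(STEPS_PER_BEAT) - 1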
# Class implements step sequencer pattern editor
class zynthian_gui_patterneditor():
#TODO: Inherit child views from superclass
# Function to initialise class
def __init__(self, parent):
self.parent = parent
os.makedirs(CONFIG_ROOT, exist_ok=True)
self.edit_mode = EDIT_MODE_NONE # Enable encoders to adjust duration and velocity
self.zoom = 16 # Quantity of rows (notes) displayed in grid
self.duration = 1.0 # Current note entry duration
self.velocity = 100 # Current note entry velocity
self.copy_source = 1 # Index of pattern to copy
self.bank = 0 # Bank used for pattern editor sequence player
self.sequence = 0 # Sequence used for pattern editor sequence player
self.step_width = 40 # Grid column width in pixels
self.keymap_offset = 60 # MIDI note number of bottom row in grid
self.selected_cell = [0, 0] # Location of selected cell (column,row)
self.drag_velocity = False # True indicates drag will adjust velocity
self.drag_duration = False # True indicates drag will adjust duration
self.drag_start_velocity = None # Velocity value at start of drag
self.grid_drag_start = None # Coordinates at start of grid drag
		self.keymap = [] # Array of {"note":MIDI_NOTE_NUMBER, "name":"key name", "colour":"key colour"}; name and colour are optional
self.notes = ["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"]
#TODO: Get values from persistent storage
self.shown = False # True when GUI in view
self.zyngui = zynthian_gui_config.zyngui # Zynthian GUI configuration
		self.cells = [] # Array of cell indices
self.redraw_pending = 0 # What to redraw: 0=nothing, 1=existing elements, 2=recreate grid
self.title = "Pattern 0"
self.channel = 0
# Geometry vars
self.width=zynthian_gui_config.display_width
self.height=zynthian_gui_config.body_height
self.select_thickness = 1 + int(self.width / 500) # Scale thickness of select border based on screen resolution
self.grid_height = self.height - PLAYHEAD_HEIGHT
self.grid_width = int(self.width * 0.9)
self.piano_roll_width = self.width - self.grid_width
self.update_row_height()
# Main Frame
self.main_frame = tkinter.Frame(self.parent.main_frame)
self.main_frame.grid(row=1, column=0, sticky="nsew")
# Create pattern grid canvas
self.grid_canvas = tkinter.Canvas(self.main_frame,
width=self.grid_width,
height=self.grid_height,
bg=CANVAS_BACKGROUND,
bd=0,
highlightthickness=0)
self.grid_canvas.grid(row=0, column=1)
# Create velocity level indicator canvas
self.velocity_canvas = tkinter.Canvas(self.main_frame,
width=self.piano_roll_width,
height=PLAYHEAD_HEIGHT,
bg=CELL_BACKGROUND,
bd=0,
highlightthickness=0,
)
self.velocity_canvas.create_rectangle(0, 0, self.piano_roll_width * self.velocity / 127, PLAYHEAD_HEIGHT, fill='yellow', tags="velocityIndicator", width=0)
self.velocity_canvas.grid(column=0, row=2)
# Create pianoroll canvas
self.piano_roll = tkinter.Canvas(self.main_frame,
width=self.piano_roll_width,
height=self.grid_height,
bg=CANVAS_BACKGROUND,
bd=0,
highlightthickness=0)
self.piano_roll.grid(row=0, column=0)
self.piano_roll.bind("<ButtonPress-1>", self.on_pianoroll_press)
self.piano_roll.bind("<ButtonRelease-1>", self.on_pianoroll_release)
self.piano_roll.bind("<B1-Motion>", self.on_pianoroll_motion)
# Create playhead canvas
self.play_canvas = tkinter.Canvas(self.main_frame,
width=self.grid_width,
height=PLAYHEAD_HEIGHT,
bg=CANVAS_BACKGROUND,
bd=0,
highlightthickness=0)
self.play_canvas.create_rectangle(0, 0, self.step_width, PLAYHEAD_HEIGHT,
fill=PLAYHEAD_CURSOR,
state="normal",
width=0,
tags="playCursor")
self.play_canvas.grid(column=1, row=2)
self.playhead = 0
# self.startPlayheadHandler()
# Select a cell
self.select_cell(0, self.keymap_offset)
# Function to get name of this view
def get_name(self):
return "pattern editor"
def play_note(self, note):
if libseq.getPlayState(self.bank, self.sequence) == zynthian_gui_stepsequencer.SEQ_STOPPED:
libseq.playNote(note, self.velocity, self.channel, int(200 * self.duration))
#Function to set values of encoders
# note: Call after other routine uses one or more encoders
def setup_encoders(self):
self.parent.register_zyncoder(ENC_BACK, self)
self.parent.register_zyncoder(ENC_SELECT, self)
self.parent.register_zyncoder(ENC_LAYER, self)
self.parent.register_switch(ENC_SELECT, self, "SB")
self.parent.register_switch(ENC_SNAPSHOT, self, "SB")
# Function to show GUI
# params: Pattern parameters to edit {'pattern':x, 'channel':x, 'pad':x (optional)}
def show(self, params=None):
try:
self.channel = params['channel']
self.load_pattern(params['pattern'])
self.title = "Pattern %d" % (params['pattern'])
self.title = "Pattern %d (Seq: %s)" % (params['pattern'], params['name'])
except:
pass # Probably already populated and just returning from menu action or similar
libseq.setGroup(self.bank, self.sequence, 0xFF)
self.copy_source = self.pattern
self.setup_encoders()
self.main_frame.tkraise()
self.parent.set_title(self.title)
libseq.setPlayMode(self.bank, self.sequence, zynthian_gui_stepsequencer.SEQ_LOOP)
libseq.enableMidiInput(True)
self.shown=True
# Function to hide GUI
def hide(self):
self.shown=False
self.parent.unregister_zyncoder(zynthian_gui_stepsequencer.ENC_BACK)
self.parent.unregister_zyncoder(zynthian_gui_stepsequencer.ENC_SELECT)
self.parent.unregister_zyncoder(zynthian_gui_stepsequencer.ENC_LAYER)
self.parent.unregister_switch(zynthian_gui_stepsequencer.ENC_SELECT, "SB")
self.parent.unregister_switch(zynthian_gui_stepsequencer.ENC_SNAPSHOT, "SB")
libseq.setPlayState(self.bank, self.sequence, zynthian_gui_stepsequencer.SEQ_STOPPED)
libseq.enableMidiInput(False)
self.enable_edit(EDIT_MODE_NONE)
libseq.setRefNote(self.keymap_offset)
# Function to add menus
def populate_menu(self):
self.parent.add_menu({'Beats in pattern':{'method':self.parent.show_param_editor, 'params':{'min':1, 'max':16, 'get_value':libseq.getBeatsInPattern, 'on_change':self.on_menu_change, 'on_assert': self.assert_beats_in_pattern}}})
self.parent.add_menu({'Steps per beat':{'method':self.parent.show_param_editor, 'params':{'min':0, 'max':len(STEPS_PER_BEAT)-1, 'get_value':self.get_steps_per_beat_index, 'on_change':self.on_menu_change, 'on_assert':self.assert_steps_per_beat}}})
self.parent.add_menu({'-------------------':{}})
self.parent.add_menu({'Copy pattern':{'method':self.parent.show_param_editor, 'params':{'min':1, 'max':64872, 'get_value':self.get_pattern, 'on_change':self.on_menu_change,'on_assert':self.copy_pattern,'on_cancel':self.cancel_copy}}})
self.parent.add_menu({'Clear pattern':{'method':self.clear_pattern}})
if libseq.getScale():
self.parent.add_menu({'Transpose pattern':{'method':self.parent.show_param_editor, 'params':{'min':-1, 'max':1, 'value':0, 'on_change':self.on_menu_change}}})
self.parent.add_menu({'-------------------':{}})
self.parent.add_menu({'Vertical zoom':{'method':self.parent.show_param_editor, 'params':{'min':1, 'max':127, 'get_value':self.get_vertical_zoom, 'on_change':self.on_menu_change, 'on_assert':self.assert_zoom}}})
self.parent.add_menu({'Scale':{'method':self.parent.show_param_editor, 'params':{'min':0, 'max':self.get_scales(), 'get_value':libseq.getScale, 'on_change':self.on_menu_change}}})
self.parent.add_menu({'Tonic':{'method':self.parent.show_param_editor, 'params':{'min':-1, 'max':12, 'get_value':libseq.getTonic, 'on_change':self.on_menu_change}}})
self.parent.add_menu({'Rest note':{'method':self.parent.show_param_editor, 'params':{'min':-1, 'max':128, 'get_value':libseq.getInputRest, 'on_change':self.on_menu_change}}})
self.parent.add_menu({'Program change':{'method':self.parent.show_param_editor, 'params':{'min':0, 'max':128, 'get_value':self.get_program_change, 'on_change':self.on_menu_change}}})
self.parent.add_menu({'Input channel':{'method':self.parent.show_param_editor, 'params':{'min':0, 'max':16, 'get_value':self.get_input_channel, 'on_change':self.on_menu_change}}})
self.parent.add_menu({'Export to SMF':{'method':self.export_smf}})
# Function to export pattern to SMF
def export_smf(self, params):
smf = libsmf.addSmf()
tempo = libseq.getTempo()
libsmf.addTempo(smf, 0, tempo)
ticks_per_step = libsmf.getTicksPerQuarterNote(smf) / libseq.getStepsPerBeat()
for step in range(libseq.getSteps()):
time = int(step * ticks_per_step)
for note in range(128):
duration = libseq.getNoteDuration(step, note)
if duration == 0.0:
continue
duration = int(duration * ticks_per_step)
velocity = libseq.getNoteVelocity(step, note)
libsmf.addNote(smf, 0, time, duration, self.channel, note, velocity)
libsmf.setEndOfTrack(smf, 0, int(libseq.getSteps() * ticks_per_step))
zynsmf.save(smf, "/zynthian/zynthian-my-data/capture/pattern%d_%s.mid"%(self.pattern, datetime.now()))
# Function to set edit mode
def enable_edit(self, mode):
if mode <= EDIT_MODE_ALL:
self.edit_mode = mode
if mode:
self.parent.register_switch(ENC_BACK, self)
self.parent.register_zyncoder(ENC_SNAPSHOT, self)
self.parent.register_zyncoder(ENC_LAYER, self)
if mode == EDIT_MODE_SINGLE:
self.parent.set_title("Note Parameters", zynthian_gui_config.color_header_bg, zynthian_gui_config.color_panel_tx)
else:
self.parent.set_title("Note Parameters ALL", zynthian_gui_config.color_header_bg, zynthian_gui_config.color_panel_tx)
else:
self.parent.unregister_switch(ENC_BACK)
self.parent.unregister_zyncoder(ENC_SNAPSHOT)
self.parent.unregister_zyncoder(ENC_LAYER)
self.parent.set_title(self.title, zynthian_gui_config.color_panel_tx, zynthian_gui_config.color_header_bg)
# Function to assert steps per beat
def assert_steps_per_beat(self):
self.zyngui.show_confirm("Changing steps per beat may alter timing and/or lose notes?", self.do_steps_per_beat)
# Function to actually change steps per beat
def do_steps_per_beat(self, params=None):
libseq.setStepsPerBeat(STEPS_PER_BEAT[self.parent.get_param('Steps per beat', 'value')])
self.redraw_pending = 2
# Function to assert beats in pattern
def assert_beats_in_pattern(self):
value = self.parent.get_param('Beats in pattern', 'value')
if libseq.getLastStep() >= libseq.getStepsPerBeat() * value:
self.zyngui.show_confirm("Reducing beats in pattern will truncate pattern", self.set_beats_in_pattern)
else:
self.set_beats_in_pattern()
# Function to assert beats in pattern
def set_beats_in_pattern(self, params=None):
libseq.setBeatsInPattern(self.parent.get_param('Beats in pattern', 'value'))
self.redraw_pending = 2
# Function to get the index of the closest steps per beat in array of allowed values
# returns: Index of closest allowed value
def get_steps_per_beat_index(self):
steps_per_beat = libseq.getStepsPerBeat()
for index in range(len(STEPS_PER_BEAT)):
if STEPS_PER_BEAT[index] >= steps_per_beat:
return index
		return len(STEPS_PER_BEAT) - 1 # Requested value exceeds all allowed values - clamp to last index
# Function to get quantity of scales
# returns: Quantity of available scales
def get_scales(self):
data = []
try:
with open(CONFIG_ROOT + "/scales.json") as json_file:
data = json.load(json_file)
except:
logging.warning("Unable to open scales.json")
return len(data)
# Function to assert zoom level
def assert_zoom(self):
self.update_row_height()
self.redraw_pending = 2
self.select_cell()
# Function to populate keymap array
# returns Name of scale / map
def load_keymap(self):
try:
base_note = int(self.keymap[self.keymap_offset]['note'])
except:
base_note = 60
scale = libseq.getScale()
tonic = libseq.getTonic()
name = None
self.keymap = []
if scale == 0:
# Map
path = None
for layer in self.zyngui.screens['layer'].layers:
if layer.midi_chan == self.channel:
path = layer.get_presetpath()
break
if path:
path = path.split('#')[1]
try:
with open(CONFIG_ROOT + "/keymaps.json") as json_file:
data = json.load(json_file)
if path in data:
name = data[path]
xml = minidom.parse(CONFIG_ROOT + "/%s.midnam" % (name))
notes = xml.getElementsByTagName('Note')
self.scale = []
for note in notes:
self.keymap.append({'note':int(note.attributes['Number'].value), 'name':note.attributes['Name'].value})
except:
logging.warning("Unable to load keymaps.json")
		if name is None: # No keymap was found - fall back to a scale
# Scale
if scale > 0:
scale = scale - 1
libseq.setScale(scale + 1) # Use chromatic scale if map not found
if scale == 0:
for note in range(0,128):
new_entry = {"note":note}
key = note % 12
if key in (1,3,6,8,10): # Black notes
new_entry.update({"colour":"black"})
if key == 0: # 'C'
new_entry.update({"name":"C%d" % (note // 12 - 1)})
self.keymap.append(new_entry)
if note <= base_note:
self.keymap_offset = len(self.keymap) - 1
self.selected_cell[1] = self.keymap_offset
name = "Chromatic"
else:
with open(CONFIG_ROOT + "/scales.json") as json_file:
data = json.load(json_file)
if len(data) <= scale:
scale = 0
for octave in range(0,9):
for offset in data[scale]['scale']:
note = tonic + offset + octave * 12
if note > 127:
break
self.keymap.append({"note":note, "name":"%s%d"%(self.notes[note % 12],note // 12 - 1)})
if note <= base_note:
self.keymap_offset = len(self.keymap) - 1
self.selected_cell[1] = self.keymap_offset
name = data[scale]['name']
return name
# Function to handle start of pianoroll drag
def on_pianoroll_press(self, event):
if self.parent.lst_menu.winfo_viewable():
self.parent.hide_menu()
return
self.piano_roll_drag_start = event
index = self.keymap_offset + self.zoom - int(event.y / self.row_height) - 1
if index >= len(self.keymap):
return
note = self.keymap[index]['note']
libseq.playNote(note, 100, self.channel, 200)
# Function to handle pianoroll drag motion
def on_pianoroll_motion(self, event):
if not self.piano_roll_drag_start:
return
offset = int((event.y - self.piano_roll_drag_start.y) / self.row_height)
if offset == 0:
return
self.keymap_offset = self.keymap_offset + offset
if self.keymap_offset < 0:
self.keymap_offset = 0
if self.keymap_offset > len(self.keymap) - self.zoom:
self.keymap_offset = len(self.keymap) - self.zoom
self.piano_roll_drag_start = event
self.redraw_pending = 1
if self.selected_cell[1] < self.keymap_offset:
self.selected_cell[1] = self.keymap_offset
elif self.selected_cell[1] >= self.keymap_offset + self.zoom:
self.selected_cell[1] = self.keymap_offset + self.zoom - 1
self.select_cell()
# Function to handle end of pianoroll drag
def on_pianoroll_release(self, event):
self.piano_roll_drag_start = None
# Function to handle grid mouse down
# event: Mouse event
def on_grid_press(self, event):
if self.parent.lst_menu.winfo_viewable():
self.parent.hide_menu()
return
if self.parent.param_editor_item != None:
self.parent.hide_param_editor()
return
self.grid_drag_start = event
try:
col,row = self.grid_canvas.gettags(self.grid_canvas.find_withtag(tkinter.CURRENT))[0].split(',')
except:
return
note = self.keymap[self.keymap_offset + int(row)]["note"]
step = int(col)
if step < 0 or step >= libseq.getSteps():
return
self.drag_start_velocity = libseq.getNoteVelocity(step, note)
self.drag_start_duration = libseq.getNoteDuration(step, note)
self.drag_start_step = int(event.x / self.step_width)
if not self.drag_start_velocity:
self.play_note(note)
self.select_cell(int(col), self.keymap_offset + int(row))
# Function to handle grid mouse release
# event: Mouse event
def on_grid_release(self, event):
if not self.grid_drag_start:
return
if not (self.drag_velocity or self.drag_duration):
self.toggle_event(self.selected_cell[0], self.selected_cell[1])
self.drag_velocity = False
self.drag_duration = False
self.grid_drag_start = None
# Function to handle grid mouse drag
# event: Mouse event
def on_grid_drag(self, event):
if not self.grid_drag_start:
return
step = self.selected_cell[0]
index = self.selected_cell[1]
note = self.keymap[index]['note']
if self.drag_start_velocity:
# Selected cell has a note so we want to adjust its velocity or duration
if not self.drag_velocity and not self.drag_duration and (event.x > (self.drag_start_step + 1) * self.step_width or event.x < self.drag_start_step * self.step_width):
self.drag_duration = True
if not self.drag_duration and not self.drag_velocity and (event.y > self.grid_drag_start.y + self.row_height / 2 or event.y < self.grid_drag_start.y - self.row_height / 2):
self.drag_velocity = True
value = 0
if self.drag_velocity:
value = (self.grid_drag_start.y - event.y) / self.row_height
if value:
self.velocity = int(self.drag_start_velocity + value * self.height / 100)
if self.velocity > 127:
self.velocity = 127
return
if self.velocity < 1:
self.velocity = 1
return
self.velocity_canvas.coords("velocityIndicator", 0, 0, self.piano_roll_width * self.velocity / 127, PLAYHEAD_HEIGHT)
if libseq.getNoteDuration(self.selected_cell[0], note):
libseq.setNoteVelocity(self.selected_cell[0], note, self.velocity)
self.draw_cell(self.selected_cell[0], index)
if self.drag_duration:
value = int(event.x / self.step_width) - self.drag_start_step
duration = self.drag_start_duration + value
if duration != self.duration and duration > 0:
self.duration = duration
self.add_event(step, index) # Change length by adding event over previous one
else:
# Clicked on empty cell so want to add a new note by dragging towards the desired cell
x1 = self.selected_cell[0] * self.step_width # x pos of start of event
x3 = (self.selected_cell[0] + 1) * self.step_width # x pos right of event's first cell
y1 = self.grid_height - (self.selected_cell[1] - self.keymap_offset) * self.row_height # y pos of top of selected row
y2 = self.grid_height - (self.selected_cell[1] - self.keymap_offset + 1) * self.row_height # y pos of bottom of selected row
if event.x < x1:
self.select_cell(self.selected_cell[0] - 1, None)
elif event.x > x3:
self.select_cell(self.selected_cell[0] + 1, None)
elif event.y < y2:
self.select_cell(None, self.selected_cell[1] + 1)
self.play_note(self.keymap[self.selected_cell[1]]["note"])
elif event.y > y1:
self.select_cell(None, self.selected_cell[1] - 1)
self.play_note(self.keymap[self.selected_cell[1]]["note"])
# Function to toggle note event
# step: step (column) index
# index: key map index
# Returns: Note if note added else None
def toggle_event(self, step, index):
if step < 0 or step >= libseq.getSteps() or index >= len(self.keymap):
return
note = self.keymap[index]['note']
if libseq.getNoteVelocity(step, note):
self.remove_event(step, index)
else:
self.add_event(step, index)
return note
# Function to remove an event
# step: step (column) index
# index: keymap index
def remove_event(self, step, index):
if index >= len(self.keymap):
return
note = self.keymap[index]['note']
libseq.removeNote(step, note)
libseq.playNote(note, 0, self.channel) # Silence note if sounding
self.draw_row(index)
self.select_cell(step, index)
# Function to add an event
# step: step (column) index
# index: keymap index
def add_event(self, step, index):
note = self.keymap[index]["note"]
libseq.addNote(step, note, self.velocity, self.duration)
self.draw_row(index)
self.select_cell(step, index)
# Function to draw a grid row
# index: keymap index
def draw_row(self, index):
row = index - self.keymap_offset
self.grid_canvas.itemconfig("lastnotetext%d" % (row), state="hidden")
for step in range(libseq.getSteps()):
self.draw_cell(step, row)
# Function to get cell coordinates
# col: Column index
# row: Row index
# duration: Duration of cell in steps
# return: Coordinates required to draw cell
def get_cell(self, col, row, duration):
x1 = col * self.step_width + 1
y1 = (self.zoom - row - 1) * self.row_height + 1
x2 = x1 + self.step_width * duration - 1
y2 = y1 + self.row_height - 1
return [x1, y1, x2, y2]
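	# Hedged worked example of the geometry above, using assumed values
	# (step_width=40, row_height=20, zoom=16) instead of live GUI state
	def _example_cell_coords(self):
		step_width, row_height, zoom = 40, 20, 16
		col, row, duration = 2, 0, 1.5
		x1 = col * step_width + 1 # 81
		y1 = (zoom - row - 1) * row_height + 1 # 301: row 0 is drawn at the bottom
		x2 = x1 + step_width * duration - 1 # 140.0
		y2 = y1 + row_height - 1 # 320
		return [x1, y1, x2, y2]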
# Function to draw a grid cell
# step: Step (column) index
# row: Index of row
def draw_cell(self, step, row):
libseq.isPatternModified() # Avoid refresh redrawing whole grid
cellIndex = row * libseq.getSteps() + step # Cells are stored in array sequentially: 1st row, 2nd row...
if cellIndex >= len(self.cells):
return
note = self.keymap[row + self.keymap_offset]["note"]
velocity_colour = libseq.getNoteVelocity(step, note)
if velocity_colour:
velocity_colour = 70 + velocity_colour
elif libseq.getScale() == 1:
# Draw tramlines for white notes in chromatic scale
key = note % 12
if key in (0,2,4,5,7,9,11): # White notes
# if key in (1,3,6,8,10): # Black notes
velocity_colour += 30
else:
# Draw tramlines for odd rows in other scales and maps
if (row + self.keymap_offset) % 2:
velocity_colour += 30
duration = libseq.getNoteDuration(step, note)
if not duration:
duration = 1.0
fill_colour = "#%02x%02x%02x" % (velocity_colour, velocity_colour, velocity_colour)
cell = self.cells[cellIndex]
coord = self.get_cell(step, row, duration)
if cell:
# Update existing cell
self.grid_canvas.itemconfig(cell, fill=fill_colour)
self.grid_canvas.coords(cell, coord)
else:
# Create new cell
cell = self.grid_canvas.create_rectangle(coord, fill=fill_colour, width=0, tags=("%d,%d"%(step,row), "gridcell", "step%d"%step))
self.grid_canvas.tag_bind(cell, '<ButtonPress-1>', self.on_grid_press)
self.grid_canvas.tag_bind(cell, '<ButtonRelease-1>', self.on_grid_release)
self.grid_canvas.tag_bind(cell, '<B1-Motion>', self.on_grid_drag)
self.cells[cellIndex] = cell
if step + duration > libseq.getSteps():
self.grid_canvas.itemconfig("lastnotetext%d" % row, text="+%d" % (duration - libseq.getSteps() + step), state="normal")
# Function to draw grid
def draw_grid(self):
clear_grid = (self.redraw_pending == 2)
self.redraw_pending = 0
if libseq.getSteps() == 0:
return #TODO: Should we clear grid?
if self.keymap_offset > len(self.keymap) - self.zoom:
self.keymap_offset = len(self.keymap) - self.zoom
if self.keymap_offset < 0:
self.keymap_offset = 0
font = tkFont.Font(family=zynthian_gui_config.font_topbar[0], size=self.fontsize)
if clear_grid:
self.grid_canvas.delete(tkinter.ALL)
self.step_width = (self.grid_width - 2) / libseq.getSteps()
self.draw_pianoroll()
self.cells = [None] * self.zoom * libseq.getSteps()
self.play_canvas.coords("playCursor", 1 + self.playhead * self.step_width, 0, 1 + self.playhead * self.step_width + self.step_width, PLAYHEAD_HEIGHT)
# Draw cells of grid
self.grid_canvas.itemconfig("gridcell", fill="black")
# Redraw gridlines
self.grid_canvas.delete("gridline")
if libseq.getStepsPerBeat():
for step in range(0, libseq.getSteps() + 1, libseq.getStepsPerBeat()):
self.grid_canvas.create_line(step * self.step_width, 0, step * self.step_width, self.zoom * self.row_height - 1, fill=GRID_LINE, tags=("gridline"))
# Delete existing note names
self.piano_roll.delete("notename")
for row in range(0, self.zoom):
index = row + self.keymap_offset
			if index >= len(self.keymap):
break
if clear_grid:
# Create last note labels in grid
self.grid_canvas.create_text(self.grid_width - self.select_thickness, int(self.row_height * (self.zoom - row - 0.5)), state="hidden", tags=("lastnotetext%d" % (row), "lastnotetext"), font=font, anchor="e")
self.draw_row(index)
# Update pianoroll keys
id = "row%d" % (row)
try:
name = self.keymap[index]["name"]
except:
name = None
try:
colour = self.keymap[index]["colour"]
except:
colour = "white"
self.piano_roll.itemconfig(id, fill=colour)
if name:
self.piano_roll.create_text((2, self.row_height * (self.zoom - row - 0.5)), text=name, font=font, anchor="w", fill=CANVAS_BACKGROUND, tags="notename")
if self.keymap[index]['note'] % 12 == libseq.getTonic():
self.grid_canvas.create_line(0, (self.zoom - row) * self.row_height, self.grid_width, (self.zoom - row) * self.row_height, fill=GRID_LINE, tags=("gridline"))
# Set z-order to allow duration to show
if clear_grid:
for step in range(libseq.getSteps()):
self.grid_canvas.tag_lower("step%d"%step)
self.select_cell()
# Function to draw pianoroll key outlines (does not fill key colour)
def draw_pianoroll(self):
self.piano_roll.delete(tkinter.ALL)
for row in range(self.zoom):
x1 = 0
y1 = self.get_cell(0, row, 1)[1]
x2 = self.piano_roll_width
y2 = y1 + self.row_height - 1
id = "row%d" % (row)
id = self.piano_roll.create_rectangle(x1, y1, x2, y2, width=0, tags=id)
# Function to update selectedCell
# step: Step (column) of selected cell (Optional - default to reselect current column)
# index: Index of keymap to select (Optional - default to reselect current row) Maybe outside visible range to scroll display
def select_cell(self, step=None, index=None):
if len(self.keymap) == 0:
return
redraw = False
if step == None:
step = self.selected_cell[0]
if index == None:
index = self.selected_cell[1]
if step < 0:
step = 0
if step >= libseq.getSteps():
step = libseq.getSteps() - 1
if index >= len(self.keymap):
index = len(self.keymap) - 1
if index >= self.keymap_offset + self.zoom:
# Note is off top of display
self.keymap_offset = index - self.zoom + 1
redraw = True
if index < 0:
index = 0
if index < self.keymap_offset:
# Note is off bottom of display
self.keymap_offset = index
redraw = True
if redraw:
self.redraw_pending = 1
row = index - self.keymap_offset
note = self.keymap[index]['note']
# Skip hidden (overlapping) cells
for previous in range(int(step) - 1, -1, -1):
prev_duration = ceil(libseq.getNoteDuration(previous, note))
if not prev_duration:
continue
if prev_duration > step - previous:
if step > self.selected_cell[0]:
step = previous + prev_duration
else:
step = previous
break
if step < 0:
step = 0
if step >= libseq.getSteps():
step = libseq.getSteps() - 1
self.selected_cell = [int(step), index]
cell = self.grid_canvas.find_withtag("selection")
		duration = libseq.getNoteDuration(step, note) # Lookup uses the MIDI note, not the display row
if not duration:
duration = self.duration
coord = self.get_cell(step, row, duration)
		# Expand the selection outline one pixel beyond the cell's top-left corner
		coord[0] = coord[0] - 1
		coord[1] = coord[1] - 1
if not cell:
cell = self.grid_canvas.create_rectangle(coord, fill="", outline=SELECT_BORDER, width=self.select_thickness, tags="selection")
else:
self.grid_canvas.coords(cell, coord)
self.grid_canvas.tag_raise(cell)
# Function to calculate row height
def update_row_height(self):
self.row_height = (self.grid_height - 2) / self.zoom
self.fontsize = int(self.row_height * 0.5)
if self.fontsize > 20:
self.fontsize = 20 # Ugly font scale limiting
# Function to clear a pattern
def clear_pattern(self, params=None):
self.zyngui.show_confirm("Clear pattern %d?"%(self.pattern), self.do_clear_pattern)
# Function to actually clear pattern
def do_clear_pattern(self, params=None):
libseq.clear()
self.redraw_pending = 2
self.select_cell()
if libseq.getPlayState(self.bank, self.sequence, 0) != zynthian_gui_stepsequencer.SEQ_STOPPED:
libseq.sendMidiCommand(0xB0 | self.channel, 123, 0) # All notes off
# Function to copy pattern
def copy_pattern(self):
if libseq.getLastStep() == -1:
self.do_copy_pattern(self.pattern)
else:
self.zyngui.show_confirm("Overwrite pattern %d with content from pattern %d?"%(self.pattern, self.copy_source), self.do_copy_pattern, self.pattern)
self.load_pattern(self.copy_source)
# Function to cancel copy pattern operation
def cancel_copy(self):
self.load_pattern(self.copy_source)
# Function to actually copy pattern
def do_copy_pattern(self, dest_pattern):
libseq.copyPattern(self.copy_source, dest_pattern)
self.pattern = dest_pattern
self.load_pattern(self.pattern)
self.copy_source = self.pattern
self.parent.arranger.pattern = self.pattern
self.parent.arranger.pattern_canvas.itemconfig("patternIndicator", text="%d"%(self.pattern))
self.title = "Pattern %d" % (self.pattern)
self.parent.set_title(self.title)
# Function to get pattern index
def get_pattern(self):
return self.pattern
# Function to get program change at start of pattern
# returns: Program change number (1..128) or 0 for none
def get_program_change(self):
program = libseq.getProgramChange(0) + 1
if program > 128:
program = 0
return program
# Function to get MIDI channel listening
	# returns: MIDI channel (1..16) or 0 for none
def get_input_channel(self):
channel = libseq.getInputChannel() + 1
if channel > 16:
channel = 0
return channel
# Function to get vertical zoom
def get_vertical_zoom(self):
return self.zoom
# Function to handle menu editor change
# params: Menu item's parameters
# returns: String to populate menu editor label
# note: params is a dictionary with required fields: min, max, value
def on_menu_change(self, params):
menu_item = self.parent.param_editor_item
value = params['value']
if value < params['min']:
value = params['min']
if value > params['max']:
value = params['max']
params['value'] = value
if menu_item == 'Pattern':
self.pattern = value
self.copy_source = value
self.load_pattern(value)
elif menu_item =='Copy pattern':
self.load_pattern(value)
return "Copy %d => %d ?" % (self.copy_source, value)
elif menu_item == 'Transpose pattern':
if libseq.getScale() == 0:
self.parent.hide_param_editor()
return
if libseq.getScale() > 1:
# Only allow transpose when showing chromatic scale
libseq.setScale(1)
self.load_keymap()
self.redraw_pending = 1
if (value != 0 and libseq.getScale()):
libseq.transpose(value)
self.parent.set_param(menu_item, 'value', 0)
self.keymap_offset = self.keymap_offset + value
if self.keymap_offset > 128 - self.zoom:
self.keymap_offset = 128 - self.zoom
elif self.keymap_offset < 0:
self.keymap_offset = 0
else:
self.selected_cell[1] = self.selected_cell[1] + value
self.redraw_pending = 1
self.select_cell()
return "Transpose +/-"
elif menu_item == 'Vertical zoom':
self.zoom = value
elif menu_item == 'Steps per beat':
steps_per_beat = STEPS_PER_BEAT[value]
# libseq.setStepsPerBeat(steps_per_beat)
self.redraw_pending = 2
value = steps_per_beat
elif menu_item == 'Scale':
libseq.setScale(value)
name = self.load_keymap()
self.redraw_pending = 1
return "Keymap: %s" % (name)
elif menu_item == 'Tonic':
if value < 0:
value = 11
if value > 11:
value = 0
self.parent.set_param('Tonic', 'value', value)
offset = value - libseq.getTonic()
libseq.setTonic(value)
self.load_keymap()
self.redraw_pending = 1
return "Tonic: %s" % (self.notes[value])
elif menu_item == 'Program change':
if value == 0:
libseq.removeProgramChange(0)
return 'Program change: None'
else:
libseq.addProgramChange(0, value - 1)
elif menu_item == 'Input channel':
if value == 0:
libseq.setInputChannel(0xFF)
return 'Input channel: None'
libseq.setInputChannel(value - 1)
elif menu_item == 'Rest note':
if value < 0 or value > 127:
value = 128
libseq.setInputRest(value)
if value > 127:
return "Rest note: None"
return "Rest note: %s%d(%d)" % (['C','C#','D','D#','E','F','F#','G','G#','A','A#','B'][value%12],int(value/12)-1, value)
return "%s: %d" % (menu_item, value)
# Function to load new pattern
# index: Pattern index
def load_pattern(self, index):
libseq.clearSequence(self.bank, self.sequence)
libseq.setChannel(self.bank, self.sequence, 0, self.channel)
self.pattern = index
libseq.selectPattern(index)
libseq.addPattern(self.bank, self.sequence, 0, 0, index)
if self.selected_cell[0] >= libseq.getSteps():
self.selected_cell[0] = int(libseq.getSteps()) - 1
self.keymap_offset = libseq.getRefNote()
self.load_keymap()
self.redraw_pending = 2
self.select_cell(0, int(self.keymap_offset + self.zoom / 2))
self.play_canvas.coords("playCursor", 1, 0, 1 + self.step_width, PLAYHEAD_HEIGHT)
# Function to refresh display
def refresh_status(self):
step = libseq.getPatternPlayhead(self.bank, self.sequence, 0)
if self.playhead != step:
self.playhead = step
self.play_canvas.coords("playCursor", 1 + self.playhead * self.step_width, 0, 1 + self.playhead * self.step_width + self.step_width, PLAYHEAD_HEIGHT)
if self.redraw_pending or libseq.isPatternModified():
self.draw_grid()
# Function to handle zyncoder value change
# encoder: Zyncoder index [0..4]
# value: Current value of zyncoder
def on_zyncoder(self, encoder, value):
if encoder == ENC_BACK:
if self.edit_mode == EDIT_MODE_SINGLE:
self.velocity = self.velocity + value
if self.velocity > 127:
self.velocity = 127
return
if self.velocity < 1:
self.velocity = 1
return
self.velocity_canvas.coords("velocityIndicator", 0, 0, self.piano_roll_width * self.velocity / 127, PLAYHEAD_HEIGHT)
note = self.keymap[self.selected_cell[1]]["note"]
if libseq.getNoteDuration(self.selected_cell[0], note):
libseq.setNoteVelocity(self.selected_cell[0], note, self.velocity)
self.draw_cell(self.selected_cell[0], self.selected_cell[1])
self.parent.set_title("Velocity: %d" % (self.velocity), None, None, 2)
elif self.edit_mode == EDIT_MODE_ALL:
libseq.changeVelocityAll(value)
self.parent.set_title("ALL Velocity", None, None, 2)
else:
self.select_cell(None, self.selected_cell[1] - value)
elif encoder == ENC_SELECT:
if self.edit_mode == EDIT_MODE_SINGLE:
if value > 0:
self.duration = self.duration + 1
if value < 0:
self.duration = self.duration - 1
if self.duration > libseq.getSteps():
self.duration = libseq.getSteps()
return
if self.duration < 1:
self.duration = 1
return
note = self.keymap[self.selected_cell[1]]["note"]
if libseq.getNoteDuration(self.selected_cell[0], note):
					self.add_event(self.selected_cell[0], self.selected_cell[1]) # add_event() expects a keymap index
else:
self.select_cell()
self.parent.set_title("Duration: %0.1f steps" % (self.duration), None, None, 2)
elif self.edit_mode == EDIT_MODE_ALL:
if value > 0:
libseq.changeDurationAll(1)
if value < 0:
libseq.changeDurationAll(-1)
self.parent.set_title("ALL DURATION", None, None, 2)
else:
self.select_cell(self.selected_cell[0] + value, None)
elif encoder == ENC_SNAPSHOT:
if self.edit_mode == EDIT_MODE_SINGLE:
if value > 0:
self.duration = self.duration + 0.1
if value < 0:
self.duration = self.duration - 0.1
if self.duration > libseq.getSteps():
self.duration = libseq.getSteps()
return
if self.duration < 0.1:
self.duration = 0.1
return
note = self.keymap[self.selected_cell[1]]["note"]
if libseq.getNoteDuration(self.selected_cell[0], note):
					self.add_event(self.selected_cell[0], self.selected_cell[1]) # add_event() expects a keymap index
else:
self.select_cell()
self.parent.set_title("Duration: %0.1f steps" % (self.duration), None, None, 2)
elif self.edit_mode == EDIT_MODE_ALL:
if value > 0:
libseq.changeDurationAll(0.1)
if value < 0:
libseq.changeDurationAll(-0.1)
self.parent.set_title("ALL DURATION", None, None, 2)
# elif encoder == ENC_LAYER and not self.parent.lst_menu.winfo_viewable():
# Show menu
# self.parent.toggle_menu()
# return
# Function to handle switch press
# switch: Switch index [0=Layer, 1=Back, 2=Snapshot, 3=Select]
# type: Press type ["S"=Short, "B"=Bold, "L"=Long]
# returns True if action fully handled or False if parent action should be triggered
def on_switch(self, switch, type):
if self.parent.lst_menu.winfo_viewable():
return False
if self.parent.param_editor_item:
return False
if switch == ENC_SELECT:
if type == "S":
if self.edit_mode:
self.enable_edit(EDIT_MODE_NONE)
return True
note = self.toggle_event(self.selected_cell[0], self.selected_cell[1])
if note:
self.play_note(note)
elif type == "B":
if self.edit_mode == EDIT_MODE_NONE:
self.enable_edit(EDIT_MODE_SINGLE)
else:
self.enable_edit(EDIT_MODE_ALL)
return True
elif switch == ENC_SNAPSHOT:
if type == "B":
libseq.setTransportToStartOfBar()
return True
if libseq.getPlayState(self.bank, self.sequence) == zynthian_gui_stepsequencer.SEQ_STOPPED:
libseq.setPlayState(self.bank, self.sequence, zynthian_gui_stepsequencer.SEQ_STARTING)
else:
libseq.setPlayState(self.bank, self.sequence, zynthian_gui_stepsequencer.SEQ_STOPPED)
return True
elif switch == ENC_BACK:
self.enable_edit(EDIT_MODE_NONE)
return True
return False
#------------------------------------------------------------------------------
|
zynthian/zynthian-ui
|
zyngui/zynthian_gui_patterneditor.py
|
Python
|
gpl-3.0
| 40,318
|
[
"Brian"
] |
4c045a93b20811c5aac5276d78fcc6dcc37e9ba500747581cbf39d34242cbad2
|
import numpy as np
try:
from scipy import constants
C_LIGHT = constants.c/1000.0
except TypeError: # This can happen during documentation builds.
C_LIGHT = 299792458.0/1000.0
# code to make mock Lya spectra following McDonald et al. (2006)
# copied from c++ code in Cosmology/LNLyaF
def power_amplitude(z):
"""Add redshift evolution to the Gaussian power spectrum."""
return 58.6*pow((1+z)/4.0,-2.82)
def power_kms(z_c,k_kms,dv_kms,white_noise):
"""Return Gaussian P1D at different wavenumbers k_kms (in s/km), fixed z_c.
Other arguments:
dv_kms: if non-zero, will multiply power by top-hat kernel of this width
white_noise: if set to True, will use constant power of 100 km/s
"""
if white_noise: return np.ones_like(k_kms)*100.0
    # power spectrum used to make mocks, from McDonald et al. (2006)
A = power_amplitude(z_c)
k1 = 0.001
n = 0.7
R1 = 5.0
# compute term without smoothing
P = A * (1.0+pow(0.01/k1,n)) / (1.0+pow(k_kms/k1,n))
# smooth with Gaussian and top hat
kdv = np.fmax(k_kms*dv_kms,0.000001)
P *= np.exp(-pow(k_kms*R1,2)) * pow(np.sin(kdv/2)/(kdv/2),2)
return P
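# Hedged usage sketch (illustrative numbers only): evaluate the mock P1D at a few
# wavenumbers for z_c=3.0, 10 km/s cells and the realistic (non white-noise) shape.
def _example_power_kms():
    k_kms = np.array([0.001, 0.01, 0.1])
    return power_kms(3.0, k_kms, dv_kms=10.0, white_noise=False)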
def get_tau(z,density):
"""transform lognormal density to optical depth, at each z"""
# add redshift evolution to mean optical depth
A = 0.374*pow((1+z)/4.0,5.10)
return A*density
class MockMaker(object):
"""Class to generate 1D mock Lyman alpha skewers."""
# central redshift, sets center of skewer and pivot point in z-evolution
z_c=3.0
def __init__(self, N2=15, dv_kms=10.0, seed=666, white_noise=False):
"""Construct object to make 1D Lyman alpha mocks.
Optional arguments:
N2: the number of cells in the skewer will be 2^N2
dv_kms: cell width (in km/s)
seed: starting seed for the random number generator
white_noise: use constant power instead of realistic P1D. """
self.N = np.power(2,N2)
self.dv_kms = dv_kms
# setup random number generator using seed
self.gen = np.random.RandomState(seed)
self.white_noise = white_noise
def get_density(self,var_delta,z,delta):
"""Transform Gaussian field delta to lognormal density, at each z."""
tau_pl=2.0
# relative amplitude
rel_amp = power_amplitude(z)/power_amplitude(self.z_c)
return np.exp(tau_pl*(delta*np.sqrt(rel_amp)-0.5*var_delta*rel_amp))
def get_redshifts(self):
"""Get redshifts for each cell in the array (centered at z_c)."""
N = self.N
L_kms = N * self.dv_kms
c_kms = C_LIGHT
if (L_kms > 4 * c_kms):
print('Array is too long, approximations break down.')
raise SystemExit
# get indices
i = range(N)
z = (1+self.z_c)*pow(1-(i-N/2+1)*self.dv_kms/2.0/c_kms,-2)-1
return z
def get_gaussian_fields(self,Ns=1,new_seed=None):
"""Generate Ns Gaussian fields at redshift z_c.
If new_seed is set, it will reset random generator with it."""
if new_seed:
self.gen = np.random.RandomState(new_seed)
# length of array
N = self.N
# number of Fourier modes
NF=int(N/2+1)
# get frequencies (wavenumbers in units of s/km)
k_kms = np.fft.rfftfreq(N)*2*np.pi/self.dv_kms
# get power evaluated at each k
P_kms = power_kms(self.z_c,k_kms,self.dv_kms,self.white_noise)
# compute also expected variance, will be used in lognormal transform
dk_kms = 2*np.pi/(N*self.dv_kms)
var_delta=np.sum(P_kms)*dk_kms/np.pi
        # Nyquist frequency is counted twice in the variance, and it should not be
var_delta *= NF/(NF+1)
# generate random Fourier modes
modes = np.empty([Ns,NF], dtype=complex)
modes[:].real = np.reshape(self.gen.normal(size=Ns*NF),[Ns,NF])
modes[:].imag = np.reshape(self.gen.normal(size=Ns*NF),[Ns,NF])
# normalize to desired power (and enforce real for i=0, i=NF-1)
modes[:,0] = modes[:,0].real * np.sqrt(P_kms[0])
modes[:,-1] = modes[:,-1].real * np.sqrt(P_kms[-1])
modes[:,1:-1] *= np.sqrt(0.5*P_kms[1:-1])
# inverse FFT to get (normalized) delta field
delta = np.fft.irfft(modes) * np.sqrt(N/self.dv_kms)
return delta, var_delta
def get_lya_skewers(self,Ns=10,new_seed=None):
"""Return Ns Lyman alpha skewers (wavelength, flux).
If new_seed is set, it will reset random generator with it."""
if new_seed:
self.gen = np.random.RandomState(new_seed)
# get redshift for all cells in the skewer
z = self.get_redshifts()
delta, var_delta = self.get_gaussian_fields(Ns)
#var_delta = np.var(delta)
density = self.get_density(var_delta,z,delta)
tau = get_tau(z,density)
flux = np.exp(-tau)
wave = 1215.67*(1+z)
return wave, flux
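# Hedged end-to-end sketch (parameters are illustrative): draw Gaussian skewers,
# compare their sample variance with the analytic var_delta, then generate a few
# Lya flux skewers.
def _example_mock_usage():
    mock = MockMaker(N2=12, dv_kms=10.0, seed=1)
    delta, var_delta = mock.get_gaussian_fields(Ns=100)
    wave, flux = mock.get_lya_skewers(Ns=5)
    return np.var(delta), var_delta, wave.shape, flux.shape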
|
desihub/desisim
|
py/desisim/lya_mock_p1d.py
|
Python
|
bsd-3-clause
| 5,006
|
[
"Gaussian"
] |
37d2eeb0e4acafdf82fe35292e42287564a152f05c6e0ba5d264f377068108aa
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#
#
#####################################################################################
"""
程序功能说明:
1.clip-seq第一步:将有overlap的reads进行cluster
程序设计思路:
利用HTSeq包
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../../../")
from ablib.utils.tools import *
from ablib.utils.iv_cluster import *
import gffutils
import HTSeq
import numpy
import multiprocessing
from matplotlib import pyplot
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option(
'-c', '--chrlen', dest='chrlen', action='store',
type='string', help='chrlen file')
p.add_option(
'-b', '--bam', dest='bam', action='store',
type='string', help='bam file')
p.add_option(
'-o', '--outfile', dest='outfile', default='ctss', action='store',
type='string', help='ctss')
p.add_option(
'-n', '--samplename', dest='samplename', default='', action='store',
type='string', help='sample name,default is ""')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option(
'-O', '--outDir', dest='outDir', default='./', action='store',
type='string', help='output directory', metavar="DIR")
group.add_option(
'-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option(
'-P', '--logPrefix', dest='logPrefix', default='', action='store',
type='string', help='log file prefix')
group.add_option(
'-E', '--email', dest='email', default='none', action='store',
type='string', help='email address, if you want get a email when this job is finished,default is no email',
metavar="EMAIL")
group.add_option(
'-Q', '--quiet', dest='quiet', default=False, action='store_true',
help='do not print messages to stdout')
group.add_option(
'-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',
help='keep temp dir')
group.add_option(
'-T', '--test', dest='isTest', default=False, action='store_true',
help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sample = ""
if opt.samplename != "":
sample = opt.samplename + '.'
if opt.outfile == 'ctss':
opt.outfile = sample + 'ctss'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M',
filename=logFilename,
filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
if not os.path.isfile(opt.bam):
        print(opt.bam + ' does not exist, please check your file')
sys.exit(1)
os.chdir(opt.outDir)
chrlen_dict = {}
for eachLine in open(opt.chrlen):
line = eachLine.strip().split('\t')
chrlen_dict[line[0]] = line[1]
server = multiprocessing.Manager()
jobs = []
m = 0
n = 0
chr_dict = readBamHeader(opt.bam)
for chr in chrlen_dict:
if not chr in chr_dict:
continue
chr_iv = HTSeq.GenomicInterval(chr, 0, int(chrlen_dict[chr]), ".")
t1 = multiprocessing.Process(target=CTSS, args=(opt.bam, chr_iv, True))
jobs.append(t1)
n += 1
    # Start and join the per-chromosome workers in batches of 25 to bound concurrency
    while m < n:
for i in range(m, m + 25):
if i >= n:
break
jobs[i].start()
for i in range(m, m + 25):
if i >= n:
break
jobs[i].join()
m = m + 25
os.system("cat _ctss_* > " + opt.outfile + " && rm -rf _ctss_*")
if __name__ == '__main__':
main()
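# Hedged standalone sketch (hypothetical job list) of the batch-of-25 pattern used
# in main(): start at most 25 processes, join them all, then move on to the next
# batch so no more than 25 run at once.
def _example_run_in_batches(jobs, batch=25):
    for start in range(0, len(jobs), batch):
        chunk = jobs[start:start + batch]
        for job in chunk:
            job.start()
        for job in chunk:
            job.join()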
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# if not opt.keepTemp:
# os.system('rm -rf ' + tempPath)
# logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = int((currentTime - startTime).total_seconds())  # total seconds; .seconds alone would drop whole days
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
|
ablifedev/ABLIRC
|
ABLIRC/bin/public/iv_cluster/get_CTSS.py
|
Python
|
mit
| 10,604
|
[
"HTSeq"
] |
45fed5b6f78ce9fcbf258b7d53892ca4d977be9ce26605ce2186e5f6d567517b
|
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - August 2015
#
# Looks for files of a given name, opens them in SQLite, queries the DB,
# and makes artifacts
import jarray
import inspect
from java.lang import Class
from java.lang import System
from java.util.logging import Level
from java.util import ArrayList
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from java.sql import ResultSet
from java.sql import SQLException
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class ContactsDbIngestModuleFactory(IngestModuleFactoryAdapter):
# TODO - Replace with your module's name
moduleName = "Contacts Db Analyzer"
def getModuleDisplayName(self):
return self.moduleName
def getModuleDescription(self):
return "Sample module that parses contacts.db"
def getModuleVersionNumber(self):
return "1.0"
def isDataSourceIngestModuleFactory(self):
return True
def createDataSourceIngestModule(self, ingestOptions):
return ContactsDbIngestModule()
# Data Source-level ingest module. One gets created per data source.
class ContactsDbIngestModule(DataSourceIngestModule):
_logger = Logger.getLogger(ContactsDbIngestModuleFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
def __init__(self):
self.context = None
# Where any setup and configuration is done
# 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
# See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
def startUp(self, context):
self.context = context
# Where the analysis is done.
# The 'data_source' object being passed in is of type org.sleuthkit.datamodel.Content.
# See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/latest/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
# 'progress_bar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress
# See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html
def process(self, data_source, progress_bar):
# we don't know how much work there is yet
progress_bar.switchToIndeterminate()
# Find files named contacts.db anywhere in the data source.
# TODO - replace with your database name and parent path.
app_databases = AppSQLiteDB.findAppDatabases(data_source, "contacts.db", True, "")
num_databases = len(app_databases)
progress_bar.switchToDeterminate(num_databases)
databases_processed = 0
try:
# Iterate through all the database files returned
for app_database in app_databases:
# Check if the user pressed cancel while we were busy
if self.context.isJobCancelled():
return IngestModule.ProcessResult.OK
self.log(Level.INFO, "Processing file: " + app_database.getDBFile().getName())
# Query the contacts table in the database and get all columns.
try:
# TODO - replace with your query
result_set = app_database.runQuery("SELECT * FROM contacts")
except SQLException as e:
self.log(Level.INFO, "Error querying database for contacts table (" + e.getMessage() + ")")
return IngestModule.ProcessResult.OK
try:
#Get the current case for the CommunicationArtifactsHelper.
current_case = Case.getCurrentCaseThrows()
except NoCurrentCaseException as ex:
self.log(Level.INFO, "Case is closed (" + ex.getMessage() + ")")
return IngestModule.ProcessResult.OK
# Create an instance of the helper class
# TODO - Replace with your parser name and Account.Type
helper = CommunicationArtifactsHelper(current_case.getSleuthkitCase(),
ContactsDbIngestModuleFactory.moduleName, app_database.getDBFile(), Account.Type.DEVICE, self.context.getJobId())
# Iterate through each row and create artifacts
while result_set.next():
try:
# TODO - Replace these calls with your column names and types
# Ex of other types: result_set.getInt("contact_type") or result_set.getLong("datetime")
name = result_set.getString("name")
email = result_set.getString("email")
phone = result_set.getString("phone")
except SQLException as e:
self.log(Level.INFO, "Error getting values from contacts table (" + e.getMessage() + ")")
helper.addContact(name, phone, "", "", email)
app_database.close()
databases_processed += 1
progress_bar.progress(databases_processed)
except TskCoreException as e:
self.log(Level.INFO, "Error inserting or reading from the Sleuthkit case (" + e.getMessage() + ")")
except BlackboardException as e:
self.log(Level.INFO, "Error posting artifact to the Blackboard (" + e.getMessage() + ")")
# After all databases, post a message to the ingest messages in box.
# TODO - update your module name here
message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
ContactsDbIngestModuleFactory.moduleName, "Found %d files" % num_databases)
IngestServices.getInstance().postMessage(message)
return IngestModule.ProcessResult.OK
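# A minimal sketch (run with CPython outside Autopsy; the module above runs
# under Jython) for creating a test contacts.db to exercise this module. The
# schema is an assumption inferred from the SELECT and getString calls above.
def make_test_contacts_db(path="contacts.db"):
    import sqlite3  # local import: available in CPython, not in Jython
    conn = sqlite3.connect(path)
    conn.execute("CREATE TABLE IF NOT EXISTS contacts (name TEXT, email TEXT, phone TEXT)")
    conn.executemany(
        "INSERT INTO contacts (name, email, phone) VALUES (?, ?, ?)",
        [("Alice Example", "alice@example.com", "555-0100"),
         ("Bob Example", "bob@example.com", "555-0101")])
    conn.commit()
    conn.close()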
|
sleuthkit/autopsy
|
pythonExamples/Aug2015DataSourceTutorial/FindContactsDb_v2.py
|
Python
|
apache-2.0
| 8,495
|
[
"Brian"
] |
3dcca3457c4eab8fe17ed241fd41b911fa6c324381a3c3cb98a697e349ed81d2
|
"""
Trim GTFS feeds according to user input.
"""
import csv
import curses
import functools
import logging
import operator
import os
import sys
import typing
import zipfile
from . import chooser
from . import gtfs
logger = logging.getLogger(__name__)
ROUTE_TYPES_FILENAME: str = os.path.join(
os.path.dirname(__file__), 'route_types.csv'
)
def _get_choice(
window,
name: str,
rows: typing.Collection[gtfs.CSV_Row],
id_field: str,
display_fields: typing.Sequence[str],
) -> str:
def display(row: gtfs.CSV_Row) -> str:
for display_field in display_fields:
if display_field in row and row[display_field]:
return row[display_field]
raise KeyError((
'Row {row} has no value for any of display fields {dfs}.'
).format(row=row, dfs=display_fields))
curses.curs_set(False)
question: str = 'Please choose a{vow} {nam}.'.format(
nam=name, vow='n' if name and name[0] in 'aeiou' else ''
)
#TODO: we'll get a ValueError here if `rows` is empty.
choices, sorted_rows = zip(*sorted(
zip(map(display, rows), rows),
#We don't want lexicographic sorting, since we likely can't compare
#elements of `rows` with one another.
key=lambda pair: pair[0],
))
index: int = chooser.Chooser(question, choices)(window)
return sorted_rows[index][id_field]
get_choice = functools.partial(curses.wrapper, _get_choice)
def stop_sequences(
rows: typing.Iterable[gtfs.CSV_Row]
) -> typing.Iterable[int]:
return map(int, map(operator.itemgetter('stop_sequence'), rows))
def trips_through_stop(
rows: typing.Iterable[gtfs.CSV_Row],
stop_id: str
) -> typing.Set[str]:
"""
Get trip IDs of trips going through a given stop.
Parameters
----------
rows
Rows of `stop_times.txt` to be filtered.
stop_id
ID of stop of interest.
Returns
-------
set
IDs of trips going through stop `stop_id`.
"""
return {row['trip_id'] for row in rows if row['stop_id'] == stop_id}
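#A quick illustration of the two helpers above on hypothetical rows from
#stop_times.txt (values are strings, as csv.DictReader yields them):
#    rows = [
#        {'trip_id': 't1', 'stop_id': 's1', 'stop_sequence': '1'},
#        {'trip_id': 't1', 'stop_id': 's2', 'stop_sequence': '2'},
#        {'trip_id': 't2', 'stop_id': 's2', 'stop_sequence': '1'},
#    ]
#    sorted(stop_sequences(rows))    -> [1, 1, 2]
#    trips_through_stop(rows, 's2')  -> {'t1', 't2'}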
def trim_gtfs(
filename_original: str,
filename_trimmed: str
) -> gtfs.CSV_Row:
config: gtfs.CSV_Row = {}
logger.info('Reading original feed from %s.', filename_original)
with zipfile.ZipFile(filename_original, 'r') as original:
route_restrictions: gtfs.CSV_Restrictions = {}
agency: gtfs.FilteredCSV = gtfs.FilteredCSV(original, 'agency.txt', {})
agency_id: str = get_choice(
'agency',
agency.rows,
'agency_id',
('agency_name',),
)
logger.debug('Agency ID %s selected.', agency_id)
agency.update_restrictions(agency_id={agency_id})
route_restrictions.update(agency_id={agency_id})
with open(ROUTE_TYPES_FILENAME, 'r') as f:
reader: csv.DictReader = csv.DictReader(
f, fieldnames=('route_type_id', 'description'), dialect='unix'
)
#Skip header. Could also leave `fieldnames` unspecified above.
next(reader)
ROUTE_TYPES: typing.Tuple[gtfs.CSV_Row, ...] = tuple(reader)
route_type: str = get_choice(
'route type',
ROUTE_TYPES,
'route_type_id',
('description',),
)
config.update({'route_type_id': route_type})
logger.debug('Route type %s selected.', route_type)
route_restrictions.update(route_type={route_type})
routes: gtfs.FilteredCSV = gtfs.FilteredCSV(
original, 'routes.txt', route_restrictions
)
route_id: str = get_choice(
'route',
routes.rows,
'route_id',
('route_long_name', 'route_short_name',),
)
logger.debug('Route ID %s selected.', route_id)
routes.update_restrictions(route_id={route_id})
trips: gtfs.FilteredCSV = gtfs.FilteredCSV(
original, 'trips.txt', {'route_id': routes.values('route_id')},
)
stop_times: gtfs.FilteredCSV = gtfs.FilteredCSV(
original, 'stop_times.txt', {'trip_id': trips.values('trip_id')}
)
stops: gtfs.FilteredCSV = gtfs.FilteredCSV(
original, 'stops.txt', {'stop_id': stop_times.values('stop_id')}
)
departure_stop_id: str = get_choice(
'departure stop',
stops.rows,
'stop_id',
('stop_name', 'stop_code', 'stop_desc')
)
logger.debug('Departure stop ID %s selected.', departure_stop_id)
config.update({'stop_id_departure': departure_stop_id})
trip_ids_through_departure: typing.Set[str] = trips_through_stop(
stop_times.rows, departure_stop_id
)
stop_times.update_restrictions(trip_id=trip_ids_through_departure)
arrival_stop_ids: set = set()
for trip_stop_times in gtfs.binned_by_field(
stop_times.rows, 'trip_id'
).values():
#`trip_stop_times` is a list of the stops ('StopTimes') that a
#particular trip going through stop `departure_stop_id` makes
binned_stop_times: typing.Dict[str, typing.List[gtfs.CSV_Row]] = (
gtfs.binned_by_field(trip_stop_times, 'stop_id')
)
#I don't see why you'd ever have multiple visits to a single stop
#during one trip, but I don't want to read the reference closely
#enough to rule the possibility out. So, we'll loop over all
#the stop times for the departure stop and find the minimum of the
#stop sequence values, which will correspond to the first visit.
departure_stop_sequence = min(stop_sequences(
binned_stop_times[departure_stop_id]
))
#We can get to a stop from stop `departure_stop_id` if that stop has
#a StopTime with stop sequence greater than
#`departure_stop_sequence`.
arrival_stop_ids.update(
#For each station ('Stop'), we find the last stop ('StopTime') made
#there and decide whether it comes after the first stop at the
#departure stop.
stop_id for stop_id, _stop_times in binned_stop_times.items()
if max(stop_sequences(_stop_times)) > departure_stop_sequence
)
arrival_stop_id: str = get_choice(
'arrival stop',
tuple(
row for row in stops.rows if row['stop_id'] in arrival_stop_ids
),
'stop_id',
('stop_name', 'stop_code', 'stop_desc')
)
logger.debug('Arrival stop ID %s selected.', arrival_stop_id)
config.update({'stop_id_arrival': arrival_stop_id})
trip_ids_through_arrival: typing.Set[str] = trips_through_stop(
stop_times.rows, arrival_stop_id
)
stops.update_restrictions(stop_id={departure_stop_id, arrival_stop_id})
#Applying the same restriction by a different name.
stop_times.update_restrictions(stop_id=stops.values('stop_id'))
trips.update_restrictions(trip_id=(
trip_ids_through_departure.intersection(trip_ids_through_arrival)
))
routes.update_restrictions(route_id=trips.values('route_id'))
#`agency_id` isn't a required field for `routes.txt`, so this isn't a
#great thing to be doing.
agency.update_restrictions(agency_id=routes.values('agency_id'))
service_ids: typing.Set[str] = trips.values('service_id')
calendar: gtfs.FilteredCSV = gtfs.FilteredCSV(
original, 'calendar.txt', {'service_id': service_ids}
)
filtereds: typing.List[gtfs.FilteredCSV] = [
agency, stops, routes, trips, stop_times, calendar
]
if 'calendar_dates.txt' in original.namelist():
calendar_dates: gtfs.FilteredCSV = gtfs.FilteredCSV(
original, 'calendar_dates.txt', {'service_id': service_ids}
)
filtereds.append(calendar_dates)
logger.info('Writing filtered feed to %s.', filename_trimmed)
with zipfile.ZipFile(filename_trimmed, 'w') as trimmed:
for filtered in filtereds:
filtered.write(trimmed)
return config
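#A minimal sketch of a command-line entry point for trim_gtfs (hypothetical;
#the package may wire this up elsewhere), e.g.
#`python -m next_train.wizard original.zip trimmed.zip`:
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    chosen_config = trim_gtfs(sys.argv[1], sys.argv[2])
    logger.info('Wizard selections: %s', chosen_config)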
|
ben-e-whitney/next-train
|
next_train/wizard.py
|
Python
|
gpl-3.0
| 8,373
|
[
"VisIt"
] |
85536cf5cbd58365e6a6bb04138be7443b1bfe1276ceaa260a4a9c9f4c874445
|
#! /usr/bin/env python
# MMapArea.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <Don@Scorgie.org>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import math
import time
import string
import gettext
import copy
import cairo
import logging
_ = gettext.gettext
import xml.dom.minidom as dom
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import GObject
import Links
import TextThought
import LabelThought
import ImageThought
import DrawingThought
import ResourceThought
import UndoManager
import utils
from BaseThought import BaseThought
from Links import Link
RAD_UP = (- math.pi / 2.)
RAD_DOWN = (math.pi / 2.)
RAD_LEFT = (math.pi)
RAD_RIGHT = (0)
MODE_NULL = 0
MODE_TEXT = 1
MODE_IMAGE = 2
MODE_DRAW = 3
MODE_RESOURCE = 4
MODE_LABEL = 5
VIEW_LINES = 0
VIEW_BEZIER = 1
# TODO: Need to expand to support popup menus
MENU_EMPTY_SPACE = 0
# UNDO actions
UNDO_MOVE = 0
UNDO_CREATE = 1
UNDO_DELETE = 2
UNDO_DELETE_SINGLE = 3
UNDO_COMBINE_DELETE_NEW = 4
UNDO_DELETE_LINK = 5
UNDO_STRENGTHEN_LINK = 6
UNDO_CREATE_LINK = 7
UNDO_ALIGN = 8
# Note: This is (atm) very broken. It will allow you to create new canvases, but not
# create new thoughts or load existing maps.
# To get it working, either fix the TODO list at the bottom of the class and
# implement the necessary features within all the thought types (if you do,
# please send a patch ;)), OR rename this class to MMapAreaNew and MMapAreaOld
# to MMapArea.
class MMapArea (Gtk.DrawingArea):
'''A MindMapArea Widget. A blank canvas with a collection of child thoughts.\
It is responsible for processing signals and such from the whole area and \
passing these on to the correct child. It also informs things when to draw'''
__gsignals__ = dict (
title_changed = (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE, (GObject.TYPE_STRING, )),
change_mode = (GObject.SIGNAL_RUN_LAST,
GObject.TYPE_NONE,
(GObject.TYPE_INT, )),
change_buffer = (GObject.SIGNAL_RUN_LAST,
GObject.TYPE_NONE,
(GObject.TYPE_OBJECT, )),
text_selection_changed = (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
(GObject.TYPE_INT, GObject.TYPE_INT,
GObject.TYPE_STRING)),
thought_selection_changed = (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
(GObject.TYPE_PYOBJECT,
GObject.TYPE_PYOBJECT)),
set_focus = (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,
(GObject.TYPE_PYOBJECT, GObject.TYPE_BOOLEAN)),
set_attrs = (GObject.SIGNAL_RUN_LAST,
GObject.TYPE_NONE,
(GObject.TYPE_BOOLEAN, GObject.TYPE_BOOLEAN,
GObject.TYPE_BOOLEAN, Pango.FontDescription)),
link_selected = (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
()))
def __init__(self, undo):
super (MMapArea, self).__init__()
self.thoughts = []
self.links = []
self.selected = []
self.num_selected = 0
self.primary = None
self.pango_context = self.create_pango_context()
self.undo = undo
self.scale_fac = 1.0
self.translate = False
self.translation = [0.0,0.0]
self.timeout = -1
self.current_cursor = None
self.do_filter = True
self.is_bbox_selecting = False
self.nthoughts = 0
impl = dom.getDOMImplementation()
self.save = impl.createDocument("http://www.donscorgie.blueyonder.co.uk/labns", "MMap", None)
self.element = self.save.documentElement
self.im_context = Gtk.IMMulticontext ()
self.mode = MODE_NULL
self.old_mode = MODE_NULL
self.connect ("draw", self.draw)
self.connect ("button_release_event", self.button_release)
self.connect ("button_press_event", self.button_down)
self.connect ("motion_notify_event", self.motion)
self.connect ("key_press_event", self.key_press)
self.connect ("key_release_event", self.key_release)
self.connect ("scroll_event", self.scroll)
self.commit_handler = None
self.title_change_handler = None
self.drag_mode = False
self._dragging = False
self.sw = None
self.hadj = 0
self.vadj = 0
self.origin_x = None
self.origin_y = None
self.moving = False
self.move_mode = False
self.move_origin = None
self.move_origin_new = None
self.focus = None
self.move_action = None
self.current_root = []
self.rotation = 0
self.text_attributes = {}
self.set_events (Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.SCROLL_MASK
)
self.set_can_focus(True)
# set theme colors
w = Gtk.Window()
w.realize()
style = Gtk.Style() ##w.get_style()
c = w.get_style_context()
font = c.get_font(Gtk.StateFlags.NORMAL)
self.pango_context.set_font_description(font)
self.font_name = font.get_family()
utils.default_font = self.font_name
self.font_size = font.get_size() / Pango.SCALE
utils.default_colors["text"] = utils.gtk_to_cairo_color(c.get_color(Gtk.StateFlags.NORMAL)) ##utils.gtk_to_cairo_color(style.text[gtk.STATE_NORMAL])
utils.default_colors["base"] = utils.gtk_to_cairo_color(c.get_background_color(Gtk.StateFlags.NORMAL)) ##utils.gtk_to_cairo_color(style.base[gtk.STATE_NORMAL])
# Match the fixed white canvas colour (makes thought focus visible)
self.background_color = style.white
self.foreground_color = style.black
utils.default_colors["bg"] = utils.gtk_to_cairo_color(c.get_background_color(Gtk.StateFlags.NORMAL)) ##utils.gtk_to_cairo_color(style.bg[gtk.STATE_NORMAL])
utils.default_colors["fg"] = utils.gtk_to_cairo_color(c.get_color(Gtk.StateFlags.NORMAL)) ##utils.gtk_to_cairo_color(style.fg[gtk.STATE_NORMAL])
utils.selected_colors["text"] = utils.gtk_to_cairo_color(c.get_color(Gtk.StateFlags.SELECTED)) ##utils.gtk_to_cairo_color(style.text[gtk.STATE_SELECTED])
utils.selected_colors["bg"] = utils.gtk_to_cairo_color(c.get_background_color(Gtk.StateFlags.SELECTED)) ##utils.gtk_to_cairo_color(style.bg[gtk.STATE_SELECTED])
utils.selected_colors["fg"] = utils.gtk_to_cairo_color(c.get_color(Gtk.StateFlags.SELECTED)) ##utils.gtk_to_cairo_color(style.fg[gtk.STATE_SELECTED])
utils.selected_colors["fill"] = utils.gtk_to_cairo_color(c.get_background_color(Gtk.StateFlags.SELECTED)) ##utils.gtk_to_cairo_color(style.base[gtk.STATE_SELECTED])
def set_text_attributes(self, text_attributes):
return
'''
self.font_combo_box = text_attributes.props.page.fonts_combo_box.combo
self.font_sizes_combo_box = utils.default_font_size #text_attributes.props.page.font_sizes_combo_box.combo
'''
def transform_coords(self, loc_x, loc_y):
if hasattr(self, "transform"):
return self.transform.transform_point(loc_x, loc_y)
def untransform_coords(self, loc_x, loc_y):
if hasattr(self, "untransform"):
return self.untransform.transform_point(loc_x, loc_y)
def button_down (self, widget, event):
if self.drag_mode:
self.set_cursor(Gdk.CursorType.HAND2)
self.origin_x = event.x
self.origin_y = event.y
self._dragging = True
return
if event.button == 2 or \
event.button == 1 and self.translate == True:
self.set_cursor (Gdk.CursorType.FLEUR)
self.original_translation = self.translation
self.origin_x = event.x
self.origin_y = event.y
return
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
obj = self.find_object_at (coords)
if obj:
if event.button == 3 or self.move_mode:
if self.move_mode:
self.moving = True
else:
self.moving = not (event.state & Gdk.ModifierType.CONTROL_MASK)
if self.moving:
self.set_cursor(Gdk.CursorType.FLEUR)
self.move_origin = (coords[0], coords[1])
self.move_origin_new = self.move_origin
if obj == self.focus:
self.focus.enter()
else:
self.set_focus(obj, event.state)
obj.process_button_down(event, coords)
elif self.mode and event.button == 1 and widget:
self.embody_thought(event)
elif event.button == 1 and self.mode == MODE_NULL:
self.bbox_origin = coords
self.is_bbox_selecting = True
def undo_move (self, action, mode):
self.undo.block ()
move_thoughts = action.args[1]
old_coords = action.args[0]
new_coords = action.args[2]
move_x = old_coords[0] - new_coords[0]
move_y = old_coords[1] - new_coords[1]
if mode == UndoManager.REDO:
move_x = -move_x
move_y = -move_y
self.unselect_all ()
for t in move_thoughts:
self.select_thought (t, -1)
t.move_by (move_x, move_y)
self.undo.unblock ()
self.invalidate ((old_coords[0], old_coords[1], new_coords[0], new_coords[1]))
def button_release (self, widget, event):
if self._dragging:
self.set_cursor(Gdk.CursorType.LEFT_PTR)
self._dragging = False
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
if self.is_bbox_selecting:
self.is_bbox_selecting = False
self.invalidate ()
try:
if abs(self.bbox_origin[0] - coords[0]) > 2.0:
return True
except AttributeError: # no bbox_origin
pass
if self.translate:
self.translate = False
return True
if self.moving and self.move_action:
self.move_action.add_arg (coords)
self.undo.add_undo (self.move_action)
self.move_action = None
was_moving = False
if self.moving:
was_moving = True
self.stop_moving()
obj = self.find_object_at (coords)
if event.button == 2:
self.undo.add_undo (UndoManager.UndoAction (self, UndoManager.TRANSFORM_CANVAS, \
self.undo_transform_cb,
self.scale_fac, self.scale_fac,
self.original_translation,
self.translation))
if obj:
if not obj.process_button_release(event, coords):
# prolonged creation was failed
self.undo.forget_action()
else:
self.update_view(obj)
if len(self.selected) != 1:
self.invalidate() # does not invalidate correctly with obj.get_max_area()
return True
self.invalidate ()
if was_moving:
self.start_moving(self.move_button)
return True
def undo_transform_cb (self, action, mode):
if mode == UndoManager.UNDO:
self.scale_fac = action.args[0]
self.translation = action.args[2]
else:
self.scale_fac = action.args[1]
self.translation = action.args[3]
self.invalidate ()
def scroll (self, widget, event):
scale = self.scale_fac
if event.direction == Gdk.ScrollDirection.UP:
self.scale_fac *= 1.2
elif event.direction == Gdk.ScrollDirection.DOWN:
self.scale_fac /= 1.2
self.undo.add_undo (UndoManager.UndoAction (self, UndoManager.TRANSFORM_CANVAS, \
self.undo_transform_cb,
scale, self.scale_fac, self.translation,
self.translation))
self.invalidate()
def undo_joint_cb (self, action, mode):
delete = action.args[0]
create = action.args[1]
if mode == UndoManager.UNDO:
self.undo_create_cb (create, mode)
self.undo_deletion (delete, mode)
else:
self.undo_deletion (delete, mode)
self.undo_create_cb (create, mode)
self.invalidate ()
def key_press (self, widget, event):
# Support for canvas panning keys ('hand' on XO, 'cmd' on Macs)
if event.hardware_keycode == 133 or event.hardware_keycode == 134:
self.translate = True
if not self.do_filter or not self.im_context.filter_keypress (event):
if self.focus:
if self.focus.creating or \
not self.focus.process_key_press (event, self.mode):
return self.global_key_handler (event)
return True
if len(self.selected) != 1 or not self.selected[0].process_key_press (event, self.mode):
return self.global_key_handler (event)
return True
def key_release (self, widget, event):
# Support for canvas panning keys ('hand' on XO, 'cmd' on Macs)
if event.hardware_keycode == 133 or event.hardware_keycode == 134:
self.translate = False
self.im_context.filter_keypress (event)
return True
def motion (self, widget, event):
if self._dragging:
if self.origin_x is None:
self.origin_x = event.get_coords()[0]
self.origin_y = event.get_coords()[1]
dx = self.origin_x - event.get_coords()[0]
dy = self.origin_y - event.get_coords()[1]
self.origin_x = event.get_coords()[0]
self.origin_y = event.get_coords()[1]
self._adjust_sw(dx, dy)
return True
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
if event.state & Gdk.ModifierType.BUTTON1_MASK and self.is_bbox_selecting:
self.bbox_current = coords
self.invalidate()
ul = [ self.bbox_origin[0], self.bbox_origin[1] ]
lr = [ coords[0], coords[1] ]
if self.bbox_origin[0] > coords[0]:
if self.bbox_origin[1] < coords[1]:
ul[0] = coords[0]
ul[1] = self.bbox_origin[1]
lr[0] = self.bbox_origin[0]
lr[1] = coords[1]
else:
ul = coords
lr = self.bbox_origin
elif self.bbox_origin[1] > coords[1]:
ul[0] = self.bbox_origin[0]
ul[1] = coords[1]
lr[0] = coords[0]
lr[1] = self.bbox_origin[1]
# FIXME: O(n) runtime is bad
for t in self.thoughts:
if t.lr[0] > ul[0] and t.ul[1] < lr[1] and t.ul[0] < lr[0] and t.lr[1] > ul[1] :
if t not in self.selected:
self.select_thought(t, Gdk.ModifierType.SHIFT_MASK)
else:
if t in self.selected:
t.unselect()
self.selected.remove(t)
return True
elif self.moving:
self.set_cursor(Gdk.CursorType.FLEUR)
if not self.move_action:
self.move_action = UndoManager.UndoAction (self, UNDO_MOVE, self.undo_move, self.move_origin,
self.selected)
for t in self.selected:
t.move_by (coords[0] - self.move_origin_new[0], coords[1] - self.move_origin_new[1])
self.move_origin_new = (coords[0], coords[1])
self.invalidate ()
return True
elif event.state & Gdk.ModifierType.BUTTON2_MASK or \
event.state & Gdk.ModifierType.BUTTON1_MASK and self.translate:
self.translate = True
self.translation[0] -= (self.origin_x - event.x) / self.scale_fac
self.translation[1] -= (self.origin_y - event.y) / self.scale_fac
self.origin_x = event.x
self.origin_y = event.y
self.invalidate()
return True
obj = self.find_object_at (coords)
if obj and obj.handle_motion(event, coords):
self.update_links_cb(obj)
self.update_view(obj)
return True
def find_object_at (self, coords):
if self.focus and self.focus.includes(coords):
return self.focus
for x in reversed(self.thoughts):
if x != self.focus and not isinstance(x, Link) and x.includes (coords):
return x
for x in self.links:
if x != self.focus and x.includes (coords):
return x
return None
def realize_cb (self, widget):
self.disconnect (self.realize_handle)
if self.mode == MODE_IMAGE or self.mode == MODE_DRAW:
self.set_cursor (Gdk.CursorType.CROSSHAIR)
else:
self.set_cursor (Gdk.CursorType.LEFT_PTR)
return False
def set_cursor(self, kind):
new_cursor = CursorFactory().get_cursor(kind)
if self.current_cursor != new_cursor:
self.current_cursor = new_cursor
self.window.set_cursor(self.current_cursor)
def set_mode (self, mode):
if mode == self.mode:
return
self.old_mode = self.mode
self.mode = mode
self.hookup_im_context ()
if self.get_window():
self.window = self.get_window()
if mode == MODE_IMAGE or mode == MODE_DRAW:
self.set_cursor (Gdk.CursorType.CROSSHAIR)
else:
self.set_cursor (Gdk.CursorType.LEFT_PTR)
else:
self.realize_handle = self.connect ("realize", self.realize_cb)
self.mode = mode
if self.window:
self.invalidate ()
def title_changed_cb (self, widget, new_title):
self.emit ("title_changed", new_title)
def make_primary (self, thought):
if self.primary:
print "Warning: Already have a primary root"
if self.title_change_handler:
self.primary.disconnect (self.title_change_handler)
self.title_change_handler = thought.connect ("title_changed", self.title_changed_cb)
self.emit ("title_changed", thought.text)
self.primary = thought
thought.make_primary ()
def hookup_im_context (self, thought = None):
if self.commit_handler:
self.im_context.disconnect (self.commit_handler)
self.im_context.disconnect (self.delete_handler)
self.im_context.disconnect (self.preedit_changed_handler)
self.im_context.disconnect (self.preedit_end_handler)
self.im_context.disconnect (self.preedit_start_handler)
self.im_context.disconnect (self.retrieve_handler)
self.commit_handler = None
if thought:
try:
self.commit_handler = self.im_context.connect ("commit", thought.commit_text, self.mode, None, None)
# self.font_combo_box, self.font_sizes_combo_box)
self.delete_handler = self.im_context.connect ("delete-surrounding", thought.delete_surroundings, self.mode)
self.preedit_changed_handler = self.im_context.connect ("preedit-changed", thought.preedit_changed, self.mode)
self.preedit_end_handler = self.im_context.connect ("preedit-end", thought.preedit_end, self.mode)
self.preedit_start_handler = self.im_context.connect ("preedit-start", thought.preedit_start, self.mode)
self.retrieve_handler = self.im_context.connect ("retrieve-surrounding", thought.retrieve_surroundings, \
self.mode)
self.do_filter = True
except AttributeError:
self.do_filter = False
else:
self.do_filter = False
def unselect_all (self):
self.hookup_im_context ()
for t in self.selected:
t.unselect ()
self.selected = []
def select_link (self, link, modifiers):
if modifiers and modifiers & Gdk.ModifierType.SHIFT_MASK and len (self.selected) > 1 and self.selected.count (link) > 0:
self.selected.remove (link)
link.unselect ()
return
self.hookup_im_context()
self.set_focus(None, None)
if modifiers and (modifiers & Gdk.ModifierType.SHIFT_MASK or modifiers == -1):
if self.selected.count (link) == 0:
self.selected.append (link)
else:
map (lambda t : t.unselect(), self.selected)
self.selected = [link]
link.select()
self.emit("change_buffer", None)
def set_focus(self, thought, modifiers):
if self.focus == thought:
return
if self.focus:
self.focus.leave()
if thought:
self.select_thought(thought, modifiers)
self.focus = thought
def select_thought (self, thought, modifiers):
self.hookup_im_context ()
if thought in self.selected and self.moving:
return
if thought not in self.thoughts:
self.thoughts.append(thought)
if modifiers and (modifiers & Gdk.ModifierType.SHIFT_MASK or modifiers == -1):
if self.selected.count (thought) == 0:
self.selected.append (thought)
else:
map(lambda x : x.unselect(), self.selected)
self.selected = [thought]
if thought.can_be_parent():
self.current_root = []
for x in self.selected:
if x.can_be_parent():
self.current_root.append(x)
thought.select ()
if len(self.selected) == 1:
self.emit ("thought_selection_changed", thought.background_color, thought.foreground_color)
self.background_color = thought.background_color
self.foreground_color = thought.foreground_color
try:
self.emit ("change_buffer", thought.extended_buffer)
except AttributeError:
self.emit ("change_buffer", None)
self.hookup_im_context (thought)
else:
self.emit ("change_buffer", None)
def undo_link_action (self, action, mode):
self.undo.block ()
self.set_focus(None, None)
link = action.args[0]
if action.undo_type == UNDO_CREATE_LINK:
if mode == UndoManager.REDO:
self.element.appendChild (link.element)
self.links.append (link)
else:
self.delete_link (link)
elif action.undo_type == UNDO_DELETE_LINK:
if mode == UndoManager.UNDO:
self.element.appendChild (link.element)
self.links.append (link)
else:
self.delete_link (link)
elif action.undo_type == UNDO_STRENGTHEN_LINK:
if mode == UndoManager.UNDO:
link.set_strength (action.args[1])
else:
link.set_strength (action.args[2])
self.undo.unblock ()
self.invalidate ()
def connect_link (self, link):
link.connect ("select_link", self.select_link)
link.connect ("update_view", self.update_view)
def create_link (self, thought, thought_coords, child, child_coords = None, strength = 2):
for x in self.links:
if x.connects (thought, child):
if x.change_strength (thought, child):
self.delete_link (x)
return
link = Link (self.save, parent = thought, child = child, strength = strength)
self.connect_link (link)
element = link.get_save_element ()
self.element.appendChild (element)
self.links.append (link)
return link
def set_mouse_cursor_cb (self, thought, cursor_type):
if not self.moving:
self.set_cursor (cursor_type)
def update_all_links(self):
map(lambda l : l.find_ends(), self.links)
def update_links_cb (self, thought):
for x in self.links:
if x.uses (thought):
x.find_ends ()
def update_view (self, thought):
self.invalidate ()
def invalidate (self, transformed_area = None):
'''Helper function to invalidate the entire screen, forcing a redraw'''
rect = None
if not transformed_area:
alloc = self.get_allocation ()
rect = Gdk.Rectangle()
rect.x = 0
rect.y = 0
rect.width = alloc.width
rect.height = alloc.height
else:
ul = self.untransform_coords(transformed_area[0], transformed_area[1])
lr = self.untransform_coords(transformed_area[2], transformed_area[3])
rect = Gdk.Rectangle()
rect.x = int(ul[0])
rect.y = int(ul[1])
rect.width = int(lr[0] - ul[0])
rect.height = int(lr[1] - ul[1])
if self.window:
self.window.invalidate_rect (rect, True)
def draw (self, widget, context):
'''Draw the map and all the associated thoughts'''
##area = event.area
alloc = self.get_allocation()
area = Gdk.Rectangle()
area.x = 0
area.y = 0
if type(alloc) == Gdk.Rectangle:
area.width = alloc.width
area.height = alloc.height
else:
area.width = alloc.get_width()
area.height = alloc.get_height()
context.rectangle (area.x, area.y, area.width, area.height)
context.clip ()
context.set_source_rgb (1.0,1.0,1.0)
context.move_to (area.x, area.y)
context.paint ()
context.set_source_rgb (0.0,0.0,0.0)
alloc = self.get_allocation ()
context.translate(alloc.width/2., alloc.height/2.)
context.scale(self.scale_fac, self.scale_fac)
context.translate(-alloc.width/2., -alloc.height/2.)
context.translate(self.translation[0], self.translation[1])
for l in self.links:
l.draw (context)
self.untransform = context.get_matrix()
self.transform = context.get_matrix()
self.transform.invert()
ax, ay = self.transform_coords(area.x, area.y)
width = area.width / self.scale_fac
height = area.height / self.scale_fac
for t in self.thoughts:
try:
if t.lr[0] >= ax and t.ul[0] <= ax + width and t.lr[1] >= ay and t.ul[1] <= ay + height:
t.draw (context)
except:
t.draw(context)
if self.is_bbox_selecting:
xs = self.bbox_origin[0]
ys = self.bbox_origin[1]
xe = self.bbox_current[0] - xs
ye = self.bbox_current[1] - ys
xs,ys = context.user_to_device(xs, ys)
xe,ye = context.user_to_device_distance(xe, ye)
xs = int(xs) + 0.5
ys = int(ys) + 0.5
xe = int(xe)
ye = int(ye)
xs,ys = context.device_to_user(xs, ys)
xe,ye = context.device_to_user_distance(xe, ye)
color = utils.selected_colors["border"]
context.set_line_width(2.0)
context.set_source_rgb(color[0], color[1], color[2])
context.rectangle(xs, ys, xe, ye)
context.stroke()
#color = utils.selected_colors["fill"]
#context.set_source_rgba(color[0], color[1], color[2], 0.3)
#context.rectangle(xs, ys, xe, ye)
#context.fill()
#context.set_line_width(2.0)
#context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
return False
def undo_create_cb (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
self.unselect_all ()
for t in action.args[1]:
self.select_thought (t, -1)
self.delete_thought (action.args[0])
self.emit ("change_mode", action.args[3])
else:
self.emit ("change_mode", action.args[2])
thought = action.args[0]
self.thoughts.append (thought)
for t in action.args[1]:
self.unselect_all ()
self.select_thought (t, -1)
self.hookup_im_context (thought)
self.emit ("change_buffer", thought.extended_buffer)
self.element.appendChild (thought.element)
for l in action.args[5:]:
self.links.append (l)
self.element.appendChild (l.element)
self.emit ("set_focus", None, False)
self.undo.unblock ()
self.invalidate ()
def create_new_thought (self, coords, type = None, loading = False):
self.set_focus(None, None)
if not type:
type = self.mode
if type == MODE_TEXT:
# fixed<-_vbox<-_sw<-_main_area
thought = TextThought.TextThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color, fixed=self.get_parent().get_parent().get_parent().get_parent(), parent=self)
elif type == MODE_LABEL:
thought = LabelThought.LabelThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
elif type == MODE_IMAGE:
thought = ImageThought.ImageThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
elif type == MODE_DRAW:
thought = DrawingThought.DrawingThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, \
loading,self.background_color, self.foreground_color)
elif type == MODE_RESOURCE:
thought = ResourceThought.ResourceThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
if not thought.okay ():
return None
if type == MODE_IMAGE:
self.emit ("change_mode", self.old_mode)
self.nthoughts += 1
element = thought.element
self.element.appendChild (thought.element)
thought.connect ("select_thought", self.select_thought)
thought.connect ("create_link", self.create_link)
thought.connect ("update_view", self.update_view)
thought.connect ("text_selection_changed", self.text_selection_cb)
thought.connect ("change_mouse_cursor", self.set_mouse_cursor_cb)
thought.connect ("update_links", self.update_links_cb)
thought.connect ("grab_focus", self.regain_focus_cb)
thought.connect ("update-attrs", self.update_attr_cb)
self.thoughts.append (thought)
return thought
def regain_focus_cb (self, thought, ext):
self.emit ("set_focus", None, ext)
def update_attr_cb (self, widget, bold, italics, underline, pango_font):
self.emit ("set_attrs", bold, italics, underline, pango_font)
def delete_thought (self, thought, undo = True):
if undo:
action = UndoManager.UndoAction (self, UNDO_DELETE_SINGLE, self.undo_deletion, [thought])
else:
action = None
if hasattr(thought, 'textview'):
thought.remove_textview()
if thought.element in self.element.childNodes:
self.element.removeChild (thought.element)
self.thoughts.remove (thought)
try:
self.selected.remove (thought)
except:
pass
if self.focus == thought:
self.hookup_im_context ()
self.focus = None
if self.primary == thought:
thought.disconnect (self.title_change_handler)
self.title_change_handler = None
self.primary = None
if self.thoughts:
self.make_primary (self.thoughts[0])
rem_links = []
for l in self.links:
if l.uses (thought):
if action: action.add_arg (l)
rem_links.append (l)
for l in rem_links:
self.delete_link (l)
for i, obj in enumerate(self.current_root):
if obj == thought:
del self.current_root[i]
break
if action:
self.undo.add_undo (action)
return True
def undo_deletion (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
self.unselect_all ()
for l in action.args[1:]:
self.links.append (l)
self.element.appendChild (l.element)
for t in action.args[0]:
self.thoughts.append (t)
self.select_thought (t, -1)
self.element.appendChild (t.element)
if t.am_primary and not self.primary:
self.emit ("change_buffer", action.args[0][0].extended_buffer)
self.make_primary(t)
else:
for t in action.args[0]:
self.delete_thought (t, False)
for l in action.args[1:]:
self.delete_link (l)
self.emit ("set_focus", None, False)
self.undo.unblock ()
self.invalidate ()
def delete_selected_elements (self):
if len(self.selected) == 0:
return
action = UndoManager.UndoAction (self, UNDO_DELETE, self.undo_deletion, copy.copy(self.selected))
try:
# delete_thought as a callback adds its own undo action. Block that here
self.undo.block ()
tmp = self.selected
t = tmp.pop()
while t:
if t in self.thoughts:
for l in self.links:
if l.uses (t):
action.add_arg (l)
self.delete_thought (t)
if t in self.links:
self.delete_link (t)
if len (tmp) == 0:
t = None
else:
t = tmp.pop()
finally:
self.undo.unblock ()
self.undo.add_undo (action)
self.invalidate ()
def delete_link (self, link):
if link.element in self.element.childNodes:
self.element.removeChild (link.element)
#link.element.unlink ()
try:
self.links.remove (link)
except:
pass
def find_related_thought (self, radians):
# Find thought within angle
best = None
bestangle = 1000.
bestdist = 10000.
def do_find (one, two, currentangle, curdist, sensitivity):
init_x = (one.ul[0] + one.lr[0]) / 2.
init_y = (one.ul[1] + one.lr[1]) / 2.
other_x = (two.ul[0] + two.lr[0]) / 2.
other_y = (two.ul[1] + two.lr[1]) / 2.
angle = math.atan2 ((other_y - init_y), (other_x - init_x))
while angle > math.pi:
angle -= math.pi
while angle < -math.pi:
angle += math.pi
# We have to special-case left due to stupidity of tan's
# We shift it by pi radians
if radians == RAD_LEFT:
relangle = abs((angle+math.pi) - (radians+math.pi))
if relangle > math.pi*2.:
relangle -= math.pi*2.
else:
relangle = abs(angle - radians)
newdist = math.sqrt ((init_x - other_x)**2 + (init_y - other_y)**2)
magicnum = newdist + (50. * relangle)
# Used for debugging. Spits out lots of useful info
# to determine interesting things about the thought relations
#print "angle: "+str(angle)+" rel: "+str(magicnum)+" rads: "+str(radians),
#print " , "+str(math.pi / 3.0)+" , "+str(currentangle)+"\n: "+str(relangle)
if (relangle < sensitivity) and \
(magicnum < currentangle):
return (magicnum, newdist)
return (currentangle, curdist)
if len(self.selected) != 1:
return None
initial = self.selected[0]
for x in self.links:
if x.parent == initial:
other = x.child
elif x.child == initial:
other = x.parent
else:
continue
(curr, dist) = do_find (initial, other, bestangle, bestdist, math.pi/3.)
if curr < bestangle:
bestangle = curr
best = other
bestdist = dist
if not best:
for x in self.thoughts:
if x == self.selected[0]:
continue
(curr, dist) = do_find (initial, x, bestangle, bestdist, math.pi/4.)
if curr < bestangle:
best = x
bestangle = curr
bestdist = dist
return best
def undo_align(self, action, mode):
self.undo.block ()
dic = action.args[0]
if mode == UndoManager.UNDO:
for t in dic:
t.move_by(-dic[t][0], -dic[t][1])
else:
for t in dic:
t.move_by(dic[t][0], dic[t][1])
self.undo.unblock ()
def align_top_left(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].ul[0]
y = self.selected[0].ul[1]
for t in self.selected:
if vertical:
vec = (-(t.ul[0]-x), 0)
else:
vec = (0, -(t.ul[1]-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def align_bottom_right(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].lr[0]
y = self.selected[0].lr[1]
for t in self.selected:
if vertical:
vec = (-(t.lr[0]-x), 0)
else:
vec = (0, -(t.lr[1]-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def align_centered(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].ul[0] + (self.selected[0].lr[0] - self.selected[0].ul[0]) / 2.0
y = self.selected[0].ul[1] + (self.selected[0].lr[1] - self.selected[0].ul[1]) / 2.0
for t in self.selected:
if vertical:
vec = (-((t.ul[0] + (t.lr[0]-t.ul[0])/2.0)-x), 0)
else:
vec = (0, -((t.ul[1] + (t.lr[1]-t.ul[1])/2.0)-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def global_key_handler (self, event):
## FIXME
"""thought = None
if event.keyval == gtk.keysyms.Up:
thought = self.find_related_thought (RAD_UP)
elif event.keyval == gtk.keysyms.Down:
thought = self.find_related_thought (RAD_DOWN)
elif event.keyval == gtk.keysyms.Left:
thought = self.find_related_thought (RAD_LEFT)
elif event.keyval == gtk.keysyms.Right:
thought = self.find_related_thought (RAD_RIGHT)
elif event.keyval == gtk.keysyms.Delete:
self.delete_selected_elements ()
elif event.keyval == gtk.keysyms.BackSpace:
self.delete_selected_elements ()
elif event.keyval == gtk.keysyms.Return:
if self.focus:
self.focus.enter()
elif event.keyval == gtk.keysyms.Escape:
if self.focus and self.focus.creating:
self.undo.forget_action()
self.unselect_all ()
self.focus = None
elif event.keyval == gtk.keysyms.a and event.state & Gdk.ModifierType.CONTROL_MASK:
self.unselect_all ()
for t in self.thoughts:
t.select ()
self.selected.append (t)
else:
return False
"""
return False
##if thought:
## self.set_focus(thought, None)
##self.invalidate ()
##return True
def load_thought (self, node, type, tar):
thought = self.create_new_thought (None, type, loading = True)
thought.creating = False
thought.load (node, tar)
def load_link (self, node):
link = Link (self.save)
self.connect_link (link)
link.load (node)
self.links.append (link)
element = link.get_save_element ()
self.element.appendChild (element)
def load_thyself (self, top_element, doc, tar):
for node in top_element.childNodes:
if node.nodeName == "thought":
self.load_thought (node, MODE_TEXT, tar)
elif node.nodeName == "label_thought":
self.load_thought (node, MODE_LABEL, tar)
elif node.nodeName == "image_thought":
self.load_thought (node, MODE_IMAGE, tar)
elif node.nodeName == "drawing_thought":
self.load_thought (node, MODE_DRAW, tar)
elif node.nodeName == "res_thought":
self.load_thought (node, MODE_RESOURCE, tar)
elif node.nodeName == "link":
self.load_link (node)
else:
print "Warning: Unknown element type. Ignoring: "+node.nodeName
self.finish_loading ()
def finish_loading (self):
# Possible TODO: This all assumes we've been given a proper,
# consistent file. It should fall back nicely, but...
# First, find the primary root:
for t in self.thoughts:
if t.am_primary:
self.make_primary (t)
if t.am_selected:
self.selected.append (t)
t.select ()
if t.identity >= self.nthoughts:
self.nthoughts = t.identity + 1
if self.selected:
self.current_root = self.selected
else:
self.current_root = [self.primary]
if len(self.selected) == 1:
self.emit ("change_buffer", self.selected[0].extended_buffer)
self.hookup_im_context (self.selected[0])
self.emit ("thought_selection_changed", self.selected[0].background_color, \
self.selected[0].foreground_color)
else:
self.emit ("change_buffer", None)
del_links = []
for l in self.links:
if (l.parent_number == -1 and l.child_number == -1) or \
(l.parent_number == l.child_number):
del_links.append (l)
continue
parent = child = None
for t in self.thoughts:
if t.identity == l.parent_number:
parent = t
elif t.identity == l.child_number:
child = t
if parent and child:
break
l.set_parent_child (parent, child)
if not l.parent or not l.child:
del_links.append (l)
for l in del_links:
self.delete_link (l)
def update_save(self):
for t in self.thoughts:
t.update_save ()
for l in self.links:
l.update_save ()
def save_thyself(self, tar):
for t in self.thoughts:
t.save(tar)
def text_selection_cb (self, thought, start, end, text):
self.emit ("text_selection_changed", start, end, text)
def copy_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].copy_text (clip)
def cut_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].cut_text (clip)
def paste_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].paste_text (clip)
def export (self, context, width, height, native):
context.rectangle (0, 0, width, height)
context.clip ()
context.set_source_rgb (1.0,1.0,1.0)
context.move_to (0,0)
context.paint ()
context.set_source_rgb (0.0,0.0,0.0)
if not native:
move_x = self.move_x
move_y = self.move_y
else:
move_x = 0
move_y = 0
for l in self.links:
l.export (context, move_x, move_y)
for t in self.thoughts:
t.export (context, move_x, move_y)
def get_max_area (self):
minx = 999
maxx = -999
miny = 999
maxy = -999
for t in self.thoughts:
mx,my,mmx,mmy = t.get_max_area ()
if mx < minx:
minx = mx
if my < miny:
miny = my
if mmx > maxx:
maxx = mmx
if mmy > maxy:
maxy = mmy
# Add a 10px border around all
self.move_x = 10-minx
self.move_y = 10-miny
maxx = maxx-minx+20
maxy = maxy-miny+20
return (maxx,maxy)
def get_selection_bounds (self):
if len (self.selected) == 1:
try:
return self.selected[0].index, self.selected[0].end_index
except AttributeError:
return None, None
else:
return None, None
def thoughts_are_linked (self):
if len (self.selected) != 2:
return False
for l in self.links:
if l.connects (self.selected[0], self.selected[1]):
return True
return False
def drag_menu_cb(self, sw, mode):
if len(self.selected) == 1:
if hasattr(self.selected[0], 'textview'):
self.selected[0].remove_textview()
if mode == True:
self.sw = sw
self.drag_mode = True
else:
self.drag_mode = False
def is_dragging(self):
return self.drag_mode
def _adjust_sw(self, dx, dy):
if self.sw is None:
return
if not self.drag_mode:
return
hadj = self.sw.get_hadjustment()
hvalue = hadj.get_value() + dx
try:
if hvalue < hadj.get_lower():
hvalue = hadj.get_lower()
elif hvalue > hadj.get_upper():
hvalue = hadj.get_upper()
except AttributeError:
pass
hadj.set_value(hvalue)
self.sw.set_hadjustment(hadj)
vadj = self.sw.get_vadjustment()
vvalue = vadj.get_value() + dy
try:
if vvalue < vadj.get_lower():
vvalue = vadj.get_lower()
elif vvalue > vadj.get_upper():
vvalue = vadj.get_upper()
except AttributeError:
pass
self.sw.set_vadjustment(vadj)
def stop_moving(self):
self.moving = False
self.move_mode = False
self.move_origin = None
def start_moving(self, move_button):
if len(self.selected) == 1:
if hasattr(self.selected[0], 'textview'):
self.selected[0].remove_textview()
self.move_mode = True
self.move_button = move_button
def link_menu_cb (self):
if len (self.selected) != 2:
return
if not self.selected[0].can_be_parent() or \
not self.selected[1].can_be_parent():
return
lnk = None
for l in self.links:
if l.connects (self.selected[0], self.selected[1]):
lnk = l
break
if lnk:
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_DELETE_LINK, self.undo_link_action, lnk))
self.delete_link (lnk)
else:
lnk = self.create_link (self.selected[0], None, self.selected[1])
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_CREATE_LINK, self.undo_link_action, lnk))
self.invalidate ()
def set_bold (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_bold (active)
self.invalidate()
def set_italics (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_italics (active)
self.invalidate()
def set_underline (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_underline (active)
self.invalidate()
def set_background_color(self, color):
for s in self.selected:
s.background_color = color
self.background_color = color
if len(self.selected) > 1:
self.invalidate()
def set_foreground_color(self, color):
for s in self.selected:
s.foreground_color = color
self.foreground_color = color
if len(self.selected) > 1:
self.invalidate()
def set_font(self, font_name, font_size):
if len (self.selected) == 1 and hasattr(self.selected[0], "set_font"):
self.selected[0].set_font(font_name, font_size)
self.font_name = font_name
self.font_size = font_size
self.invalidate()
def embody_thought(self, event):
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
thought = self.create_new_thought(coords)
sel = self.selected
if not thought:
return True
if not self.primary and \
thought.can_be_parent():
self.make_primary (thought)
self.select_thought (thought, None)
else:
self.emit ("change_buffer", thought.extended_buffer)
self.hookup_im_context (thought)
# Creating links adds an undo action. Block it here
self.undo.block ()
if not self.current_root:
self.current_root.append(self.primary)
if thought.can_be_parent():
for x in self.current_root:
if x.can_be_parent():
self.create_link (x, None, thought)
for x in self.selected:
x.unselect ()
self.selected = [thought]
thought.select ()
self.undo.unblock ()
thought.foreground_color = self.foreground_color
thought.background_color = self.background_color
act = UndoManager.UndoAction (self, UNDO_CREATE, self.undo_create_cb, thought, sel, \
self.mode, self.old_mode, event.get_coords())
for l in self.links:
if l.uses (thought):
act.add_arg (l)
"""
if self.undo.peak ().undo_type == UNDO_DELETE_SINGLE:
last_action = self.undo.pop ()
action = UndoManager.UndoAction (self, UNDO_COMBINE_DELETE_NEW, self.undo_joint_cb, \
last_action, act)
self.undo.add_undo (action)
else:
"""
self.undo.add_undo (act)
thought.enter()
thought.includes(coords)
event.button = 1
thought.process_button_down(event, coords)
self.focus = thought
class CursorFactory:
__shared_state = {"cursors": {}}
def __init__(self):
self.__dict__ = self.__shared_state
def get_cursor(self, cur_type):
if cur_type not in self.cursors:
cur = Gdk.Cursor(cur_type)
self.cursors[cur_type] = cur
return self.cursors[cur_type]
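# CursorFactory above is a Borg (shared-state) class: every instance shares
# one __dict__, so the cursor cache behaves like a global without an explicit
# singleton. A minimal standalone illustration (hypothetical, not part of
# Labyrinth):
class _BorgExample:
    __shared_state = {}
    def __init__(self):
        self.__dict__ = self.__shared_state
# a = _BorgExample(); b = _BorgExample()
# a.value = 42        # -> b.value is also 42; both share one state dict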
|
cristian99garcia/laybrinth-activity
|
src/MMapArea.py
|
Python
|
gpl-2.0
| 53,597
|
[
"FLEUR"
] |
447b283eb187133f6a5b5eb21e3402c8561ad210be5cce4c376c734d17f76356
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
from decimal import Decimal
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_dgb_gui.qt.util import *
from electrum_dgb_gui.qt.qrcodewidget import QRCodeWidget
from electrum_dgb_gui.qt.amountedit import AmountEdit
from electrum_dgb_gui.qt.main_window import StatusBarButton
from electrum_dgb.i18n import _
from electrum_dgb.plugins import hook
from trustedcoin import TrustedCoinPlugin, server
class Plugin(TrustedCoinPlugin):
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
msg = ' '.join([
                _('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(QIcon(":icons/trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
@hook
def sign_tx(self, window, tx):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if not wallet.can_sign_without_server():
self.print_error("twofactor:sign_tx")
auth_code = None
if wallet.keystores['x3/'].get_tx_derivations(tx):
auth_code = self.auth_dialog(window)
else:
self.print_error("twofactor: xpub3 not needed")
window.wallet.auth_code = auth_code
def waiting_dialog(self, window, on_finished=None):
task = partial(self.request_billing_info, window.wallet)
return WaitingDialog(window, 'Getting billing information...', task,
on_finished)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if not wallet.can_sign_without_server():
if wallet.billing_info is None:
# request billing info before forming the transaction
                self.waiting_dialog(window).wait()
if wallet.billing_info is None:
window.show_message('Could not contact server')
return True
return False
def settings_dialog(self, window):
self.waiting_dialog(window, partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(":icons/trustedcoin-status.png"))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
        msg = _('TrustedCoin charges a fee per co-signed transaction. You may pay on each transaction (an extra output will be added to your transaction), or you may purchase prepaid transactions using this dialog.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
v = price_per_tx.get(1)
grid.addWidget(QLabel(_("Price per transaction (not prepaid):")), 0, 0)
grid.addWidget(QLabel(window.format_amount(v) + ' ' + window.base_unit()), 0, 1)
i = 1
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Price for %d prepaid transactions:"%k), i, 0)
grid.addWidget(QLabel("%d x "%k + window.format_amount(v/k) + ' ' + window.base_unit()), i, 1)
b = QPushButton(_("Buy"))
b.clicked.connect(lambda b, k=k, v=v: self.on_buy(window, k, v, d))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)
        # transfer button
#def on_transfer():
# server.transfer_credit(self.user_id, recipient, otp, signature_callback)
# pass
#b = QPushButton(_("Transfer"))
#b.clicked.connect(on_transfer)
#grid.addWidget(b, 1, 2)
#grid.addWidget(QLabel(_("Next Billing Address:")), i, 0)
#grid.addWidget(QLabel(self.billing_info['billing_address']), i, 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def on_buy(self, window, k, v, d):
d.close()
if window.pluginsdialog:
window.pluginsdialog.close()
wallet = window.wallet
uri = "bitcoin:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
wallet.is_billing = True
window.pay_to_URI(uri)
window.payto_e.setFrozen(True)
window.message_e.setFrozen(True)
window.amount_e.setFrozen(True)
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = QTextEdit()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
tos = server.get_terms_of_service()
self.TOS = tos
window.emit(SIGNAL('twofactor:TOS'))
def on_result():
tos_e.setText(self.TOS)
def set_enabled():
next_button.setEnabled(re.match(regexp,email_e.text()) is not None)
window.connect(window, SIGNAL('twofactor:TOS'), on_result)
t = Thread(target=request_TOS)
t.setDaemon(True)
t.start()
regexp = r"[^@]+@[^@]+\.[^@]+"
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.set_main_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
return str(email_e.text())
def request_otp_dialog(self, window, _id, otp_secret):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with Trustedcoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.set_main_layout(vbox, next_enabled=False,
raise_on_cancel=False)
return pw.get_amount(), cb_lost.isChecked()
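# A minimal sketch (not used by the plugin; verification happens on the
# TrustedCoin server) of how the otpauth://totp secret shown in the QR code
# above maps to the 6-digit codes the user types in: RFC 6238 TOTP with
# SHA-1 and 30-second steps, which is what Google Authenticator computes.
def _totp_now_sketch(otp_secret, period=30, digits=6):
    import base64, hashlib, hmac, struct, time
    key = base64.b32decode(otp_secret.upper())
    counter = struct.pack('>Q', int(time.time()) // period)
    mac = hmac.new(key, counter, hashlib.sha1).digest()
    offset = ord(mac[-1]) & 0x0f                  # dynamic truncation (Python 2 str digest)
    code = struct.unpack('>I', mac[offset:offset + 4])[0] & 0x7fffffff
    return '%0*d' % (digits, code % (10 ** digits))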
|
protonn/Electrum-Cash
|
plugins/trustedcoin/qt.py
|
Python
|
mit
| 10,704
|
[
"VisIt"
] |
e71e7e5a216f727e05dac6aaf1002332940f9bc34b11b36b312e890cef9230f7
|
"""
Connects to the Galaxy Eggs distribution site and downloads any eggs needed.
If eggs for your platform are unavailable, fetch_eggs.py will direct you to run
scramble.py.
"""
import os, sys, logging
from optparse import OptionParser
parser = OptionParser()
parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default='config/galaxy.ini' )
parser.add_option( '-e', '--egg-name', dest='egg_name', help='Egg name (as defined in eggs.ini) to fetch, or "all" for all eggs, even those not needed by your configuration' )
parser.add_option( '-p', '--platform', dest='platform', help='Fetch for a specific platform (by default, eggs are fetched for *this* platform)' )
( options, args ) = parser.parse_args()
if not os.path.exists( options.config ):
print "Config file does not exist (see 'python %s --help'): %s" % ( sys.argv[0], options.config )
sys.exit( 1 )
root = logging.getLogger()
root.setLevel( 10 )
root.addHandler( logging.StreamHandler( sys.stdout ) )
lib = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "..", "lib" ) )
sys.path.append( lib )
from galaxy.eggs import Crate, EggNotFetchable
import pkg_resources
if options.platform:
c = Crate( options.config, platform = options.platform )
else:
c = Crate( options.config )
try:
if not options.egg_name:
c.resolve() # Only fetch eggs required by the config
elif options.egg_name == 'all':
c.resolve( all=True ) # Fetch everything
else:
# Fetch a specific egg
name = options.egg_name
try:
egg = c[name]
except:
print "error: %s not in eggs.ini" % name
sys.exit( 1 )
dist = egg.resolve()[0]
print "%s %s is installed at %s" % ( dist.project_name, dist.version, dist.location )
except EggNotFetchable, e:
config_arg = ''
if options.config != 'config/galaxy.ini':
config_arg = '-c %s ' % options.config
try:
assert options.egg_name != 'all'
egg = e.eggs[0]
print "%s %s couldn't be downloaded automatically. You can try" % ( egg.name, egg.version )
print "building it by hand with:"
print " python scripts/scramble.py %s-e %s" % ( config_arg, egg.name )
except ( AssertionError, IndexError ):
print "One or more of the python eggs necessary to run Galaxy couldn't be"
print "downloaded automatically. You can try building them by hand (all"
print "at once) with:"
print " python scripts/scramble.py"
print "Or individually:"
for egg in e.eggs:
print " python scripts/scramble.py %s-e %s" % ( config_arg, egg.name )
sys.exit( 1 )
sys.exit( 0 )
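# Example invocations (illustrative; the option names are defined above):
#
#   python scripts/fetch_eggs.py                       # eggs required by config/galaxy.ini
#   python scripts/fetch_eggs.py -e all                # every egg listed in eggs.ini
#   python scripts/fetch_eggs.py -c my.ini -e numpy    # one named egg, alternate config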
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/scripts/fetch_eggs.py
|
Python
|
gpl-3.0
| 2,732
|
[
"Galaxy"
] |
c484f83b19d13e380d7db8cb7083bc86cadba47df5fdf7222c1c963b23341081
|
#!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2004, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
#
# A Python PDQ model of the SimPy simulator example
# http://simpy.sourceforge.net/examples/Jackson%20network.htm
# which is a Jackson network comprising 3 queues and a set of branching
# probabilities for the workflow.
#
# Created by NJG on Thu Oct 28 10:16:59 PDT 2004
# Updated by NJG on Fri Oct 29 17:53:04 PDT 2004
import pdq
import sys
class JackNet:
""" Constructor for the queueing network to be solved """
def __init__(self, netname, debugFlag):
""" Globals should be contained :-) """
self.name = netname
self.debugging = debugFlag
self.arrivRate = 0.50
self.work = "Traffic"
self.router = ["Router1", "Router2", "Router3"]
self.servTime = [1.0, 2.0, 1.0]
self.routeP =[ # Table of routing probabilities
[0.0, 0.5, 0.5],
[0.0, 0.0, 0.8],
[0.2, 0.0, 0.0]
]
self.visitRatio = []
self.GetVisitRatios()
self.totalMsgs = 0
def GetVisitRatios(self):
""" Compute visit ratios from the traffic equations """
# Traffic equations
lambda0 = self.arrivRate
lambda3 = (1 + self.routeP[1][2]) * self.routeP[0][1] * lambda0 \
/ (1 - (1 + self.routeP[1][2]) * self.routeP[0][1] * self.routeP[2][0])
lambda1 = lambda0 + (0.2 * lambda3)
lambda2 = 0.5 * lambda1
# Visit ratios
self.visitRatio.append(lambda1 / lambda0)
self.visitRatio.append(lambda2 / lambda0)
self.visitRatio.append(lambda3 / lambda0)
if self.debugging:
dbstr0 = "GetVisitRatios(self)"
dbst1 = "Probs: %5.4f %5.4f %5.4f" % \
(self.routeP[0][1], self.routeP[1][2], self.routeP[2][0])
dbstr2 = "Lambdas: %5.4f %5.4f %5.4f %5.4f" % (lambda1, \
lambda2, lambda3, lambda0)
dbstr3 = "Visits: %5.4f %5.4f %5.4f" % (self.visitRatio[0], \
self.visitRatio[1], self.visitRatio[2])
ShowState ("%s\n%s\n%s\n%s" % (dbstr0, dbst1, dbstr2, dbstr3))
def ShowState ( *L ):
""" L is a list of strings. Displayed by sys.stderr as concatenated
elements of L and then stops execution
"""
sys.stderr.write ( "*** Trace state *** \n%s\n***\n" % "".join(L) )
sys.exit(1)
# PDQ modeling code starts here ...
jNet = JackNet("SimPy Jackson Network", 1) # create an instance
pdq.Init(jNet.name)
pdq.SetWUnit("Msgs")
pdq.SetTUnit("Time")
# Create PDQ context and workload for the network
streams = pdq.CreateOpen(jNet.work, jNet.arrivRate)
# Create PDQ queues
for i in range(len(jNet.router)):
nodes = pdq.CreateNode(jNet.router[i], pdq.CEN, pdq.FCFS)
pdq.SetVisits(jNet.router[i], jNet.work, jNet.visitRatio[i], \
jNet.servTime[i])
# Solve the model and report the performance measures
pdq.Solve(pdq.CANON)
pdq.Report(); # generic PDQ format
# Collect particular PDQ statistics
print "---------- Selected PDQ Metrics for SimPy Comparison ---------"
for i in range(len(jNet.router)):
msgs = pdq.GetQueueLength(jNet.router[i], jNet.work, pdq.TRANS)
print "Mean queue%d: %7.4f (%s)" % (i+1, msgs, "Not in SimPy report")
jNet.totalMsgs += msgs
print "Mean number: %7.4f" % jNet.totalMsgs
print "Mean delay: %7.4f %11s" % (pdq.GetResponse(pdq.TRANS, jNet.work), \
"(Little's law Q = X R holds!)")
print "Mean stime: %7.4f %11s" % (0, "(Not in this version of PyDQ)")
print "Mean rate: %7.4f" % pdq.GetThruput(pdq.TRANS, jNet.work)
#print "Max rate: %5.4f" % pdq.GetThruMax(pdq.TRANS, work)
print "Max rate: %7.4f %11s" % (0, "(Not in this version of PyDQ)")
|
peterlharding/PDQ
|
examples/misc/jackpdq.py
|
Python
|
mit
| 4,760
|
[
"VisIt"
] |
80ddbc28c06937adef82e36145acf8359208604a50ca51d43c1105f474b4ad87
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Visit.survey_sent'
db.add_column(u'clinics_visit', 'survey_sent',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Visit.survey_sent'
db.delete_column(u'clinics_visit', 'survey_sent')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstatistic': {
'Meta': {'unique_together': "[('clinic', 'statistic', 'month')]", 'object_name': 'ClinicStatistic'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'float_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'month': ('django.db.models.fields.DateField', [], {}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']"}),
'text_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.region': {
'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'external_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'clinics.visitregistrationerror': {
'Meta': {'object_name': 'VisitRegistrationError'},
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.visitregistrationerrorlog': {
'Meta': {'object_name': 'VisitRegistrationErrorLog'},
'error_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'statistics.statistic': {
'Meta': {'object_name': 'Statistic'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'statistics.statisticgroup': {
'Meta': {'object_name': 'StatisticGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['clinics']
|
myvoice-nigeria/myvoice
|
myvoice/clinics/migrations/0017_auto__add_field_visit_survey_sent.py
|
Python
|
bsd-2-clause
| 13,210
|
[
"VisIt"
] |
46e236d9fac616f7808186ec093649151163b4e8c49491dbbede3d42d48768df
|
#!/usr/bin/env python3
"""
This script is used to create a release of biocode for submission to PyPi.
Version numbering: This project uses a sequence-based (micro) scheme for
assigning version numbers:
http://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/specification.html#sequence-based-scheme
These numbers are like X.Y.Z where X is a major release (rare, involving
major restructuring of the code and binary incompatibility with previous
releases). Y are the standard 'new features added' updates, and Z are
bugfix/minor releases off any given Y version.
Run it from within a GitHub checkout of Biocode, and it will:
- Create the output directory if necessary (should be called 'biocode')
- Creates the biocode/biocode/__init__.py file (empty)
- Creates and populates the biocode/setup.py file
- All files ending in .py under 'lib/' are assumed to be packages
- Copies all lib/biocode/*.py to biocode/biocode/
- Copies all .py scripts to biocode/bin/
- Copy all data/* to biocode/data/
"""
import argparse
import json
import os
import re
import shutil
def main():
parser = argparse.ArgumentParser( description='Creates a directory structure of biocode for PyPi (or install)')
## output file to be written
parser.add_argument('-o', '--output_directory', type=str, required=True, help='This is the directory where the output will be written' )
parser.add_argument('-v', '--version', type=str, required=True, help='Version number of the release to be made' )
args = parser.parse_args()
# make sure it ends with lower-case biocode
if not args.output_directory.endswith('biocode'):
raise Exception("The last element of the path for --output_directory needs to be lower-case 'biocode'")
if not os.path.exists(args.output_directory):
os.mkdir(args.output_directory)
# create the subdirectory of the same name required for PyPi submission
lib_path = "{0}/biocode".format(args.output_directory)
os.mkdir(lib_path)
# create the biocode/biocode/__init__.py file
init_fh = open("{0}/__init__.py".format(lib_path), 'wt')
init_fh.close()
# place the README
shutil.copy('README.rst', "{0}/".format(args.output_directory))
# get packages under lib/*.py and copy to release directory
package_names = get_biocode_package_names('lib/biocode/')
place_package_files(args.output_directory, package_names)
script_paths = get_biocode_script_paths('.')
bin_dir = "{0}/bin".format(args.output_directory)
os.mkdir(bin_dir)
place_script_files(bin_dir, script_paths)
placed_script_paths = get_placed_script_paths(bin_dir)
# make a JSON file to use for the script index
data_dir = "{0}/data".format(lib_path)
os.mkdir(data_dir)
create_script_index_json('.', data_dir)
# place the data files
datafile_paths = get_biocode_datafile_paths('.')
place_data_files(data_dir, datafile_paths)
with open(os.path.join(data_dir, '__init__.py'), 'w'):
pass
# creates the biocode/setup.py file
setup_fh = open("{0}/setup.py".format(args.output_directory), 'wt')
populate_setup_file(setup_fh, args.version, placed_script_paths)
setup_fh.close()
# create the manifest
manifest_fh = open("{0}/MANIFEST.in".format(args.output_directory), 'wt')
manifest_fh.write("include README.rst\n")
# perhaps do this instead of graft? recursive-include biocode/data *.template
manifest_fh.write("graft biocode/data\n")
# include the license
shutil.copy('LICENSE', "{0}/".format(args.output_directory))
manifest_fh.write("include LICENSE\n")
manifest_fh.close()
print("Steps to tag (on GitHub) and release (on PyPi) version {0}:".format(args.version))
print("\nNext do:\n\t$ cd {0} && python3 setup.py sdist && twine upload dist/*".format(args.output_directory))
print("Then: \n\t$ cd - && git tag v{0} && git push --tags".format(args.version))
def create_script_index_json(script_base, data_dir):
d = get_biocode_script_dict(script_base)
json_data = dict()
for category in sorted(d):
json_data[category] = list()
for script in sorted(d[category]):
desc = None
for line in open("{0}/{1}".format(category, script)):
m = re.match('.+ArgumentParser.+description=\'(.+?)\'', line)
if m:
desc = m.group(1)
json_data[category].append({'name': script, 'desc': desc})
# Don't change the path of this JSON file without also changing it in the general/list_biocode utility
with open("{0}/biocode_script_index.json".format(data_dir), 'w') as f:
f.write(json.dumps(json_data))
def get_biocode_datafile_paths(base):
datafiles = list()
for directory in os.listdir(base):
if directory not in ['data']: continue
dir_path = "{0}/{1}".format(base, directory)
if os.path.isdir(dir_path):
for thing in os.listdir(dir_path):
if thing == '__init__.py': continue
thing_path = "{0}/{1}".format(dir_path, thing)
if os.path.isfile(thing_path):
datafiles.append(thing_path)
if len(datafiles) == 0:
raise Exception("Error: failed to find any data files under {0}/data".format(base))
return datafiles
def get_biocode_package_names(base):
packages = list()
for thing in os.listdir(base):
        if thing.endswith('.py'):
            # slice off the '.py' extension; str.rstrip('.py') strips
            # characters (not a suffix) and would mangle names like 'happy.py'
            thing = thing[:-3]
            packages.append(thing)
if len(packages) == 0:
raise Exception("Error: failed to find any .py files under lib/")
return packages
def get_biocode_script_paths(base):
scripts = list()
for directory in os.listdir(base):
if directory in ['build', '.git', 'sandbox', 'tests']: continue
dir_path = "{0}/{1}".format(base, directory)
if os.path.isdir(dir_path):
for thing in os.listdir(dir_path):
if thing == '__init__.py': continue
if thing.endswith('.py'):
scripts.append("{0}/{1}".format(dir_path, thing))
if len(scripts) == 0:
raise Exception("Error: failed to find any .py files under */")
return scripts
def get_biocode_script_dict(base):
"""
Returns a dict where each key is a script category 'fasta', 'blast', etc.
and the values are the names of the scripts within that directory
"""
d = dict()
for directory in os.listdir(base):
if directory in ['build', '.git', 'sandbox', 'tests']: continue
dir_path = "{0}/{1}".format(base, directory)
if os.path.isdir(dir_path):
d[directory] = list()
for thing in os.listdir(dir_path):
if thing == '__init__.py': continue
if thing.endswith('.py'):
d[directory].append(thing)
return d
def get_placed_datafile_paths(base):
paths = list()
for thing in os.listdir(base):
if os.path.isfile("{0}/{1}".format(base, thing)):
paths.append("data/{0}".format(thing))
if len(paths) == 0:
raise Exception("Failed to find any data files in base: {0}".format(base))
return paths
def get_placed_script_paths(base):
paths = list()
for thing in os.listdir(base):
if thing.endswith('.py'):
paths.append("bin/{0}".format(thing))
if len(paths) == 0:
raise Exception("Failed to find any py scripts in base: {0}".format(base))
return paths
def place_data_files(dest_dir, paths):
for path in paths:
shutil.copy(path, dest_dir)
def place_package_files(base, names):
for name in names:
shutil.copyfile("lib/biocode/{0}.py".format(name), "{0}/biocode/{1}.py".format(base, name))
def place_script_files(dest_dir, script_paths):
for path in script_paths:
shutil.copy(path, dest_dir)
def populate_setup_file(fh, version, script_paths):
settings = {
'author': 'Joshua Orvis',
'email': 'jorvis@gmail.com',
'version': version,
'packages': ['biocode'],
'dependencies': ['python-igraph', 'jinja2', 'matplotlib', 'numpy', 'taxadb'],
'scripts': script_paths
}
fh.write("""
from setuptools import setup
read = lambda f: open(f, 'r').read()
setup(name='biocode',
author='{author}',
author_email='{email}',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
description='Bioinformatics code libraries and scripts',
include_package_data=True,
install_requires={dependencies},
keywords='bioinformatics scripts modules gff3 fasta fastq bam sam',
license='MIT',
long_description=read('README.rst'),
packages={packages},
scripts={scripts},
url='http://github.com/jorvis/biocode',
version='{version}',
zip_safe=False)
""".format(**settings))
if __name__ == '__main__':
main()
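# Example of what the description-extraction regex in
# create_script_index_json() pulls out of a script (illustrative input):
#
#   line = "parser = argparse.ArgumentParser( description='Splits FASTA files' )"
#   re.match(".+ArgumentParser.+description='(.+?)'", line).group(1)
#   # -> 'Splits FASTA files'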
|
jorvis/biocode
|
build/make_pypi_release.py
|
Python
|
mit
| 9,328
|
[
"BLAST"
] |
310b7935a423c4de34f1a026156e20cca4750d9b4a3908665e73c7081ecbee5d
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
__RCSID__ = "$Id$"
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
if __name__ == "__main__":
Script.setUsageMessage( """
Remove the given file replica or a list of file replicas from the File Catalog
and from the storage.
Usage:
%s <LFN | fileContainingLFNs> SE [SE]
""" % Script.scriptName )
Script.parseCommandLine()
from DIRAC.Core.Utilities.List import sortList, breakListIntoChunks
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()
import os
inputFileName = ""
storageElementNames = []
args = Script.getPositionalArgs()
if len(args) < 2:
Script.showHelp()
DIRACExit( 1 )
else:
inputFileName = args[0]
storageElementNames = args[1:]
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
lfns = [ lfn.strip() for lfn in string.splitlines() ]
inputFile.close()
else:
lfns = [inputFileName]
for lfnList in breakListIntoChunks( sortList( lfns, True ), 500 ):
for storageElementName in storageElementNames:
res = dm.removeReplica( storageElementName, lfnList )
if not res['OK']:
print 'Error:', res['Message']
continue
for lfn in sortList( res['Value']['Successful'].keys() ):
print 'Successfully removed %s replica of %s' % ( storageElementName, lfn )
for lfn in sortList( res['Value']['Failed'].keys() ):
message = res['Value']['Failed'][lfn]
print 'Error: failed to remove %s replica of %s: %s' % ( storageElementName, lfn, message )
|
Sbalbp/DIRAC
|
DataManagementSystem/scripts/dirac-dms-remove-replicas.py
|
Python
|
gpl-3.0
| 1,803
|
[
"DIRAC"
] |
5d5d4cf863252b4ff4a0c4fa077c0f957c653d7e3ed1eadd75df7aac73cc7199
|
#!/usr/bin/env python
"""
This script generates API changes between two different versions of VTK.
The script uses git and ctags to preset a list of classes added/removed, public
methods added/removed when going from one version to the other.
"""
# ctags generated by the command:
# ctags -R --sort=yes --c++-kinds=cf --fields=aiksz --language-force=C++
# --exclude=*.in --exclude=*.java --exclude=*.py --exclude=*.js --exclude=*.bmp
# -f vtk ~/Projects/vtk/src
import distutils.spawn
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
try:
import argparse
except ImportError:
import _argparse as argparse
tagMatcherType = re.compile('^(.+)\t(\S+)\t/\^(.*)\$/;"\t(.*)\n')
class Tag:
def __init__(self):
self._name = None
self._access = None
self._kind = None
self._fyle = None
self._decl = None
self._class = None
self._inherits = None
def __repr__(self):
return "<Tag %s in file %s>" % (self.name, self.fyle)
def parse(self, line):
"""
Parse a CTags line and set myself from it.
"""
# example of CTags lines:
# Type1: ARangeFunctor /home/sankhesh/Projects/vtk/src/Common/Core/Testing/Cxx/TestSMP.cxx /^class ARangeFunctor$/;" kind:c
# Type2: APIDiagram /home/sankhesh/Projects/vtk/src/Charts/Core/Testing/Cxx/TestDiagram.cxx /^class APIDiagram : public vtkContextItem$/;" kind:c inherits:vtkContextItem
# Type3: AbortFlagOff /home/sankhesh/Projects/vtk/src/Common/Core/vtkCommand.h /^ void AbortFlagOff()$/;" kind:f class:vtkCommand access:public
# Type4: BooleanSet /home/sankhesh/Projects/vtk/src/Filters/General/vtkMultiThreshold.h /^ class BooleanSet : public Set {$/;" kind:c class:vtkMultiThreshold inherits:Set access:public
tag = tagMatcherType.search(line)
if not tag:
raise KeyError, "line %s is not an expected ctags line" %line
self._name = tag.expand('\\1').strip()
self._fyle = tag.expand('\\2').strip()
self._decl = tag.expand('\\3').strip()
exts = tag.expand('\\4').strip().split('\t')
for ext in exts:
ext_stripped = ext.strip()
pre = ext_stripped.split(':')[0]
suf = ext_stripped.split(':')[1]
if pre == 'kind':
self._kind = suf
continue
if pre == 'class' or pre == 'struct':
first_colon = ext_stripped.find(':')
self._class = ext_stripped[(first_colon+1):]
if "anon" in self._class:
first_anon = self._class.find('__anon')
first_dcolon = self._class[first_anon:].find("::")
if first_dcolon > -1:
self._class = self._class[(first_anon+first_dcolon+2):]
else:
self._class = ""
continue
if pre == 'access':
self._access = suf
continue
if pre == 'inherits':
self._inherits = suf
continue
if pre == 'line':
self._fyle = self._fyle + ":" + suf
class CTags():
def __init__(self):
self._tagFile = "" # path to tag file
self._classes = []
self._pub_methods = []
self._class_file = {}
self._met_file = {}
def set_tag_file(self, path):
"""
Parse tags from the given file.
"""
handle = open(path, "r")
self._parse(handle.readlines())
def _parse(self, lines):
for line in lines:
if line.startswith('!_TAG_'):
continue
if line.startswith('ctags: Warning: ignoring null tag'):
continue
t = Tag()
t.parse(line)
# Now make append to list of classes and public methods
if t._kind == 'c':
self._classes.append(t._name)
self._class_file[t._name] = t._fyle
if t._kind == 'f' and t._access == 'public':
met = t._class + ":" + t._name
self._pub_methods.append(met)
self._met_file[met] = t._fyle
class CompareVersions():
def __init__(self, src_dir, version1, version2, tmp_dir):
self.tmp_dir = os.path.abspath(tmp_dir)
self.src_dir = self.tmp_dir + os.sep + os.path.basename(src_dir)
self.src_work_dir = os.path.abspath(src_dir)
self.version1 = version1
self.version2 = version2
self.classes_removed = []
self._classes_removed = {}
self.classes_added = []
self._classes_added = {}
self.pub_methods_removed = []
self._pub_methods_removed = {}
self.pub_methods_added = []
self._pub_methods_added = {}
self.tag_f1 = None
self.tag_f2 = None
self._git_HEAD = None
self._git_diff = None
self.dep_methods = {}
# Get system executable paths for git and ctags
self.svar = SystemVar()
# Setup the temporary working tree
self.setup_src_dir()
# Get newly deprecated methods list
self.get_deprecated_methods()
# Compare the two versions provided
self.generate_ctags()
self.get_diff()
def get_diff(self):
print "Parsing tag file %s..." % self.tag_f1
T1 = CTags()
T1.set_tag_file(self.tag_f1)
print "Parsing tag file %s...Done" % self.tag_f1
print "Parsing tag file %s..." % self.tag_f2
T2 = CTags()
T2.set_tag_file(self.tag_f2)
print "Parsing tag file %s...Done" % self.tag_f2
self.classes_added = list(set(T2._classes) - set(T1._classes))
self.classes_removed = list(set(T1._classes) - set(T2._classes))
self.pub_methods_added = list(set(T2._pub_methods) - set(T1._pub_methods))
self.pub_methods_removed = list(set(T1._pub_methods) - set(T2._pub_methods))
for cls in self.classes_added:
self._classes_added[cls] = T2._class_file[cls].split(":")[0]
for cls in self.classes_removed:
self._classes_removed[cls] = T1._class_file[cls].split(":")[0]
for met in self.pub_methods_added:
self._pub_methods_added[met] = T2._met_file[met].split(":")[0]
for met in self.pub_methods_removed:
self._pub_methods_removed[met] = T1._met_file[met].split(":")[0]
def git_checkout_version(self, ver):
git_cmd = self.svar.git_exe + ' --git-dir=' + self.src_dir + os.sep + '.git --work-tree=' +\
self.src_dir
# Make sure working tree does not have any local modifications
git_modified = git_cmd + ' ls-files -m'
git_m_proc = subprocess.Popen(git_modified.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
git_m_proc.wait()
git_m_proc_stdout = git_m_proc.stdout.read()
if git_m_proc_stdout:
print "Working tree has local modifications."
print "Please commit or reset the following files:"
print git_m_proc_stdout
sys.exit(1)
# Reset the working tree to version specified
git_reset_cmd = git_cmd + ' reset -q --hard ' + ver
git_proc = subprocess.Popen(git_reset_cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
git_proc.wait()
git_proc_stderr = git_proc.stderr.read()
if git_proc_stderr:
print "Error while git reset to version:", ver
print git_proc_stderr
sys.exit(1)
def create_tagfile(self, src_dir, fname):
ctags_proc = subprocess.Popen([self.svar.ctags_exe, '-R', '--sort=yes',
'--c++-kinds=cf', '--fields=aiknsz', '--language-force=C++',
'--exclude=*.in', '--exclude=*.java', '--exclude=*.py',
'--exclude=*.js', '--exclude=*.bmp', '-f', fname, src_dir],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ctags_proc.wait()
ctags_proc_stderr = ctags_proc.stderr.read()
if ctags_proc_stderr:
print "Error while creating tagfile with ctags:", fname
print ctags_proc_stderr
sys.exit(1)
def generate_ctags(self):
# Store the current HEAD SHA-1 sum
print "Getting the SHA-1 sum of HEAD of current working tree..."
self._git_HEAD = self.git_current_version(self.src_dir)
print "Getting the SHA-1 sum of HEAD of current working tree... %s" %\
self._git_HEAD
# Reset working tree to version1
print "Checking out version %s..." % self.version1
self.git_checkout_version(self.version1)
# Create tag file for version1
self.tag_f1 = self.gen_tagfile_name(self.version1)
print "Creating tagfile (%s) for version %s..." % (self.tag_f1,
self.version1)
self.create_tagfile(self.src_dir, self.tag_f1)
print "Creating tagfile (%s) for version %s...Done" % (self.tag_f1,
self.version1)
# Reset working tree to version2
print "Checking out version %s..." % self.version2
self.git_checkout_version(self.version2)
# Create tag file for version2
self.tag_f2 = self.gen_tagfile_name(self.version2)
print "Creating tagfile (%s) for version %s..." % (self.tag_f2,
self.version2)
self.create_tagfile(self.src_dir, self.tag_f2)
print "Creating tagfile (%s) for version %s...Done" % (self.tag_f2,
self.version2)
def gen_tagfile_name(self, ver):
tag_f_name = self.tmp_dir + os.sep + os.path.basename(self.src_dir) +\
"_" + ver + '.ctags'
return tag_f_name
def git_current_version(self, src_dir):
git_cmd = self.svar.git_exe + ' --git-dir=' + src_dir + os.sep +\
'.git --work-tree=' + src_dir + ' rev-parse HEAD'
git_proc = subprocess.Popen(git_cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
git_proc.wait()
git_proc_stderr = git_proc.stderr.read()
if git_proc_stderr:
print "Error while getting current HEAD SHA-1 sum:"
print git_proc_stderr
sys.exit(1)
return git_proc.stdout.read()
def git_diff_versions(self):
git_cmd = self.svar.git_exe + ' --git-dir=' + self.src_dir + os.sep +\
'.git --work-tree=' + self.src_dir + ' diff ' + self.version1 +\
'..' + self.version2
self._git_diff = self.tmp_dir + os.sep + os.path.basename(self.src_dir)\
+ "_" + self.version1 + "_" + self.version2 + ".diff"
with open(self._git_diff, "w") as diff_file:
git_proc = subprocess.Popen(git_cmd.split(), stdout=diff_file,
stderr=subprocess.PIPE)
git_proc.wait()
git_proc_stderr = git_proc.stderr.read()
if git_proc_stderr:
print "Error while getting git diff between versions %s and %s" %\
(self.version1, self.version2)
print git_proc_stderr
sys.exit(1)
def get_deprecated_methods(self):
fnameMatcher = re.compile('diff --git .* b/(.*)\n')
depMatcher = re.compile('^\+.*VTK_LEGACY\((.*)\).*\n')
rtDepMatcher = re.compile('^\+.*VTK_LEGACY_BODY\((.*),\s*"(.*)"\).*\n')
self.git_diff_versions()
fname = ''
with open(self._git_diff, "r+") as diff_file:
for line in diff_file:
fnamematch = fnameMatcher.search(line)
if fnamematch:
fname = fnamematch.expand('\\1').strip()#.split('/')[-1]
if "vtkSetGet.h" in fname:
continue
dep_diff = depMatcher.search(line)
if dep_diff:
self.dep_methods[dep_diff.expand('\\1').strip()] = [fname, "?"]
else:
dep_diff = rtDepMatcher.search(line)
if dep_diff:
self.dep_methods[dep_diff.expand('\\1').strip()] =\
[fname, dep_diff.expand('\\2').strip()]
def setup_src_dir(self):
print "Setting up a clone of working tree (%s) in %s..." %\
(self.src_work_dir, self.src_dir)
git_cmd = self.svar.git_exe + ' clone -l --no-hardlinks -q ' +\
self.src_work_dir + ' ' + self.src_dir
git_proc = subprocess.Popen(git_cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
git_proc.wait()
git_proc_stderr = git_proc.stderr.read()
if git_proc_stderr:
print "Error while git clone into temporary working directory."
print "Command Used:", git_cmd
print git_proc_stderr
sys.exit(1)
print "Setting up a clone of working tree (%s) in %s...Done" %\
(self.src_work_dir, self.src_dir)
def cleanup(self):
if os.path.exists(self.tag_f1):
os.remove(self.tag_f1)
if os.path.exists(self.tag_f2):
os.remove(self.tag_f2)
if os.path.exists(self._git_diff):
os.remove(self._git_diff)
if os.path.exists(self.src_dir):
shutil.rmtree(self.src_dir)
class SystemVar():
def __init__(self):
self.git_exe = None
self.ctags_exe = None
self.get_exes()
def get_exes(self):
self.git_exe = self.get_exe("git")
self.ctags_exe = self.get_exe("ctags")
def get_exe(self, exe):
exe_file = distutils.spawn.find_executable(exe)
if not exe_file:
print "%s not found in system PATH" % exe
for trial in range(3):
exe_file = raw_input("Enter full path to %s executable:\n"\
% exe)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
break
else:
print "The provided path is not an executable"
exe_file = None
if not exe_file:
print "Could not locate %s executable in 3 tries. Exiting..."\
% exe
sys.exit(1)
return exe_file
def printReport(comp, args):
# Remove location of temp dir from printout
str_to_replace = os.path.abspath(args.tmp) + os.sep +\
os.path.basename(args.src[0]) + os.sep
def cleanListIndices(alist, parentList=None):
# Remove stuff we don't care about and sort so module/kits are apparent
def removeExtras(cls):
tmp = alist[cls]
if isinstance(tmp, list):
tmp = tmp[0]
if parentList:
# don't report new/removed methods that are part of new/removed classes
clsname = tmp.split('/')[-1].split('.')[0]
if clsname in parentList:
return False
return ("Testing/Cxx" not in tmp and
"Examples" not in tmp and
"ThirdParty" not in tmp and
"Utilities" not in tmp)
def getDir(cls):
tmp = alist[cls]
if isinstance(tmp, list):
tmp = tmp[0]
return tmp.replace(str_to_replace, '')
return sorted(
filter(removeExtras, alist),
key=lambda cls: getDir(cls))
if args.wikiOutput:
print "==API differences when going from version %s to version %s=="\
%(args.versions[0], args.versions[1])
print "===Classes/Structs added in version %s===" %args.versions[1]
print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
'cellspacing="0"'
print r'!Class Name'
print r'!File'
for cls in cleanListIndices(comp._classes_added):
print r'|-'
print r'|%s' %cls
print r'|%s' %comp._classes_added[cls].replace(str_to_replace, '')
print r'|}'
print "===Classes/Structs removed from version %s===" %args.versions[0]
print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
'cellspacing="0"'
print r'!Class Name'
print r'!File'
for cls in cleanListIndices(comp._classes_removed):
print r'|-'
print r'|%s' %cls
print r'|%s' %comp._classes_removed[cls].replace(str_to_replace, '')
print r'|}'
print "===Public methods added in version %s===" %args.versions[1]
print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
'cellspacing="0"'
print r'!Method'
print r'!File'
for cls in cleanListIndices(comp._pub_methods_added, comp._classes_added):
print r'|-'
print r'|%s' %cls
print r'|%s' %comp._pub_methods_added[cls].replace(str_to_replace, '')
print r'|}'
print "===Public methods removed from version %s===" %args.versions[0]
print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
'cellspacing="0"'
print r'!Method'
print r'!File'
for cls in cleanListIndices(comp._pub_methods_removed, comp._classes_removed):
print r'|-'
print r'|%s' %cls
print r'|%s' %comp._pub_methods_removed[cls].replace(str_to_replace, '')
print r'|}'
print "==Deprecated Methods=="
print r'{| class="wikitable sortable" border="1" cellpadding="5" '\
'cellspacing="0"'
print r'!Method'
print r'!Deprecated in'
print r'!As of'
for cls in cleanListIndices(comp.dep_methods):
print r'|-'
print r'|%s' %cls
print r'|%s' %comp.dep_methods[cls][0]
print r'|%s' %comp.dep_methods[cls][1]
print r'|}'
else:
print "-------------------- REPORT --------------------"
print "==API differences when going from version %s to version %s=="\
%(args.versions[0], args.versions[1])
print "~~~~~~~~~~~~~Classes/Structs Added~~~~~~~~~~~~~~~"
for cls in cleanListIndices(comp._classes_added):
print "%s\t%s" %(cls, comp._classes_added[cls].replace(str_to_replace, ''))
print "~~~~~~~~~~~~Classes/Structs Removed~~~~~~~~~~~~~~"
for cls in cleanListIndices(comp._classes_removed):
print "%s\t%s" %(cls, comp._classes_removed[cls].replace(str_to_replace, ''))
print "~~~~~~~~~~~~~Public Methods Added~~~~~~~~~~~~~~~"
for cls in cleanListIndices(comp._pub_methods_added, comp._classes_added):
print "%s\t%s" %(cls, comp._pub_methods_added[cls].replace(str_to_replace, ''))
print "~~~~~~~~~~~~Public Methods Removed~~~~~~~~~~~~~~"
for cls in cleanListIndices(comp._pub_methods_removed, comp._classes_removed):
print "%s\t%s" %(cls, comp._pub_methods_removed[cls].replace(str_to_replace, ''))
print "~~~~~~~~~~~~~~Deprecated Methods~~~~~~~~~~~~~~~~~"
for cls in cleanListIndices(comp.dep_methods):
print "%s\tdeprecated in %s as of %s" %(cls, comp.dep_methods[cls][0], comp.dep_methods[cls][1])
def start(args):
# Create a clone of the current working tree in temp dir
C = CompareVersions(args.src[0], args.versions[0], args.versions[1],
args.tmp)
printReport(C, args)
if not args.dontClean:
C.cleanup()
else:
# Restore the working directory to its original state
C.git_checkout_version(C._git_HEAD)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare two different'\
' revisions of a C++ source tree under GIT revision control')
parser.add_argument("src", type=str, nargs=1,
help="Path to the working directory")
parser.add_argument("versions", type=str, nargs=2,
help="Two versions to compare. (GIT SHA/branch/tag)")
parser.add_argument("-t", "--tmp", type=str,
default=tempfile.gettempdir(),
help="Path to a temporary directory where the current working tree"\
" can be cloned.(default: System Temp Directory")
parser.add_argument("-w", "--wikiOutput", action='store_true',
help="Print output with wiki markup.")
parser.add_argument("-d", "--dontClean", action='store_true',
help="Do not delete temporary files and directories.")
args = parser.parse_args()
start(args)
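# Example of what Tag.parse() extracts from a ctags line like the Type3
# sample quoted near the top of this file (illustrative; fields are
# tab-separated):
#
#   t = Tag()
#   t.parse('AbortFlagOff\t/home/sankhesh/Projects/vtk/src/Common/Core/vtkCommand.h\t'
#           '/^  void AbortFlagOff()$/;"\tkind:f\tclass:vtkCommand\taccess:public\n')
#   # t._name -> 'AbortFlagOff', t._kind -> 'f'
#   # t._class -> 'vtkCommand', t._access -> 'public'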
|
hendradarwin/VTK
|
Utilities/Maintenance/semanticDiffVersion.py
|
Python
|
bsd-3-clause
| 20,571
|
[
"VTK"
] |
8bd385da149f6c36394ed24876a80376e0fbb5576314e613cb6b83e98a0afb40
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Utilities for handling, displaying and exporting Phylo trees.
Third-party libraries are loaded when the corresponding function is called.
"""
__docformat__ = "restructuredtext en"
import math
import sys
from Bio import MissingPythonDependencyError
def to_networkx(tree):
"""Convert a Tree object to a networkx graph.
The result is useful for graph-oriented analysis, and also interactive
plotting with pylab, matplotlib or pygraphviz, though the resulting diagram
is usually not ideal for displaying a phylogeny.
Requires NetworkX version 0.99 or later.
"""
try:
import networkx
except ImportError:
raise MissingPythonDependencyError(
"Install NetworkX if you want to use to_networkx.")
# NB (1/2010): the networkx API stabilized at v.1.0
# 1.0+: edges accept arbitrary data as kwargs, weights are floats
# 0.99: edges accept weight as a string, nothing else
# pre-0.99: edges accept no additional data
# Ubuntu Lucid LTS uses v0.99, let's support everything
if networkx.__version__ >= '1.0':
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2, weight=n2.branch_length or 1.0)
# Copy branch color value as hex, if available
if hasattr(n2, 'color') and n2.color is not None:
graph[n1][n2]['color'] = n2.color.to_hex()
elif hasattr(n1, 'color') and n1.color is not None:
# Cascading color attributes
graph[n1][n2]['color'] = n1.color.to_hex()
n2.color = n1.color
# Copy branch weight value (float) if available
if hasattr(n2, 'width') and n2.width is not None:
graph[n1][n2]['width'] = n2.width
elif hasattr(n1, 'width') and n1.width is not None:
# Cascading width attributes
graph[n1][n2]['width'] = n1.width
n2.width = n1.width
elif networkx.__version__ >= '0.99':
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2, (n2.branch_length or 1.0))
else:
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2)
def build_subgraph(graph, top):
"""Walk down the Tree, building graphs, edges and nodes."""
for clade in top:
graph.add_node(clade.root)
add_edge(graph, top.root, clade.root)
build_subgraph(graph, clade)
if tree.rooted:
G = networkx.DiGraph()
else:
G = networkx.Graph()
G.add_node(tree.root)
build_subgraph(G, tree.root)
return G
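# Example use of to_networkx() (mirrors the draw_graphviz docstring below;
# the phyloxml file path is the same illustrative one used there):
#
#   >>> from Bio import Phylo
#   >>> tree = Phylo.read('ex/apaf.xml', 'phyloxml')
#   >>> G = to_networkx(tree)
#   >>> len(G)        # one node per clade in the tree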
def draw_graphviz(tree, label_func=str, prog='twopi', args='',
node_color='#c0deff', **kwargs):
"""Display a tree or clade as a graph, using the graphviz engine.
Requires NetworkX, matplotlib, Graphviz and either PyGraphviz or pydot.
The third and fourth parameters apply to Graphviz, and the remaining
arbitrary keyword arguments are passed directly to networkx.draw(), which
in turn mostly wraps matplotlib/pylab. See the documentation for Graphviz
and networkx for detailed explanations.
The NetworkX/matplotlib parameters are described in the docstrings for
networkx.draw() and pylab.scatter(), but the most reasonable options to try
are: *alpha, node_color, node_size, node_shape, edge_color, style,
font_size, font_color, font_weight, font_family*
:Parameters:
label_func : callable
A function to extract a label from a node. By default this is str(),
but you can use a different function to select another string
associated with each node. If this function returns None for a node,
no label will be shown for that node.
        The label will also be silently skipped if the function throws an exception
related to ordinary attribute access (LookupError, AttributeError,
ValueError); all other exception types will still be raised. This
means you can use a lambda expression that simply attempts to look
up the desired value without checking if the intermediate attributes
are available:
>>> Phylo.draw_graphviz(tree, lambda n: n.taxonomies[0].code)
prog : string
The Graphviz program to use when rendering the graph. 'twopi'
behaves the best for large graphs, reliably avoiding crossing edges,
but for moderate graphs 'neato' looks a bit nicer. For small
directed graphs, 'dot' may produce a normal-looking cladogram, but
will cross and distort edges in larger graphs. (The programs 'circo'
and 'fdp' are not recommended.)
args : string
Options passed to the external graphviz program. Normally not
needed, but offered here for completeness.
Example
-------
>>> import pylab
>>> from Bio import Phylo
>>> tree = Phylo.read('ex/apaf.xml', 'phyloxml')
>>> Phylo.draw_graphviz(tree)
>>> pylab.show()
>>> pylab.savefig('apaf.png')
"""
try:
import networkx
except ImportError:
raise MissingPythonDependencyError(
"Install NetworkX if you want to use to_networkx.")
G = to_networkx(tree)
try:
# NetworkX version 1.8 or later (2013-01-20)
Gi = networkx.convert_node_labels_to_integers(G,
label_attribute='label')
int_labels = {}
for integer, nodeattrs in Gi.node.items():
int_labels[nodeattrs['label']] = integer
except TypeError:
# Older NetworkX versions (before 1.8)
Gi = networkx.convert_node_labels_to_integers(G,
discard_old_labels=False)
int_labels = Gi.node_labels
try:
posi = networkx.graphviz_layout(Gi, prog, args=args)
except ImportError:
raise MissingPythonDependencyError(
"Install PyGraphviz or pydot if you want to use draw_graphviz.")
def get_label_mapping(G, selection):
"""Apply the user-specified node relabeling."""
for node in G.nodes():
if (selection is None) or (node in selection):
try:
label = label_func(node)
if label not in (None, node.__class__.__name__):
yield (node, label)
except (LookupError, AttributeError, ValueError):
pass
if 'nodelist' in kwargs:
labels = dict(get_label_mapping(G, set(kwargs['nodelist'])))
else:
labels = dict(get_label_mapping(G, None))
kwargs['nodelist'] = list(labels.keys())
if 'edge_color' not in kwargs:
kwargs['edge_color'] = [isinstance(e[2], dict) and
e[2].get('color', 'k') or 'k'
for e in G.edges(data=True)]
if 'width' not in kwargs:
kwargs['width'] = [isinstance(e[2], dict) and
e[2].get('width', 1.0) or 1.0
for e in G.edges(data=True)]
posn = dict((n, posi[int_labels[n]]) for n in G)
networkx.draw(G, posn, labels=labels, with_labels=True,
node_color=node_color, **kwargs)
def draw_ascii(tree, file=None, column_width=80):
"""Draw an ascii-art phylogram of the given tree.
The printed result looks like::
_________ Orange
______________|
| |______________ Tangerine
______________|
| | _________________________ Grapefruit
_| |_________|
| |______________ Pummelo
|
|__________________________________ Apple
:Parameters:
file : file-like object
File handle opened for writing the output drawing. (Default:
standard output)
column_width : int
Total number of text columns used by the drawing.
"""
if file is None:
file = sys.stdout
taxa = tree.get_terminals()
# Some constants for the drawing calculations
max_label_width = max(len(str(taxon)) for taxon in taxa)
drawing_width = column_width - max_label_width - 1
drawing_height = 2 * len(taxa) - 1
def get_col_positions(tree):
"""Create a mapping of each clade to its column position."""
depths = tree.depths()
# If there are no branch lengths, assume unit branch lengths
if not max(depths.values()):
depths = tree.depths(unit_branch_lengths=True)
# Potential drawing overflow due to rounding -- 1 char per tree layer
fudge_margin = int(math.ceil(math.log(len(taxa), 2)))
cols_per_branch_unit = ((drawing_width - fudge_margin)
/ float(max(depths.values())))
return dict((clade, int(blen * cols_per_branch_unit + 1.0))
for clade, blen in depths.items())
def get_row_positions(tree):
positions = dict((taxon, 2 * idx) for idx, taxon in enumerate(taxa))
def calc_row(clade):
for subclade in clade:
if subclade not in positions:
calc_row(subclade)
positions[clade] = ((positions[clade.clades[0]] +
positions[clade.clades[-1]]) // 2)
calc_row(tree.root)
return positions
col_positions = get_col_positions(tree)
row_positions = get_row_positions(tree)
char_matrix = [[' ' for x in range(drawing_width)]
for y in range(drawing_height)]
def draw_clade(clade, startcol):
thiscol = col_positions[clade]
thisrow = row_positions[clade]
# Draw a horizontal line
for col in range(startcol, thiscol):
char_matrix[thisrow][col] = '_'
if clade.clades:
# Draw a vertical line
toprow = row_positions[clade.clades[0]]
botrow = row_positions[clade.clades[-1]]
for row in range(toprow + 1, botrow + 1):
char_matrix[row][thiscol] = '|'
# NB: Short terminal branches need something to stop rstrip()
if (col_positions[clade.clades[0]] - thiscol) < 2:
char_matrix[toprow][thiscol] = ','
# Draw descendents
for child in clade:
draw_clade(child, thiscol + 1)
draw_clade(tree.root, 0)
# Print the complete drawing
for idx, row in enumerate(char_matrix):
line = ''.join(row).rstrip()
# Add labels for terminal taxa in the right margin
if idx % 2 == 0:
line += ' ' + str(taxa[idx // 2])
file.write(line + '\n')
file.write('\n')
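# Example (a sketch, not from the original module): draw_ascii needs only a
# parsed tree and writes to standard output unless a file handle is given.
#
#     >>> from Bio import Phylo
#     >>> from io import StringIO  # StringIO.StringIO on Python 2
#     >>> tree = Phylo.read(StringIO('(((A,B),(C,D)),E);'), 'newick')
#     >>> Phylo.draw_ascii(tree, column_width=40)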
def draw(tree, label_func=str, do_show=True, show_confidence=True,
# For power users
axes=None, branch_labels=None, *args, **kwargs):
"""Plot the given tree using matplotlib (or pylab).
The graphic is a rooted tree, drawn with roughly the same algorithm as
draw_ascii.
Additional keyword arguments passed into this function are used as pyplot
options. The input format should be in the form of:
pyplot_option_name=(tuple), pyplot_option_name=(tuple, dict), or
pyplot_option_name=(dict).
Example using the pyplot options 'axhspan' and 'axvline':
>>> Phylo.draw(tree, axhspan=((0.25, 7.75), {'facecolor':'0.5'}),
... axvline={'x':'0', 'ymin':'0', 'ymax':'1'})
Visual aspects of the plot can also be modified using pyplot's own functions
and objects (via pylab or matplotlib). In particular, the pyplot.rcParams
object can be used to scale the font size (rcParams["font.size"]) and line
width (rcParams["lines.linewidth"]).
:Parameters:
label_func : callable
A function to extract a label from a node. By default this is str(),
but you can use a different function to select another string
associated with each node. If this function returns None for a node,
no label will be shown for that node.
do_show : bool
Whether to show() the plot automatically.
show_confidence : bool
Whether to display confidence values, if present on the tree.
axes : matplotlib/pylab axes
If a valid matplotlib.axes.Axes instance, the phylogram is plotted
in that Axes. By default (None), a new figure is created.
branch_labels : dict or callable
A mapping of each clade to the label that will be shown along the
branch leading to it. By default this is the confidence value(s) of
the clade, taken from the ``confidence`` attribute, and can be
easily toggled off with this function's ``show_confidence`` option.
But if you would like to alter the formatting of confidence values,
or label the branches with something other than confidence, then use
this option.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
try:
import pylab as plt
except ImportError:
raise MissingPythonDependencyError(
"Install matplotlib or pylab if you want to use draw.")
import matplotlib.collections as mpcollections
# Arrays that store lines for the plot of clades
horizontal_linecollections = []
vertical_linecollections = []
# Options for displaying branch labels / confidence
def conf2str(conf):
if int(conf) == conf:
return str(int(conf))
return str(conf)
if not branch_labels:
if show_confidence:
def format_branch_label(clade):
if hasattr(clade, 'confidences'):
# phyloXML supports multiple confidences
return '/'.join(conf2str(cnf.value)
for cnf in clade.confidences)
if clade.confidence:
return conf2str(clade.confidence)
return None
else:
def format_branch_label(clade):
return None
elif isinstance(branch_labels, dict):
def format_branch_label(clade):
return branch_labels.get(clade)
else:
assert callable(branch_labels), \
"branch_labels must be either a dict or a callable (function)"
format_branch_label = branch_labels
# Layout
def get_x_positions(tree):
"""Create a mapping of each clade to its horizontal position.
Dict of {clade: x-coord}
"""
depths = tree.depths()
# If there are no branch lengths, assume unit branch lengths
if not max(depths.values()):
depths = tree.depths(unit_branch_lengths=True)
return depths
def get_y_positions(tree):
"""Create a mapping of each clade to its vertical position.
Dict of {clade: y-coord}.
Coordinates are negative, and integers for tips.
"""
maxheight = tree.count_terminals()
# Rows are defined by the tips
heights = dict((tip, maxheight - i)
for i, tip in enumerate(reversed(tree.get_terminals())))
# Internal nodes: place at midpoint of children
def calc_row(clade):
for subclade in clade:
if subclade not in heights:
calc_row(subclade)
# Closure over heights
heights[clade] = (heights[clade.clades[0]] +
heights[clade.clades[-1]]) / 2.0
if tree.root.clades:
calc_row(tree.root)
return heights
x_posns = get_x_positions(tree)
y_posns = get_y_positions(tree)
# The function draw_clade closes over the axes object
if axes is None:
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
elif not isinstance(axes, plt.matplotlib.axes.Axes):
raise ValueError("Invalid argument for axes: %s" % axes)
def draw_clade_lines(use_linecollection=False, orientation='horizontal',
y_here=0, x_start=0, x_here=0, y_bot=0, y_top=0,
color='black', lw='.1'):
"""Create a line with or without a line collection object.
Graphical formatting of the lines representing clades in the plot can be
customized by altering this function.
"""
if (use_linecollection is False and orientation == 'horizontal'):
axes.hlines(y_here, x_start, x_here, color=color, lw=lw)
elif (use_linecollection is True and orientation == 'horizontal'):
horizontal_linecollections.append(mpcollections.LineCollection(
[[(x_start, y_here), (x_here, y_here)]], color=color, lw=lw),)
elif (use_linecollection is False and orientation == 'vertical'):
axes.vlines(x_here, y_bot, y_top, color=color)
elif (use_linecollection is True and orientation == 'vertical'):
vertical_linecollections.append(mpcollections.LineCollection(
[[(x_here, y_bot), (x_here, y_top)]], color=color, lw=lw),)
def draw_clade(clade, x_start, color, lw):
"""Recursively draw a tree, down from the given clade."""
x_here = x_posns[clade]
y_here = y_posns[clade]
# phyloXML-only graphics annotations
if hasattr(clade, 'color') and clade.color is not None:
color = clade.color.to_hex()
if hasattr(clade, 'width') and clade.width is not None:
lw = clade.width * plt.rcParams['lines.linewidth']
# Draw a horizontal line from start to here
draw_clade_lines(use_linecollection=True, orientation='horizontal',
y_here=y_here, x_start=x_start, x_here=x_here, color=color, lw=lw)
# Add node/taxon labels
label = label_func(clade)
if label not in (None, clade.__class__.__name__):
axes.text(x_here, y_here, ' %s' %
label, verticalalignment='center')
# Add label above the branch (optional)
conf_label = format_branch_label(clade)
if conf_label:
axes.text(0.5 * (x_start + x_here), y_here, conf_label,
fontsize='small', horizontalalignment='center')
if clade.clades:
# Draw a vertical line connecting all children
y_top = y_posns[clade.clades[0]]
y_bot = y_posns[clade.clades[-1]]
# Only apply widths to horizontal lines, like Archaeopteryx
draw_clade_lines(use_linecollection=True, orientation='vertical',
x_here=x_here, y_bot=y_bot, y_top=y_top, color=color, lw=lw)
# Draw descendents
for child in clade:
draw_clade(child, x_here, color, lw)
draw_clade(tree.root, 0, 'k', plt.rcParams['lines.linewidth'])
# If line collections were used to create clade lines, here they are added
# to the pyplot plot.
for i in horizontal_linecollections:
axes.add_collection(i)
for i in vertical_linecollections:
axes.add_collection(i)
# Aesthetics
if hasattr(tree, 'name') and tree.name:
axes.set_title(tree.name)
axes.set_xlabel('branch length')
axes.set_ylabel('taxa')
# Add margins around the tree to prevent overlapping the axes
xmax = max(x_posns.values())
axes.set_xlim(-0.05 * xmax, 1.25 * xmax)
# Also invert the y-axis (origin at the top)
# Add a small vertical margin, but avoid including 0 and N+1 on the y axis
axes.set_ylim(max(y_posns.values()) + 0.8, 0.2)
# Parse and process key word arguments as pyplot options
for key, value in kwargs.items():
try:
# Check that the pyplot option input is iterable, as required
[i for i in value]
except TypeError:
raise ValueError('Keyword argument "%s=%s" is not in the format '
'pyplot_option_name=(tuple), pyplot_option_name=(tuple, dict),'
' or pyplot_option_name=(dict) '
% (key, value))
if isinstance(value, dict):
getattr(plt, str(key))(**dict(value))
elif not (isinstance(value[0], tuple)):
getattr(plt, str(key))(*value)
elif (isinstance(value[0], tuple)):
getattr(plt, str(key))(*value[0], **dict(value[1]))
if do_show:
plt.show()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Phylo/_utils.py
|
Python
|
apache-2.0
| 20,833
|
[
"Biopython"
] |
6a22bd6933d5a87f99d0d4faf654baf686a68cecef561f91c27e06e7ea63b85e
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A. Beauchamp
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, division
__all__ = ['baker_hubbard', 'shrake_rupley', 'kabsch_sander', 'compute_distances',
'compute_displacements', 'compute_angles', 'compute_dihedrals',
'compute_phi', 'compute_psi', 'compute_chi1', 'compute_chi2',
'compute_chi3', 'compute_chi4', 'compute_omega', 'compute_rg',
'compute_contacts', 'compute_drid', 'compute_center_of_mass',
'wernet_nilsson', 'compute_dssp', 'compute_neighbors', 'compute_rdf',
'compute_nematic_order', 'compute_inertia_tensor',
# from thermodynamic_properties
'dipole_moments', 'static_dielectric', 'isothermal_compressability_kappa_T',
'thermal_expansion_alpha_P', 'density'
]
from mdtraj.geometry.rg import *
from mdtraj.geometry.angle import *
from mdtraj.geometry.distance import *
from mdtraj.geometry.dihedral import *
from mdtraj.geometry.hbond import *
from mdtraj.geometry.sasa import *
from mdtraj.geometry.contact import *
from mdtraj.geometry.drid import *
from mdtraj.geometry.dssp import *
from mdtraj.geometry.neighbors import *
from mdtraj.geometry.thermodynamic_properties import *
from mdtraj.geometry.rdf import *
from mdtraj.geometry.order import *
|
daviddesancho/mdtraj
|
mdtraj/geometry/__init__.py
|
Python
|
lgpl-2.1
| 2,293
|
[
"MDTraj"
] |
f4559801e9d46864424b6ac76b38a2f2f31946a2e32a1843b9a1a873860a8859
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.util.coord_utils import get_linear_interpolated_value
from monty.json import MSONable
"""
This module defines classes to represent the phonon density of states, etc.
"""
class PhononDos(MSONable):
"""
Basic DOS object. All other DOS objects are extended versions of this
object.
Args:
frequencies: A sequences of frequencies in THz
densities: A list representing the density of states.
"""
def __init__(self, frequencies, densities):
self.frequencies = np.array(frequencies)
self.densities = np.array(densities)
def get_smeared_densities(self, sigma):
"""
Returns the densities, but with a Gaussian smearing of
std dev sigma applied.
Args:
sigma: Std dev of Gaussian smearing function.
Returns:
Gaussian-smeared densities.
"""
from scipy.ndimage.filters import gaussian_filter1d
diff = [self.frequencies[i + 1] - self.frequencies[i]
for i in range(len(self.frequencies) - 1)]
avgdiff = sum(diff) / len(diff)
smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)
return smeared_dens
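# Usage sketch (illustrative values, not from the original file): sigma is
# given in THz and converted to grid points via the average frequency spacing,
# so the frequency grid is assumed to be (roughly) uniform.
#
#     >>> import numpy as np
#     >>> dos = PhononDos(np.linspace(0, 10, 101), np.random.rand(101))
#     >>> smeared = dos.get_smeared_densities(sigma=0.5)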
def __add__(self, other):
"""
Adds two DOS together. Checks that frequency scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
if not all(np.equal(self.frequencies, other.frequencies)):
raise ValueError("Frequencies of both DOS are not compatible!")
densities = self.densities + other.densities
return PhononDos(self.frequencies, densities)
def __radd__(self, other):
"""
Reflected addition of two DOS objects
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
return self.__add__(other)
def get_interpolated_value(self, frequency):
"""
Returns interpolated density for a particular frequency.
Args:
frequency: frequency to return the density for.
"""
return get_linear_interpolated_value(self.frequencies,
self.densities, frequency)
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
stringarray = ["#{:30s} {:30s}".format("Frequency", "Density")]
for i, frequency in enumerate(self.frequencies):
stringarray.append("{:.5f} {:.5f}"
.format(frequency, self.densities[i]))
return "\n".join(stringarray)
@classmethod
def from_dict(cls, d):
"""
Returns PhononDos object from dict representation of PhononDos.
"""
return cls(d["frequencies"], d["densities"])
def as_dict(self):
"""
Json-serializable dict representation of PhononDos.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"frequencies": list(self.frequencies),
"densities": list(self.densities)}
class CompletePhononDos(PhononDos):
"""
This wrapper class defines a total dos, and also provides a list of PDos.
Args:
structure: Structure associated with this particular DOS.
total_dos: total Dos for structure
pdoss: The pdoss are supplied as an {Site: Densities}
.. attribute:: pdos
Dict of partial densities of the form {Site:Densities}
"""
def __init__(self, structure, total_dos, pdoss):
super(CompletePhononDos, self).__init__(
frequencies=total_dos.frequencies, densities=total_dos.densities)
self.pdos = pdoss
self.structure = structure
def get_site_dos(self, site):
"""
Get the Dos for a site.
Args:
site: Site in Structure associated with CompletePhononDos.
Returns:
PhononDos containing summed orbital densities for site.
"""
return PhononDos(self.frequencies, self.pdos[site])
def get_element_dos(self):
"""
Get element projected Dos.
Returns:
dict of {Element: Dos}
"""
el_dos = {}
for site, atom_dos in self.pdos.items():
el = site.specie
if el not in el_dos:
el_dos[el] = atom_dos
else:
el_dos[el] += atom_dos
return {el: PhononDos(self.frequencies, densities)
for el, densities in el_dos.items()}
@classmethod
def from_dict(cls, d):
"""
Returns CompleteDos object from dict representation.
"""
tdos = PhononDos.from_dict(d)
struct = Structure.from_dict(d["structure"])
pdoss = {}
for at, pdos in zip(struct, d["pdos"]):
pdoss[at] = pdos
return cls(struct, tdos, pdoss)
def as_dict(self):
"""
Json-serializable dict representation of CompletePhononDos.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"frequencies": list(self.frequencies),
"densities": list(self.densities),
"pdos": []}
if len(self.pdos) > 0:
for at in self.structure:
d["pdos"].append(list(self.pdos[at]))
return d
def __str__(self):
return "Complete phonon DOS for " + str(self.structure)
|
xhqu1981/pymatgen
|
pymatgen/phonon/dos.py
|
Python
|
mit
| 5,900
|
[
"Gaussian",
"pymatgen"
] |
562a6b7025f8d000915b471a0e225661637567a2c6ce569d54ff0ebed20713e8
|
# TODO: to be moved to TestDIRAC and transformed into a real test
########################################################################
# File: ReplicateAndRegisterTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/13 18:38:55
########################################################################
""" :mod: FullChainTest
===================
.. module: FullChainTests
:synopsis: full chain integration test for DMS operation handlers
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unittest for replicateAndRegister operation handler
"""
# #
# @file ReplicateAndRegisterTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/13 18:39:13
# @brief Definition of ReplicateAndRegisterTests class.
# # imports
import random
import os
import sys
# # from DIRAC
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
# # from Core
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupsForUser, getDNForUsername
# # from RMS and DMS
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
########################################################################
class FullChainTest( object ):
"""
.. class:: FullChainTest
creates and puts to the ReqDB full chain tests for RMS and DMS operations
* RemoveFile
* PutAndRegister
* ReplicateAndRegister
* RemoveReplica
* RemoveFile
"""
def buildRequest( self, owner, group, sourceSE, targetSE1, targetSE2 ):
files = self.files( owner, group )
putAndRegister = Operation()
putAndRegister.Type = "PutAndRegister"
putAndRegister.TargetSE = sourceSE
for fname, lfn, size, checksum, guid in files:
putFile = File()
putFile.LFN = lfn
putFile.PFN = fname
putFile.Checksum = checksum
putFile.ChecksumType = "adler32"
putFile.Size = size
putFile.GUID = guid
putAndRegister.addFile( putFile )
replicateAndRegister = Operation()
replicateAndRegister.Type = "ReplicateAndRegister"
replicateAndRegister.TargetSE = "%s,%s" % ( targetSE1, targetSE2 )
for fname, lfn, size, checksum, guid in files:
repFile = File()
repFile.LFN = lfn
repFile.Size = size
repFile.Checksum = checksum
repFile.ChecksumType = "adler32"
replicateAndRegister.addFile( repFile )
removeReplica = Operation()
removeReplica.Type = "RemoveReplica"
removeReplica.TargetSE = sourceSE
for fname, lfn, size, checksum, guid in files:
removeReplica.addFile( File( {"LFN": lfn } ) )
removeFile = Operation()
removeFile.Type = "RemoveFile"
for fname, lfn, size, checksum, guid in files:
removeFile.addFile( File( {"LFN": lfn } ) )
removeFileInit = Operation()
removeFileInit.Type = "RemoveFile"
for fname, lfn, size, checksum, guid in files:
removeFileInit.addFile( File( {"LFN": lfn } ) )
req = Request()
req.addOperation( removeFileInit )
req.addOperation( putAndRegister )
req.addOperation( replicateAndRegister )
req.addOperation( removeReplica )
req.addOperation( removeFile )
return req
def files( self, userName, userGroup ):
""" get list of files in user domain """
files = []
for i in range( 10 ):
fname = "/tmp/testUserFile-%s" % i
if userGroup == "lhcb_user":
lfn = "/lhcb/user/%s/%s/%s" % ( userName[0], userName, fname.split( "/" )[-1] )
else:
lfn = "/lhcb/certification/test/rmsdms/%s" % fname.split( "/" )[-1]
fh = open( fname, "w+" )
for i in range( 100 ):
fh.write( str( random.randint( 0, i ) ) )
fh.close()
size = os.stat( fname ).st_size
checksum = fileAdler( fname )
guid = makeGuid( fname )
files.append( ( fname, lfn, size, checksum, guid ) )
return files
def putRequest( self, userName, userDN, userGroup, sourceSE, targetSE1, targetSE2 ):
""" test case for user """
req = self.buildRequest( userName, userGroup, sourceSE, targetSE1, targetSE2 )
req.RequestName = "test%s-%s" % ( userName, userGroup )
req.OwnerDN = userDN
req.OwnerGroup = userGroup
gLogger.always( "putRequest: request '%s'" % req.RequestName )
for op in req:
gLogger.always( "putRequest: => %s %s %s" % ( op.Order, op.Type, op.TargetSE ) )
for f in op:
gLogger.always( "putRequest: ===> file %s" % f.LFN )
reqClient = ReqClient()
delete = reqClient.deleteRequest( req.RequestName )
if not delete["OK"]:
gLogger.error( "putRequest: %s" % delete["Message"] )
return delete
put = reqClient.putRequest( req )
if not put["OK"]:
gLogger.error( "putRequest: %s" % put["Message"] )
return put
# # test execution
if __name__ == "__main__":
if len( sys.argv ) != 5:
gLogger.error( "Usage:\n python %s userGroup SourceSE TargetSE1 TargetSE2\n" )
sys.exit( -1 )
userGroup = sys.argv[1]
sourceSE = sys.argv[2]
targetSE1 = sys.argv[3]
targetSE2 = sys.argv[4]
gLogger.always( "will use '%s' group" % userGroup )
admin = DiracAdmin()
userName = admin._getCurrentUser()
if not userName["OK"]:
gLogger.error( userName["Message"] )
sys.exit( -1 )
userName = userName["Value"]
gLogger.always( "current user is '%s'" % userName )
userGroups = getGroupsForUser( userName )
if not userGroups["OK"]:
gLogger.error( userGroups["Message"] )
sys.exit( -1 )
userGroups = userGroups["Value"]
if userGroup not in userGroups:
gLogger.error( "'%s' is not a member of the '%s' group" % ( userName, userGroup ) )
sys.exit( -1 )
userDN = getDNForUsername( userName )
if not userDN["OK"]:
gLogger.error( userDN["Message"] )
sys.exit( -1 )
userDN = userDN["Value"][0]
gLogger.always( "userDN is %s" % userDN )
fct = FullChainTest()
put = fct.putRequest( userName, userDN, userGroup, sourceSE, targetSE1, targetSE2 )
|
sposs/DIRAC
|
DataManagementSystem/test/IntegrationFCT.py
|
Python
|
gpl-3.0
| 6,261
|
[
"DIRAC"
] |
1a9fc9f73154f4eace182773634ead60e7e710223a314ad58c759a655ed50072
|
#!/usr/bin/env python
# coding: utf-8
import librosa
import logging
import numpy as np
from scipy.spatial import distance
from scipy import signal
from scipy.ndimage import filters
from msaf.algorithms.interface import SegmenterInterface
import msaf.utils as U
def median_filter(X, M=8):
"""Median filter along the first axis of the feature matrix X."""
for i in range(X.shape[1]):
X[:, i] = filters.median_filter(X[:, i], size=M)
return X
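# Usage sketch: the median filter smooths each feature column independently.
# Note that it modifies X in place (and also returns it), so pass a copy if
# the original matrix is still needed.
#
#     >>> X = np.random.rand(100, 6)
#     >>> X_smooth = median_filter(X.copy(), M=8)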
def gaussian_filter(X, M=8, axis=0):
"""Gaussian filter along the first axis of the feature matrix X."""
for i in range(X.shape[axis]):
if axis == 1:
X[:, i] = filters.gaussian_filter(X[:, i], sigma=M / 2.)
elif axis == 0:
X[i, :] = filters.gaussian_filter(X[i, :], sigma=M / 2.)
return X
def compute_gaussian_krnl(M):
"""Creates a gaussian kernel following Serra's paper."""
g = signal.gaussian(M, M / 3., sym=True)
G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))
G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]
G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]
return G
def compute_ssm(X, metric="seuclidean"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
D /= float(D.max())
return 1 - D
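# Sketch (assuming X holds one feature frame per row): the SSM is 1 minus the
# normalized pairwise distance matrix, so the diagonal is exactly 1 and values
# near 1 mean high similarity.
#
#     >>> X = np.random.rand(50, 12)  # e.g. 50 beat-synchronous feature frames
#     >>> S = compute_ssm(X)
#     >>> S.shape, float(S.max())
#     ((50, 50), 1.0)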
def compute_nc(X):
"""Computes the novelty curve from the structural features."""
N = X.shape[0]
# nc = np.sum(np.diff(X, axis=0), axis=1) # Difference between SF's
nc = np.zeros(N)
for i in range(N - 1):
nc[i] = distance.euclidean(X[i, :], X[i + 1, :])
# Normalize
nc += np.abs(nc.min())
nc /= float(nc.max())
return nc
def pick_peaks(nc, L=16, offset_denom=0.1):
"""Obtain peaks from a novelty curve using an adaptive threshold."""
offset = nc.mean() * float(offset_denom)
th = filters.median_filter(nc, size=L) + offset
#th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
#import pylab as plt
#plt.plot(nc)
#plt.plot(th)
#plt.show()
# th = np.ones(nc.shape[0]) * nc.mean() - 0.08
peaks = []
for i in range(1, nc.shape[0] - 1):
# is it a peak?
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
# is it above the threshold?
if nc[i] > th[i]:
peaks.append(i)
return peaks
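# Worked example (toy novelty curve, not from the original file): a point is a
# peak when it is a local maximum that clears the median-filtered threshold.
#
#     >>> nc = np.array([0., 1., 0., 0., 2., 0.])
#     >>> pick_peaks(nc, L=3, offset_denom=0.1)
#     [1, 4]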
def circular_shift(X):
"""Shifts circularly the X squre matrix in order to get a
time-lag matrix."""
N = X.shape[0]
L = np.zeros(X.shape)
for i in range(N):
L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)])
return L
def embedded_space(X, m, tau=1):
"""Time-delay embedding with m dimensions and tau delays."""
N = X.shape[0] - int(np.ceil(m))
Y = np.zeros((N, int(np.ceil(X.shape[1] * m))))
for i in range(N):
# print X[i:i+m,:].flatten().shape, w, X.shape
# print Y[i,:].shape
rem = int((m % 1) * X.shape[1])  # Remainder for float m
Y[i, :] = np.concatenate((X[i:i + int(m), :].flatten(),
X[i + int(m), :rem]))
return Y
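# Shape sketch: with m=3 and a (100 x 12) feature matrix, each output row
# stacks three consecutive input frames, so the embedding has shape (97, 36).
#
#     >>> X = np.random.rand(100, 12)
#     >>> embedded_space(X, m=3).shape
#     (97, 36)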
class Segmenter(SegmenterInterface):
"""
This script identifies the boundaries of a given track using the Serrà
method:
Serrà, J., Müller, M., Grosche, P., & Arcos, J. L. (2012). Unsupervised
Detection of Music Boundaries by Time Series Structure Features.
In Proc. of the 26th AAAI Conference on Artificial Intelligence
(pp. 1613–1619). Toronto, Canada.
"""
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated times for the segment boundaries in frame indices.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# Structural Features params
Mp = self.config["Mp_adaptive"] # Size of the adaptive threshold for
# peak picking
od = self.config["offset_thres"] # Offset coefficient for adaptive
# thresholding
M = self.config["M_gaussian"] # Size of gaussian kernel in beats
m = self.config["m_embedded"] # Number of embedded dimensions
k = self.config["k_nearest"] # k*N-nearest neighbors for the
# recurrence plot
# Preprocess to obtain features, times, and input boundary indices
F = self._preprocess()
# Normalize
F = U.normalize(F, norm_type=self.config["bound_norm_feats"])
# Check size in case the track is too short
if F.shape[0] > 20:
if self.framesync:
red = 0.1
F_copy = np.copy(F)
F = librosa.util.utils.sync(
F.T, np.linspace(0, F.shape[0], num=int(F.shape[0] * red), dtype=np.int),
pad=False).T
# Embedding the feature space (i.e. shingle)
E = embedded_space(F, m)
# plt.imshow(E.T, interpolation="nearest", aspect="auto"); plt.show()
# Recurrence matrix
R = librosa.segment.recurrence_matrix(
E.T,
k=k * int(F.shape[0]),
width=1, # zeros from the diagonal
metric="euclidean",
sym=True).astype(np.float32)
# Circular shift
L = circular_shift(R)
#plt.imshow(L, interpolation="nearest", cmap=plt.get_cmap("binary"))
#plt.show()
# Obtain structural features by filtering the lag matrix
SF = gaussian_filter(L.T, M=M, axis=1)
SF = gaussian_filter(SF, M=1, axis=0)
# plt.imshow(SF.T, interpolation="nearest", aspect="auto")
#plt.show()
# Compute the novelty curve
nc = compute_nc(SF)
# Find peaks in the novelty curve
est_bounds = pick_peaks(nc, L=Mp, offset_denom=od)
# Re-align embedded space
est_bounds = np.asarray(est_bounds) + int(np.ceil(m / 2.))
if self.framesync:
est_bounds = np.asarray(est_bounds // red, dtype=np.int)
F = F_copy
else:
est_bounds = []
# Add first and last frames
est_idxs = np.concatenate(([0], est_bounds, [F.shape[0] - 1]))
est_idxs = np.unique(est_idxs)
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1
# Empty labels
est_labels = np.ones(len(est_idxs) - 1) * - 1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
# plt.figure(1)
# plt.plot(nc);
# [plt.axvline(p, color="m", ymin=.6) for p in est_bounds]
# [plt.axvline(b, color="b", ymax=.6, ymin=.3) for b in brian_bounds]
# [plt.axvline(b, color="g", ymax=.3) for b in ann_bounds]
# plt.show()
return est_idxs, est_labels
|
urinieto/msaf
|
msaf/algorithms/sf/segmenter.py
|
Python
|
mit
| 7,008
|
[
"Gaussian"
] |
d6ed1f3b7b806d74b55a71536a27844f392a953b0732af18291da6d345bbe857
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
import os
import sys
import time
import subprocess
if len(sys.argv) not in [5, 6, 7, 8, 9, 10]:
print("""Usage: %s input_file logfile doperltest top_srcdir doreaptest alt_output_file alt_psi4_exe alt_psi4datadir""" % (sys.argv[0]))
sys.exit(1)
# extract run condition from arguments
python_exec = sys.argv[0]
infile = sys.argv[1]
logfile = sys.argv[2]
psiautotest = sys.argv[3]
top_srcdir = sys.argv[4]
sowreap = sys.argv[5]
if len(sys.argv) >= 7:
outfile = sys.argv[6]
else:
outfile = 'output.dat'
if len(sys.argv) >= 8:
psi = sys.argv[7]
else:
psi = '../../bin/psi4'
if len(sys.argv) >= 9:
psidatadir = sys.argv[8]
else:
psidatadir = os.path.dirname(os.path.realpath(psi)) + '/../share/psi4'
if len(sys.argv) >= 10:
psilibdir = sys.argv[9] + os.path.sep
else:
psilibdir = os.path.abspath('/../')
# open logfile and print test case header
try:
loghandle = open(logfile, 'a')
except IOError as e:
print("""I can't write to %s: %s""" % (logfile, e))
loghandle.write("""\n%s\n%s\n""" % (os.path.dirname(infile).split(os.sep)[-1], time.strftime("%Y-%m-%d %H:%M")))
def backtick(exelist):
"""Executes the command-argument list in *exelist*, directing the
standard output to screen and file logfile and string p4out. Returns
the system status of the call.
"""
try:
retcode = subprocess.Popen(exelist, bufsize=0, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
sys.stderr.write('Command %s execution failed: %s\n' % (exelist, e.strerror))
sys.exit(1)
p4out = ''
while True:
data = retcode.stdout.readline()
if not data:
break
sys.stdout.write(data) # screen
loghandle.write(data) # file
loghandle.flush()
p4out += data # string
while True:
retcode.poll()
exstat = retcode.returncode
if exstat is not None:
return exstat
time.sleep(0.1)
loghandle.close()
# not sure why 2nd while loop needed, as 1st while loop has always
# been adequate for driver interfaces. nevertheless, to collect
# the proper exit code, 2nd while loop very necessary.
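# Usage sketch (hypothetical file names, reusing this script's variables):
# backtick() streams the child's stdout to both the screen and the logfile,
# and returns its exit status.
#
#     status = backtick([psi, 'input.dat', 'output.dat', '-l', psidatadir])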
# run psi4 and collect testing status from any compare_* in input file
if os.path.isfile(infile):
exelist = [psi, infile, outfile, '-l', psidatadir]
# On Windows set Python interpreter explicitly as the shebang is ignored
if sys.platform.startswith('win'):
exelist = [sys.executable] + exelist
pyexitcode = backtick(exelist)
elif os.path.isfile(infile.replace(".dat", ".py")):
infile = infile.replace(".dat", ".py")
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] += os.pathsep + psilibdir
else:
os.environ["PYTHONPATH"] = psilibdir
outfile = os.path.dirname(infile) + os.path.sep + outfile
pyexitcode = backtick(["python", infile, " > ", outfile])
else:
raise Exception("\n\nError: Input file %s not found\n" % infile)
if sowreap == 'true':
try:
retcode = subprocess.Popen([sys.executable, '%s/tests/reap.py' %
(top_srcdir), infile, outfile, logfile, psi, psidatadir])
except OSError as e:
print("""Can't find reap script: %s """ % (e))
while True:
retcode.poll()
exstat = retcode.returncode
if exstat is not None:
reapexitcode = exstat
break
time.sleep(0.1)
else:
reapexitcode = None
# additionally invoke autotest script comparing output.dat to output.ref
if psiautotest == 'true':
os.environ['SRCDIR'] = os.path.dirname(infile)
try:
retcode = subprocess.Popen(['perl', '%s/tests/psitest.pl' % (top_srcdir), infile, logfile])
except IOError as e:
print("""Can't find psitest script: %s""" % (e))
while True:
retcode.poll()
exstat = retcode.returncode
if exstat is not None:
plexitcode = exstat
break
time.sleep(0.1)
else:
plexitcode = None
# combine, print, and return (0/1) testing status
exitcode = 0 if (pyexitcode == 0 and (plexitcode is None or plexitcode == 0) and (reapexitcode is None or reapexitcode == 0)) else 1
print('Exit Status: infile (', pyexitcode, '); autotest (', plexitcode, '); sowreap (', reapexitcode, '); overall (', exitcode, ')')
sys.exit(exitcode)
|
psi4/psi4
|
tests/runtest.py
|
Python
|
lgpl-3.0
| 5,306
|
[
"Psi4"
] |
61c8b3d3c9705082e7d4c0cbcb96761137f07c371cbfe348042cdba155026c48
|
# Copyright (C) 2014-2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import contextlib
import cPickle
import logging
import os
import pylru
import warnings
import cliapp
import morphlib
from morphlib.util import sanitise_morphology_path
tree_cache_size = 10000
tree_cache_filename = 'trees.cache.pickle'
class PickleCacheManager(object):
'''Cache manager for PyLRU that reads and writes to Pickle files.
The 'pickle' format is less than ideal in many ways and is actually
slower than JSON in Python. However, the data we need to cache is keyed
by tuples and in JSON a dict can only be keyed with strings. For now,
using 'pickle' seems to be the least worst option.
'''
def __init__(self, filename, size):
self.filename = filename
self.size = size
def _populate_cache_from_file(self, filename, cache):
try:
with open(filename, 'r') as f:
data = cPickle.load(f)
for key, value in data.iteritems():
cache[key] = value
except (EOFError, IOError, cPickle.PickleError) as e:
logging.warning('Failed to load cache %s: %s', self.filename, e)
def load_cache(self):
'''Create a pylru.lrucache object prepopulated with saved data.'''
cache = pylru.lrucache(self.size)
# There should be a more efficient way to do this, by hooking into
# the pickle module directly.
self._populate_cache_from_file(self.filename, cache)
return cache
def save_cache(self, cache):
'''Save the data from a pylru.lrucache object to disk.
Any changes that have been made by other instances or processes since
load_cache() was called will be overwritten.
'''
data = {}
for key, value in cache.items():
data[key] = value
try:
with morphlib.savefile.SaveFile(self.filename, 'w') as f:
cPickle.dump(data, f)
except (IOError, cPickle.PickleError) as e:
logging.warning('Failed to save cache to %s: %s', self.filename, e)
@contextlib.contextmanager
def open(self):
cache = self.load_cache()
try:
yield cache
except BaseException as e:
raise
else:
self.save_cache(cache)
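# Usage sketch (illustrative key and path): open() yields the cache loaded
# from disk and saves it back on a clean exit; an exception skips the save.
#
#     manager = PickleCacheManager('/tmp/' + tree_cache_filename, tree_cache_size)
#     with manager.open() as cache:
#         cache[('baserock:baserock/definitions', 'commit-sha1')] = 'tree-sha1'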
class SourceResolverError(cliapp.AppException):
pass
class MorphologyNotFoundError(SourceResolverError):
def __init__(self, filename):
SourceResolverError.__init__(
self, "Couldn't find definition file to build: %s" % filename)
class MorphologyReferenceNotFoundError(SourceResolverError):
def __init__(self, filename, reference_file):
SourceResolverError.__init__(self,
"Couldn't find morphology: %s "
"referenced in %s"
% (filename, reference_file))
class MorphologyNameError(SourceResolverError):
def __init__(self, name_in_morphology, name, filename):
SourceResolverError.__init__(self,
"Name '%s' doesn't match '%s' in "
"morphology: %s"
% (name_in_morphology, name, filename))
# Callers may want to give the user a special error message if we hit an
# InvalidRefError in the definitions.git repo. Currently a separate exception
# type seems the easiest way to do that, but adding enough detail to the
# gitdir.InvalidRefError class may make this class redundant in future.
class InvalidDefinitionsRefError(SourceResolverError):
def __init__(self, repo_url, ref):
self.repo_url = repo_url
self.ref = ref
super(InvalidDefinitionsRefError, self).__init__(
"Ref %s was not found in repo %s." % (ref, repo_url))
class SourceResolver(object):
'''Provides a way of resolving the set of sources for a given system.
There are three levels of caching involved in resolving the sources to
build.
The canonical repo for each source is specified in the build-command
(for strata and systems) or in the stratum morphology (for chunks). It will
be either a normal URL, or a keyed URL using a repo-alias like
'baserock:baserock/definitions'.
Each commit used in a build is resolved to a tree SHA1, which means that
merge commits and changes to commit messages don't affect the cache
identity of a chunk. This does mean we need to query every repo in the
build graph, though.
All requests for information on a repo use the 'repocache' module. This
maintains a local copy of all the Git repos we need to work with. A repo
cache can also use a remote 'morph-cache-server' instance, if available,
to query certain information about a repo without cloning it locally.
Using this we can resolve commits to trees without having to clone every
repo locally, which is a huge performance improvement in some cases.
The third layer of caching is a simple commit SHA1 -> tree SHA mapping. It
turns out that even if all repos are available locally, running
'git rev-parse' on hundreds of repos requires a lot of IO and can take
several minutes. Likewise, on a slow network connection it is time
consuming to keep querying the remote repo cache. This third layer of
caching works around both of those issues.
The need for 3 levels of caching highlights design inconsistencies in
Baserock, but for now it is worth the effort to maintain this code to save
users from waiting 7 minutes each time that they want to build. The level 3
cache is fairly simple because commits are immutable, so there is no danger
of this cache being stale as long as it is indexed by commit SHA1. Due to
the policy in Baserock of always using a commit SHA1 (rather than a named
ref) in the system definitions, it makes repeated builds of a system very
fast as no resolution needs to be done at all.
'''
def __init__(self, repo_cache, tree_cache_manager, status_cb=None):
self.repo_cache = repo_cache
self.tree_cache_manager = tree_cache_manager
self.update = repo_cache.update_gits
self.status = status_cb
def _resolve_ref(self, resolved_trees, reponame, ref):
'''Resolves commit and tree sha1s of the ref in a repo and returns it.
If update is True then this has the side-effect of updating or cloning
the repository into the local repo cache.
'''
# The Baserock reference definitions use absolute refs, so if the
# absref is cached we can short-circuit all this code.
if (reponame, ref) in resolved_trees:
logging.debug('Returning tree (%s, %s) from tree cache',
reponame, ref)
return ref, resolved_trees[(reponame, ref)]
logging.debug('tree (%s, %s) not in cache', reponame, ref)
absref, tree = self.repo_cache.resolve_ref_to_commit_and_tree(reponame,
ref)
logging.debug('Writing tree to cache with ref (%s, %s)',
reponame, absref)
resolved_trees[(reponame, absref)] = tree
return absref, tree
def _get_file_contents_from_definitions(self, definitions_checkout_dir,
filename):
fp = os.path.join(definitions_checkout_dir, filename)
if os.path.exists(fp):
with open(fp) as f:
return f.read()
else:
logging.debug("Didn't find %s in definitions", filename)
return None
def _check_version_file(self, definitions_checkout_dir):
version_text = self._get_file_contents_from_definitions(
definitions_checkout_dir, 'VERSION')
return morphlib.definitions_version.check_version_file(version_text)
def _get_defaults(self, definitions_checkout_dir,
definitions_version=7): # pragma: no cover
'''Return the default build system commands, and default split rules.
This function returns a tuple with two dicts.
The defaults are read from a file named DEFAULTS in the definitions
directory, if the definitions follow format version 7 or later. If the
definitions follow version 6 or earlier, hardcoded defaults are used.
'''
# Read default build systems and split rules from DEFAULTS file.
defaults_text = self._get_file_contents_from_definitions(
definitions_checkout_dir, 'DEFAULTS')
if defaults_text is None:
warnings.warn("No DEFAULTS file found.")
defaults = morphlib.defaults.Defaults(definitions_version,
text=defaults_text)
return defaults.build_systems(), defaults.split_rules()
def _get_morphology(self, resolved_morphologies, definitions_checkout_dir,
morph_loader, filename):
'''Read the morphology at the specified location.
Returns None if the file does not exist in the specified commit.
'''
if filename in resolved_morphologies:
return resolved_morphologies[filename]
text = self._get_file_contents_from_definitions(
definitions_checkout_dir, filename)
if text is None:
morph = None
else:
morph = morph_loader.load_from_string(text, filename)
resolved_morphologies[filename] = morph
return morph
def _process_definitions_with_children(self,
resolved_morphologies,
definitions_checkout_dir,
definitions_repo,
definitions_ref,
definitions_absref,
definitions_tree,
morph_loader,
system_filenames,
visit,
predefined_split_rules):
# Initialise definitions_queue with tuples (name, filename).
# We don't need system's filename, so use 'None'
definitions_queue = collections.deque((None, f)
for f in system_filenames)
chunk_queue = set()
def get_morphology(filename):
return self._get_morphology(resolved_morphologies,
definitions_checkout_dir, morph_loader,
filename)
while definitions_queue:
name, filename = definitions_queue.popleft()
morphology = get_morphology(filename)
if morphology is None:
raise MorphologyNotFoundError(filename)
visit(definitions_repo, definitions_ref, filename,
definitions_absref, definitions_tree, morphology,
predefined_split_rules)
if morphology['kind'] == 'cluster':
raise cliapp.AppException(
"Cannot build a morphology of type 'cluster'.")
elif morphology['kind'] == 'system':
# name is not mandatory, use 'None' if not defined.
definitions_queue.extend((s.get('name'),
sanitise_morphology_path(s['morph']))
for s in morphology['strata'])
elif morphology['kind'] == 'stratum':
# If we have the name of the stratum, fail if it doesn't
# match with the one set in the stratum file.
if name and name != morphology.get('name'):
raise MorphologyNameError(morphology['name'], name,
filename)
if morphology['build-depends']:
# build-depends don't have names. Use 'None' as name.
definitions_queue.extend((None,
sanitise_morphology_path(s['morph']))
for s in morphology['build-depends'])
for c in morphology['chunks']:
if 'morph' in c:
# Now, does this path actually exist?
path = c['morph']
morphology = get_morphology(path)
if morphology is None:
raise MorphologyReferenceNotFoundError(
path, filename)
chunk_queue.add((c['name'], c['repo'], c['ref'],
path, None))
else:
# We invent a filename here, so that the rest of the
# Morph code doesn't need to know about the predefined
# build instructions.
chunk_filename = c['name'] + '.morph'
chunk_queue.add((c['name'], c['repo'], c['ref'],
chunk_filename, c['build-system']))
return chunk_queue
def _create_morphology_for_build_system(self, morph_loader, buildsystem,
morph_name):
morph = buildsystem.get_morphology(morph_name)
morph_loader.validate(morph)
morph_loader.set_commands(morph)
morph_loader.set_defaults(morph)
return morph
def process_chunk(self, resolved_morphologies, resolved_trees,
definitions_checkout_dir, morph_loader, chunk_name,
chunk_repo, chunk_ref, filename, chunk_buildsystem,
visit, predefined_build_systems, predefined_split_rules):
absref, tree = self._resolve_ref(resolved_trees, chunk_repo, chunk_ref)
if chunk_buildsystem is None:
# Build instructions defined in a chunk .morph file. An error is
# already raised in _process_definitions_with_children() if the
# 'morph' field points to a file that doesn't exist.
morphology = self._get_morphology(resolved_morphologies,
definitions_checkout_dir,
morph_loader, filename)
if morphology['name'] != chunk_name:
warnings.warn("Name '%s' doesn't match '%s in morpholgy: %s"
% (morphology['name'], chunk_name, filename))
else:
# Chunk uses one of the predefined build systems. In this case
# 'filename' will be faked (name of chunk + '.morph').
try:
buildsystem = predefined_build_systems[chunk_buildsystem]
except KeyError:
raise SourceResolverError("Unknown build system for %s: %s" %
(filename, chunk_buildsystem))
morphology = self._create_morphology_for_build_system(
morph_loader, buildsystem, chunk_name)
visit(chunk_repo, chunk_ref, filename, absref, tree, morphology,
predefined_split_rules)
def add_morphs_to_source_pool(self, definitions_repo, definitions_ref,
system_filenames, pool,
definitions_original_ref=None):
def add_to_pool(reponame, ref, filename, absref, tree, morphology,
predefined_split_rules):
# If there are duplicate chunks which have the same 'name' and the
# same build instructions, we might cause a stack overflow in
# cachekeycomputer.py when trying to hash the build graph. The
# _find_duplicate_chunks() function doesn't handle this case, it
# is checking for duplicates with the same name but different build
# instructions.
if morphology['kind'] != 'stratum':
if pool.lookup(reponame, ref, filename):
raise morphlib.Error(
"There are multiple versions of component '%s'" %
morphology['name'])
sources = morphlib.source.make_sources(
reponame, ref, filename, absref, tree, morphology,
predefined_split_rules)
for source in sources:
pool.add(source)
resolved_morphologies = {}
with morphlib.util.temp_dir() as definitions_checkout_dir, \
self.tree_cache_manager.open() as resolved_trees:
# Resolve the repo, ref pair for definitions repo, cache result
try:
definitions_absref, definitions_tree = self._resolve_ref(
resolved_trees, definitions_repo, definitions_ref)
except morphlib.gitdir.InvalidRefError as e:
raise InvalidDefinitionsRefError(
definitions_repo, definitions_ref)
if definitions_original_ref:
definitions_ref = definitions_original_ref
definitions_cached_repo = self.repo_cache.get_updated_repo(
repo_name=definitions_repo, ref=definitions_absref)
definitions_cached_repo.extract_commit(
definitions_absref, definitions_checkout_dir)
pool.definitions_version = self._check_version_file(
definitions_checkout_dir)
predefined_build_systems, predefined_split_rules = \
self._get_defaults(
definitions_checkout_dir, pool.definitions_version)
morph_loader = morphlib.morphloader.MorphologyLoader(
predefined_build_systems=predefined_build_systems)
# First, process the system and its stratum morphologies. These
# will all live in the same Git repository, and will point to
# various chunk morphologies.
chunk_queue = self._process_definitions_with_children(
resolved_morphologies, definitions_checkout_dir,
definitions_repo, definitions_ref, definitions_absref,
definitions_tree, morph_loader, system_filenames,
add_to_pool, predefined_split_rules)
# Now process all the chunks involved in the build.
for name, repo, ref, filename, buildsystem in chunk_queue:
self.process_chunk(resolved_morphologies, resolved_trees,
definitions_checkout_dir, morph_loader,
name, repo, ref, filename, buildsystem,
add_to_pool, predefined_build_systems,
predefined_split_rules)
class DuplicateChunkError(morphlib.Error):
def _make_msg(self, (name, sources)): # pragma: no cover
return ("Multiple `%s' chunks detected:\n\t%s"
% (name, '\n\t'.join((str(s) for s in sources))))
def __init__(self, duplicate_chunks): # pragma: no cover
msgs = (self._make_msg(dup) for dup in duplicate_chunks.iteritems())
msg = '\n'.join(msgs)
super(DuplicateChunkError, self).__init__(msg)
def _find_duplicate_chunks(sourcepool): #pragma: no cover
''' Searches the sourcepool for duplicate chunks
returns a dictionary of any duplicates keyed on chunk source name,
the value of each item is a list of duplicate chunk sources
'''
chunk_sources = ((s.name, s) for s in sourcepool
if s.morphology['kind'] == 'chunk')
chunk_sources_by_name = collections.defaultdict(list)
for (name, source) in chunk_sources:
chunk_sources_by_name[name].append(source)
return {k: v for (k, v) in chunk_sources_by_name.iteritems() if len(v) > 1}
def create_source_pool(repo_cache, repo, ref, filenames,
original_ref=None, status_cb=None):
'''Find all the sources involved in building a given system.
Given a system morphology, this function will traverse the tree of stratum
and chunk morphologies that the system points to and create appropriate
Source objects. These are added to a new SourcePool object, which is
returned.
Note that Git submodules are not considered 'sources' in the current
implementation, and so they must be handled separately.
The 'repo_cache' parameter specifies a repo cache which is used when
accessing the source repos. If a git_resolve_cache_server is set for this
repo cache, and all repos in the build are known to it, then this function
will only need the definitions.git repo available locally. If not, then all
repos must be cloned in order to resolve the refs to tree SHA1s, which is
a slow process!
'''
pool = morphlib.sourcepool.SourcePool()
tree_cache_manager = PickleCacheManager(
os.path.join(repo_cache.cachedir, tree_cache_filename),
tree_cache_size)
resolver = SourceResolver(repo_cache, tree_cache_manager, status_cb)
resolver.add_morphs_to_source_pool(repo, ref, filenames, pool,
definitions_original_ref=original_ref)
# No two chunks may have the same name
duplicate_chunks = _find_duplicate_chunks(pool)
if duplicate_chunks:
raise DuplicateChunkError(duplicate_chunks)
return pool
|
perryl/morph
|
morphlib/sourceresolver.py
|
Python
|
gpl-2.0
| 22,183
|
[
"VisIt"
] |
1b75312c4424f274a76973ef7ea670706a76714b588cec54e1232eb670147132
|
""" Tests generating files for qm torsion scan """
import unittest
from torsionfit.tests.utils import get_fun, has_openeye
import torsionfit.qmscan.torsion_scan as qmscan
import tempfile
import os
from fnmatch import fnmatch
import shutil
class TestQmscan(unittest.TestCase):
@unittest.skipUnless(has_openeye, 'Cannot test without openeye')
def test_generat_torsions(self):
""" Tests finding torsion to drive """
mol = get_fun('butane.pdb')
outfile_path = tempfile.mkdtemp()
qmscan.generate_torsions(mol=mol, path=outfile_path, interval=30)
input_files = []
pattern = '*.pdb'
for path, subdir, files in os.walk(outfile_path):
for name in files:
if fnmatch(name, pattern):
input_files.append(os.path.join(path, name))
contents = open(input_files[0]).read()
pdb = get_fun('butane_10_7_4_3_0.pdb')
compare_contents = open(pdb).read()
self.assertEqual(contents, compare_contents)
shutil.rmtree(outfile_path)
def test_generate_input(self):
"""Test generate psi4 input files"""
root = get_fun('torsion_scan/10_7_4_3')
qmscan.generate_scan_input(root, 'pdb', 'butane', ['MP2'], ['aug-cc-pvtz'], symmetry='C1')
contents = open(get_fun('torsion_scan/10_7_4_3/0/butane_10_7_4_3_0.dat')).read()
compare_content = open(get_fun('butane_10_7_4_3_0.dat')).read()
self.assertEqual(contents, compare_content)
#
|
ChayaSt/Torsions
|
torsionfit/tests/test_qmscan.py
|
Python
|
gpl-2.0
| 1,512
|
[
"Psi4"
] |
d68a29377e021d064f42b95e52a41d84d763274d7879dda3631c371d34c15f56
|
#!/people/thnfs/homes/wehnerj/python-virtual/bin/python
import MDAnalysis as MD
import MDAnalysis.core.log as MDlog
import collections as col
#from MDAnalysis.core.parallel.distances import distance_array
import numpy as np
from MDAnalysis.core.distances import distance_array
from MDAnalysis.core.util import normal,angle
import numpy.linalg as lg
import sys
import MDAnalysis.KDTree.NeighborSearch as NS
from scipy.signal import argrelmin
import itertools as it
import argparse as ap
import numpy.ma as ma
parser=ap.ArgumentParser(description="Calculator for the translational order parameter tau and the structural order parameters S4 and S6. Requires a .gro file and an atom name for which the order parameter should be calculated; optionally uses an .xtc file as well")
parser.add_argument("-f","--structure", type=str,default="conf.gro",help="Structure .gro or .tpr file of the structure")
parser.add_argument('-t',"--trajectory", type=str,help="Trajectory .xtc file of the structure")
parser.add_argument('-s',"--steps", type=int,default=50,help="Last n timesteps of trajectory to average over")
parser.add_argument('--Atom', type=str, required=True, help='Identifier of atom, e.g. "CN8"')
args=parser.parse_args()
if args.trajectory:
u=MD.Universe(args.structure,args.trajectory)
x=u.trajectory.numframes
start=x-args.steps
else:
u=MD.Universe(args.structure)
x=u.trajectory.numframes
start=0
def transorderparameter(rhoz,begin,end,numofsteps):
if begin<0.001:
begin=0.001
d=np.linspace(begin,end,num=numofsteps)
dexpanded=np.tile(d,(np.shape(rhoz)[0],1)).T
#print np.shape(dexpanded)
rhozexpanded=np.tile(rhoz,(np.shape(d)[0],1))
#print np.shape(rhozexpanded)
real=np.sum(np.cos(2*np.pi*rhozexpanded/dexpanded)/float(np.shape(rhoz)[0]),axis=1)
imag=np.sum(np.sin(2*np.pi*rhozexpanded/dexpanded)/float(np.shape(rhoz)[0]),axis=1)
tau=np.sqrt(real**2+imag**2)
#tauofd=np.vstack((d,tau))
return tau,d
def calculateangles(centeratom,atomlist,coordinates,k,boxvector,normvecmatrix):
vectors=[]
real1=[]
imag1=[]
for atom in atomlist:
#print "coordinates",coordinates[centeratom],coordinates[atom]
distancevector=coordinates[centeratom]-coordinates[atom]
#print distancevector
if all(coordinates[centeratom]==coordinates[atom]):
print coordinates[centeratom],coordinates[atom]
distancevectorpbc=np.where(distancevector>0.5*boxvector,distancevector-boxvector,distancevector)
distancevectorpbc=np.where(distancevectorpbc<-0.5*boxvector,distancevectorpbc+boxvector,distancevectorpbc)
#print "distances",distancevector,distancevectorpbc,boxvector
vectors.append(np.dot(distancevectorpbc,normvecmatrix))
#vectors.append(distancevectorpbc)
#print vectors
vec0=vectors.pop()
for vec in vectors:
angleij=angle(vec0,vec)
if np.isnan(angleij):
print angleij,vec0,vec
real1.append(np.cos(k*angleij))
imag1.append(np.sin(k*angleij))
real1=np.array(real1)
imag1=np.array(imag1)
#sys.exit()
return real1,imag1
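# Descriptive note: calculateangles takes the angle theta between each
# neighbour bond and a reference bond and returns cos(k*theta) and
# sin(k*theta) for k = (4, 6); S_k = |<exp(i*k*theta)>| is then ~1 for
# k-fold (square / hexagonal) in-plane order and ~0 for disorder.
# Below, S4 averages over the 4 nearest bonds and S6 over all of them.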
def latticeorderparameter(coordinates,neighborlist,boxvector,normvecmatrix):
S6s=[]
S4s=[]
for array in neighborlist:
#print array
real,imag=calculateangles(array[0],array[1:],coordinates,np.array([4,6]),boxvector,normvecmatrix)
S6=np.sqrt((np.average(real[:,1],axis=0))**2+(np.average(imag[:,1],axis=0))**2)
S4=np.sqrt((np.average(real[:4,0],axis=0))**2+(np.average(imag[:4,0],axis=0))**2)
S6s.append(S6)
S4s.append(S4)
S6=np.average(S6s)
S4=np.average(S4s)
return S6,S4
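# Descriptive note: for every atom, pick the 7 smallest entries of its
# distance-matrix row (the atom itself at distance 0 plus its 6 nearest
# neighbours) and sort them by distance, so element 0 of each neighbour
# list is the centre atom itself.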
def nextneighborlist(d):
neighborlist6=[]
for line in d:
#print np.min(line[line>0]),np.argmin(line[line>0])
nN=np.argpartition(line,7)[:7]
#print nN
distance=line[nN]
#print distance
#print line[99],line[100]
#sys.exit()
sortpair=nN[np.argsort(distance)]
neighborlist6.append(sortpair)
#print neighborlist6
return neighborlist6
pm = MDlog.ProgressMeter(x, interval=10)
print "trajectory has {} frames.".format(x)
a=u.selectAtoms("name {}".format(args.Atom))
noofatoms=len(a)
distancearray=np.zeros((noofatoms,noofatoms),dtype=np.float64)
ts=u.trajectory[start]
boxvectors=MD.coordinates.core.triclinic_vectors(ts.dimensions)
S6list=[]
S4list=[]
listoftaus=[]
for ts in u.trajectory:
if ts.frame>start or x==1:
neighborlistlist=[]
normvec=np.array([0,0,1])
normvecmatrix=np.array([[1,0,0],[0,1,0],[0,0,0]])
pm.echo(ts.frame)
coords=a.coordinates()
boxvectors=MD.coordinates.core.triclinic_vectors(ts.dimensions)
boxvector=boxvectors[boxvectors>0]
boxvectorcheck=np.diag(boxvectors)
d=distance_array(coords,coords,boxvectors,distancearray)
if np.shape(boxvector)!=np.shape(boxvectorcheck):
print "The box is not orthorhombic. I am leaving."
sys.exit()
neighborlist=nextneighborlist(d)
S6,S4=latticeorderparameter(coords,neighborlist,boxvector,normvecmatrix)
S6list.append(S6)
S4list.append(S4)
#print normvec
projcoordinatestemp=np.dot(coords,normvec)
projcoordinates=projcoordinatestemp-np.average(projcoordinatestemp)
#print projcoordinates
tau,layerdistance=transorderparameter(projcoordinates,1,0.5*np.dot(boxvector,normvec),200)
listoftaus.append(tau)
#print len(listoftaus)
tauav=np.average(listoftaus,axis=0)
S6av=np.average(S6list,axis=0)
S4av=np.average(S4list,axis=0)
#print listofS
filename="translationalorder.dat"
print "Printed tau to file {}".format(filename)
np.savetxt(filename,np.vstack((layerdistance,tauav)).T,delimiter='\t',newline='\n')
print "S6 orderparameter {}".format(S6av)
print "S4 orderparameter {}".format(S4av)
print "Tau orderparamer {}".format(np.amax(tauav))
|
12AngryMen/votca-scripts
|
Gromacs/ordertrans_new.py
|
Python
|
apache-2.0
| 6,150
|
[
"MDAnalysis"
] |
f6b9a0359253b873c4d3a7b83363f23c4862e12320058838c1cfda6ac992d0a5
|
############################################
# [config.py]
# CONFIGURATION SETTINGS FOR PROSODIC
#
# Here you may change the runtime settings for prosodic.
# For more help on this file, please see the README in this folder,
# or visit it online: <https://github.com/quadrismegistus/prosodic>.
# If you have any questions, please email Ryan <heuser@stanford.edu>.
#
############################################
############################################
# PATHS USED BY PROSODIC
#
# If these are relative paths (no leading /),
# they are defined from the point of view of
# the root directory of PROSODIC.
#
# Folder used as the folder of corpora:
# [it should contain folders, each of which contains text files]
folder_corpora='corpora/'
#
# Folder to store results within (statistics, etc)
folder_results='results/'
#
# Folder in which tagged samples (hand-parsed lines) are stored:
folder_tagged_samples = 'tagged_samples/'
############################################
############################################
# SELECT THE LANGUAGE
#
# Select the language that will be used in PROSODIC,
# when typing text directly or loading text.
#
# All text is English:
lang='en'
#
# All text is Finnish:
#lang='fi'
#
# Detect language from first two characters of filename:
# e.g. "en.[filename].txt" is English, "fi.[filename].txt" is Finnish
#lang='**'
############################################
############################################
# CONFIGURE TEXT-TO-SPEECH ENGINE (for English)
#
# To parse unknown English words, you'll need a TTS engine installed.
# For instructions, please see the README.
#
# Use espeak for TTS (recommended):
# [Note: syllabification done with CMU Syllabifier]
en_TTS_ENGINE = 'espeak'
#
# Use OpenMary for TTS:
#en_TTS_ENGINE = 'openmary'
#
# Do not use TTS:
# [Lines with unknown words will be skipped during metrical parsing]
#en_TTS_ENGINE = 'none'
#
# Cache results of TTS for an unknown word so it's not necessary
# to use TTS for that word again [Change to 0 to be false]
en_TTS_cache = 1
############################################
############################################
# OPTIONS ABOUT PRINTING TO SCREEN
#
# Print loaded words, parses, etc. to screen:
print_to_screen=1
#
# Do not print loaded words, parses, etc. to screen:
# Although hidden, you may still save any output to disk
# using the /save command.
#print_to_screen=0
#
# The default length for the line used by printing
linelen=60
############################################
############################################
# METRICAL PARSING: POSITION SIZE
#
# Select how many syllables are at least *possible* in strong or weak positions
# cf. Kiparsky & Hanson's "position size" parameter ("Parametric Theory" 1996)
#
#
######
# [Maximum position size]
#
# The maximum number of syllables allowed in strong metrical positions (i.e. "s")
maxS=2
#
# The maximum number of syllables allowed in weak metrical positions (i.e. "w")
maxW=2
#
#
######
# [Minimum position size]
#
# (Recommended) Positions are at minimum one syllable in size
splitheavies=0
#
# (Unrecommended) Allow positions to be as small as a single mora
# i.e. (a split heavy syllable can straddle two metrical positions)
#splitheavies=1
############################################
############################################
# METRICAL PARSING: METRICAL CONSTRAINTS
#
# Here you can configure the constraints used by the metrical parser.
# Each constraint is expressed in the form:
# Cs['(constraint name)']=(constraint weight)
# Constraint weights do not affect harmonic bounding (i.e. which parses
# survive as possibilities), but they do affect how those possibilities
# are sorted to select the "best" parse.
#
# [Do not remove or uncomment the following line]
Cs={}
#
#
######
# [Constraints regulating the 'STRENGTH' of a syllable]
#
# A syllable is strong if it is a peak in a polysyllabic word:
# the syllables in 'liberty', stressed-unstressed-unstressed,
# are, in terms of *strength*, strong-weak-neutral, because
# the first syllable is more stressed than its neighbor;
# the second syllable less stressed; and the third equally stressed.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any weak syllables ("troughs"):
Cs['strength.s=>-u']=3
#
# A weak metrical position may not contain any strong syllables ("peaks"):
# [Kiparsky and Hanson believe this is Shakespeare's meter]
Cs['strength.w=>-p']=3
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one strong syllable:
#Cs['strength.s=>p']=3
#
# A weak metrical position should contain at least one weak syllable:
#Cs['strength.w=>u']=3
#
#
#
######
# [Constraints regulating the STRESS of a syllable]
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any unstressed syllables:
# [Kiparsky and Hanson believe this is Hopkins' meter]
Cs['stress.s=>-u']=2
#
# A weak metrical position should not contain any stressed syllables:
Cs['stress.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one stressed syllable:
#Cs['stress.s=>p']=2
#
# A weak metrical position should contain at least one unstressed syllable:
#Cs['stress.w=>u']=2
#
#
#
######
# [Constraints regulating the WEIGHT of a syllable]
#
# The weight of a syllable is its "quantity": short or long.
# These constraints are designed for "quantitative verse",
# as for example in classical Latin and Greek poetry.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any light syllables:
#Cs['weight.s=>-u']=2
#
# A weak metrical position should not contain any heavy syllables:
#Cs['weight.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one heavy syllable:
#Cs['weight.s=>p']=2
#
# A weak metrical position should contain at least one light syllable:
#Cs['weight.w=>u']=2
#
#
#
######
# [Constraints regulating what's permissible as a DISYLLABIC metrical position]
# [(with thanks to Sam Bowman, who programmed many of these constraints)]
#
###
# [Based on weight:]
#
# A disyllabic metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-noHX']=1
#
# A disyllabic metrical position should be syllables weighted light-light:
#Cs['footmin-noLH-noHX']=1
#
###
# [Categorical:]
#
# A metrical position should not contain more than one syllable:
# [use to discourage disyllabic positions]
Cs['footmin-none']=1
#
# A strong metrical position should not contain more than one syllable:
#Cs['footmin-no-s']=1
#
# A weak metrical position should not contain more than one syllable:
#Cs['footmin-no-w']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *first* or *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions,
# or an initial "extrametrical" syllable]
#Cs['footmin-none-unless-in-first-two-positions']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions]
#Cs['footmin-none-unless-in-second-position']=1
#
# A strong metrical position should not contain more than one syllable,
# *unless* it is preceded by a disyllabic *weak* metrical position:
# [use to implement the metrical pattern described by Derek Attridge,
# in The Rhythms of English Poetry (1982), and commented on by Bruce Hayes
# in his review of the book in Language 60.1 (1984).
# e.g. Shakespeare's "when.your|SWEET.IS|ue.your|SWEET.FORM|should|BEAR"
# [this implementation is different in that it only takes into account
# double-weak beats *preceding* -- due to the way in which the parser
# throws away bounded parses as it goes, it might not be possible for now
# to write a constraint referencing future positions]
Cs['footmin-no-s-unless-preceded-by-ww']=10
# [The version that does reference future positions; but appears to be unstable]:
#Cs['attridge-ss-not-by-ww']=10
#
###
# [For disyllabic positions crossing a word boundary...
# (i.e. having two syllables, each from a different word)...
#
# ...it should never cross a word boundary to begin with:
#Cs['footmin-wordbound']=1
#
# ...both words should be function words:
#Cs['footmin-wordbound-bothnotfw']=1
#
# ...at least one word should be a function word:
#Cs['footmin-wordbound-neitherfw']=1
#
# ...the left-hand syllable should be a function-word:
#Cs['footmin-wordbound-leftfw']=1
#
# ...the right-hand syllable should be a function word:
#Cs['footmin-wordbound-rightfw']=1
#
# ...neither word should be a monosyllable:
#Cs['footmin-wordbound-nomono']=1
#
###
# [Miscellaneous constraints relating to disyllabic positions]
#
# A disyllabic metrical position may contain a strong syllable
# of a lexical word only if the syllable is (i) light and
# (ii) followed within the same position by an unstressed
# syllable normally belonging to the same word.
# [written by Sam Bowman]
#Cs['footmin-strongconstraint']=1
#
# The final metrical position of the line should not be 'ww'
# [use to encourage "...LI|ber|TY" rather than "...LI|ber.ty"]
Cs['posthoc-no-final-ww']=2
#
# The final metrical position of the line should not be 'w' or 'ww'
#Cs['posthoc-no-final-w']=2
#
# A line should have all 'ww' or all 'w':
# It works by:
# Nw = Number of weak positions in the line
# Mw = Maximum number of occurrences of 'w' metrical position
# Mww = Maximum number of occurrences of 'ww' metrical position
# M = Whichever is bigger, Mw or Mww
# V = Nw - M
# Violation Score = V * [Weight]
# [use to encourage consistency of meter across line]
# [feel free to make this a decimal number, like 0.25]
Cs['posthoc-standardize-weakpos']=1
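#
# [A minimal illustrative sketch of the violation score above, assuming
#  `positions` is the line's list of weak-position strings ('w' or 'ww');
#  the function name is hypothetical, not part of PROSODIC's API:]
# def standardize_weakpos_violation(positions, weight=1):
#     Nw = len(positions)
#     M = max(positions.count('w'), positions.count('ww'))
#     return (Nw - M) * weight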
#
#
#
######
# [MISCELLANEOUS constraints]
#
# A function word can fall only in a weak position:
#Cs['functiontow']=2
#
# An initial syllable must be in a weak position:
#Cs['initialstrong']=2
#
# The first metrical position will not be evaluated
# for any metrical constraints whatsoever:
# [set to 1 to be true]
#Cs['extrametrical-first-pos']=1
#
# The first two metrical positions will not be evaluated
# for any metrical constraints whatsoever:
# [set to 1 to be true]
#Cs['skip_initial_foot']=1
#
# A word should not be an elision [use to discourage elisions]:
Cs['word-elision']=1
#
# A weak metrical position should not contain any syllables
# that are stressed and heavy: [Meter of Finnish "Kalevala"]
#Cs['kalevala.w=>-p']=1
#
# A strong metrical position should not contain any syllables
# that are stressed and light: [Meter of Finnish "Kalevala"]
#Cs['kalevala.s=>-u']=1
############################################
############################################
# METRICAL PARSING: OPTIONS ABOUT LINES
#
######
# [Line SIZE]
#
# The maximum size of the line to parse:
# [others will be skipped during parsing]
# [PROSODIC can parse lines of up to approximately 20 syllables
# before the number of possibilities becomes too large,
# slowing the algorithm down to a halt.]
line_maxsylls=20
#
# The minimum size of the line to parse:
# [useful if lines are determined by punctuation,
# because sometimes they can be very very short
# and so pointless for metrical parsing.]
line_minsylls=4
#
#
######
# [Line DIVISIONS]
#
# Here you may decide how texts divide into lines.
# This is significant only because the line,
# with its words and syllables, is the unit passed
# to the metrical parser for parsing.
#
# Linebreaks occur only at actual linebreaks in the
# processed text file (good for metrical poetry):
linebreak='line'
#
# Linebreaks occur only upon encountering any of these
# punctuation marks (good for prose):
#linebreak=',;:.?!()[]{}<>'
#
# Linebreaks occur both at linebreaks in the text,
# *and* at any of these punctuation marks (good for
# prose and free-verse poetry):
#linebreak='line,;:.?!()[]{}<>'
#
#
######
# [MISCELLANEOUS line options]
#
# Headedness [optional]
# If there are multiple parses tied for the lowest score,
# break the tie by preferring lines that begin with this pattern:
line_headedness='ws'
#line_headedness='sw'
#line_headedness='wws'
#line_headedness='ssw'
############################################
############################################
# OPTIONS ABOUT WORDS
#
######
# [Tokenization]
#
# How are lines of text split into words? Define the regular
# expression that is applied to a string of text in order
# to split it into a list of words.
#
# Words are tokenized against [^] white-spaces [\s+] and hyphens [-]
tokenizer='[^\s+-]+'
#
# Words are tokenized against [^] white-spaces [\s+]
#tokenizer='[^\s+]+'
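#
# [Illustration (not part of the config): applying the default tokenizer
#  with Python's re module:]
# >>> import re
# >>> re.findall(r'[^\s+-]+', 'over-brimmed their clammy cells')
# ['over', 'brimmed', 'their', 'clammy', 'cells']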
#
######
# [Resolving stress ambiguity]
#
# Some words have multiple stress profiles: ambiguous polysyllabic
# words, and also ambiguous monosyllabic words. Words in the
# "maybestressed.txt" file of a language folder (e.g. dicts/en)
# will be given two stress profiles, one stressed and the other
# unstressed. The CMU also has multiple stress profiles for words.
#
# Allow the metrical parser to parse all stress profiles for all
# words in the line, thus choosing for each word the stress profile
# that best fits the metrical parse:
resolve_optionality=1
#resolve_optionality=0
#
#
######
# [ELISIONS of Syllables: English only]
#
# Some syllables are elided in English verse, e.g.
# e.g. with music sweet as love, which overflows her bower
# --> with|MU|sic|SWEET|as|LOVE|which|OV|er|FLOWS|her|BOW'R
# or e.g. scattering unbeholden
# --> SCAT|tring|UN|be|HOLD|en
#
# Add pronunciations for words that could have elided syllables:
#add_elided_pronunciations=1
add_elided_pronunciations=0
#
#
######
# [Output formatting]
#
# Here you may change the format under which the syllabified,
# phonetic output will appear. The options are:
# - ipa
# - cmu (the formatting used in the CMU Pronunciation Dictionary)
# - orth (the orthography itself [good for Finnish])
#
# The default phonetic output for all languages:
output='ipa'
#
# The phonetic output for English:
output_en='ipa'
#
# The phonetic output for Finnish:
output_fi='orth' # since finnish pronunciation is essentially identical to its orthography
############################################
|
kaysinds/PoopDog
|
config_backup.py
|
Python
|
gpl-3.0
| 14,244
|
[
"VisIt"
] |
d5a7022d39d67f654318650eff53d8a40b158658c62ab0a9fc5ffda8403f0b66
|
# hg.py - hg backend for convert extension
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# Notes for hg->hg conversion:
#
# * Old versions of Mercurial didn't trim the whitespace from the ends
# of commit messages, but new versions do. Changesets created by
# those older versions, then converted, may thus have different
# hashes for changesets that are otherwise identical.
#
# * Using "--config convert.hg.saverev=true" will make the source
# identifier to be stored in the converted revision. This will cause
# the converted revision to have a different identity than the
# source.
import os, time, cStringIO
from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import hg, util, context, bookmarks, error, scmutil, exchange
from mercurial import phases
from mercurial import lock as lockmod
from mercurial import merge as mergemod
from common import NoRepo, commit, converter_source, converter_sink, mapfile
import re
sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
class mercurial_sink(converter_sink):
def __init__(self, ui, path):
converter_sink.__init__(self, ui, path)
self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
self.lastbranch = None
if os.path.isdir(path) and len(os.listdir(path)) > 0:
try:
self.repo = hg.repository(self.ui, path)
if not self.repo.local():
raise NoRepo(_('%s is not a local Mercurial repository')
% path)
except error.RepoError as err:
ui.traceback()
raise NoRepo(err.args[0])
else:
try:
ui.status(_('initializing destination %s repository\n') % path)
self.repo = hg.repository(self.ui, path, create=True)
if not self.repo.local():
raise NoRepo(_('%s is not a local Mercurial repository')
% path)
self.created.append(path)
except error.RepoError:
ui.traceback()
raise NoRepo(_("could not create hg repository %s as sink")
% path)
self.lock = None
self.wlock = None
self.filemapmode = False
self.subrevmaps = {}
def before(self):
self.ui.debug('run hg sink pre-conversion action\n')
self.wlock = self.repo.wlock()
self.lock = self.repo.lock()
def after(self):
self.ui.debug('run hg sink post-conversion action\n')
if self.lock:
self.lock.release()
if self.wlock:
self.wlock.release()
def revmapfile(self):
return self.repo.join("shamap")
def authorfile(self):
return self.repo.join("authormap")
def setbranch(self, branch, pbranches):
if not self.clonebranches:
return
setbranch = (branch != self.lastbranch)
self.lastbranch = branch
if not branch:
branch = 'default'
pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
if pbranches:
pbranch = pbranches[0][1]
else:
pbranch = 'default'
branchpath = os.path.join(self.path, branch)
if setbranch:
self.after()
try:
self.repo = hg.repository(self.ui, branchpath)
except Exception:
self.repo = hg.repository(self.ui, branchpath, create=True)
self.before()
# pbranches may bring revisions from other branches (merge parents)
# Make sure we have them, or pull them.
missings = {}
for b in pbranches:
try:
self.repo.lookup(b[0])
except Exception:
missings.setdefault(b[1], []).append(b[0])
if missings:
self.after()
for pbranch, heads in sorted(missings.iteritems()):
pbranchpath = os.path.join(self.path, pbranch)
prepo = hg.peer(self.ui, {}, pbranchpath)
self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
exchange.pull(self.repo, prepo,
[prepo.lookup(h) for h in heads])
self.before()
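# Descriptive note: _rewritetags maps each tag's source revision id
# through revmap to its converted id, dropping tags whose revision was
# not converted (the null revision is passed through unchanged).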
def _rewritetags(self, source, revmap, data):
fp = cStringIO.StringIO()
for line in data.splitlines():
s = line.split(' ', 1)
if len(s) != 2:
continue
revid = revmap.get(source.lookuprev(s[0]))
if not revid:
if s[0] == hex(nullid):
revid = s[0]
else:
continue
fp.write('%s %s\n' % (revid, s[1]))
return fp.getvalue()
def _rewritesubstate(self, source, data):
fp = cStringIO.StringIO()
for line in data.splitlines():
s = line.split(' ', 1)
if len(s) != 2:
continue
revid = s[0]
subpath = s[1]
if revid != hex(nullid):
revmap = self.subrevmaps.get(subpath)
if revmap is None:
revmap = mapfile(self.ui,
self.repo.wjoin(subpath, '.hg/shamap'))
self.subrevmaps[subpath] = revmap
# It is reasonable that one or more of the subrepos don't
# need to be converted, in which case they can be cloned
# into place instead of converted. Therefore, only warn
# once.
msg = _('no ".hgsubstate" updates will be made for "%s"\n')
if len(revmap) == 0:
sub = self.repo.wvfs.reljoin(subpath, '.hg')
if self.repo.wvfs.exists(sub):
self.ui.warn(msg % subpath)
newid = revmap.get(revid)
if not newid:
if len(revmap) > 0:
self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
(revid, subpath))
else:
revid = newid
fp.write('%s %s\n' % (revid, subpath))
return fp.getvalue()
def _calculatemergedfiles(self, source, p1ctx, p2ctx):
"""Calculates the files from p2 that we need to pull in when merging p1
and p2, given that the merge is coming from the given source.
This prevents us from losing files that only exist in the target p2 and
that don't come from the source repo (like if you're merging multiple
repositories together).
"""
anc = [p1ctx.ancestor(p2ctx)]
# Calculate what files are coming from p2
actions, diverge, rename = mergemod.calculateupdates(
self.repo, p1ctx, p2ctx, anc,
True, # branchmerge
True, # force
False, # acceptremote
False, # followcopies
)
for file, (action, info, msg) in actions.iteritems():
if source.targetfilebelongstosource(file):
# If the file belongs to the source repo, ignore the p2
# since it will be covered by the existing fileset.
continue
# If the file requires actual merging, abort. We don't have enough
# context to resolve merges correctly.
if action in ['m', 'dm', 'cd', 'dc']:
raise error.Abort(_("unable to convert merge commit "
"since target parents do not merge cleanly (file "
"%s, parents %s and %s)") % (file, p1ctx,
p2ctx))
elif action == 'k':
# 'keep' means nothing changed from p1
continue
else:
# Any other change means we want to take the p2 version
yield file
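# Descriptive note: putcommit deduplicates the parent list, then walks
# it pairwise; an octopus merge (more than two parents) is linearized
# into a chain of two-parent "(octopus merge fixup)" commits. The final
# node is returned, unless filemapmode detects an empty revision, in
# which case the commit is rolled back and the parent is returned.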
def putcommit(self, files, copies, parents, commit, source, revmap, full,
cleanp2):
files = dict(files)
def getfilectx(repo, memctx, f):
if p2ctx and f in p2files and f not in copies:
self.ui.debug('reusing %s from p2\n' % f)
try:
return p2ctx[f]
except error.ManifestLookupError:
# If the file doesn't exist in p2, then we're syncing a
# delete, so just return None.
return None
try:
v = files[f]
except KeyError:
return None
data, mode = source.getfile(f, v)
if data is None:
return None
if f == '.hgtags':
data = self._rewritetags(source, revmap, data)
if f == '.hgsubstate':
data = self._rewritesubstate(source, data)
return context.memfilectx(self.repo, f, data, 'l' in mode,
'x' in mode, copies.get(f))
pl = []
for p in parents:
if p not in pl:
pl.append(p)
parents = pl
nparents = len(parents)
if self.filemapmode and nparents == 1:
m1node = self.repo.changelog.read(bin(parents[0]))[0]
parent = parents[0]
if len(parents) < 2:
parents.append(nullid)
if len(parents) < 2:
parents.append(nullid)
p2 = parents.pop(0)
text = commit.desc
sha1s = re.findall(sha1re, text)
for sha1 in sha1s:
oldrev = source.lookuprev(sha1)
newrev = revmap.get(oldrev)
if newrev is not None:
text = text.replace(sha1, newrev[:len(sha1)])
extra = commit.extra.copy()
sourcename = self.repo.ui.config('convert', 'hg.sourcename')
if sourcename:
extra['convert_source'] = sourcename
for label in ('source', 'transplant_source', 'rebase_source',
'intermediate-source'):
node = extra.get(label)
if node is None:
continue
# Only transplant stores its reference in binary
if label == 'transplant_source':
node = hex(node)
newrev = revmap.get(node)
if newrev is not None:
if label == 'transplant_source':
newrev = bin(newrev)
extra[label] = newrev
if self.branchnames and commit.branch:
extra['branch'] = commit.branch
if commit.rev and commit.saverev:
extra['convert_revision'] = commit.rev
while parents:
p1 = p2
p2 = parents.pop(0)
p1ctx = self.repo[p1]
p2ctx = None
if p2 != nullid:
p2ctx = self.repo[p2]
fileset = set(files)
if full:
fileset.update(self.repo[p1])
fileset.update(self.repo[p2])
if p2ctx:
p2files = set(cleanp2)
for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
p2files.add(file)
fileset.add(file)
ctx = context.memctx(self.repo, (p1, p2), text, fileset,
getfilectx, commit.author, commit.date, extra)
# We won't know if the conversion changes the node until after the
# commit, so copy the source's phase for now.
self.repo.ui.setconfig('phases', 'new-commit',
phases.phasenames[commit.phase], 'convert')
with self.repo.transaction("convert") as tr:
node = hex(self.repo.commitctx(ctx))
# If the node value has changed, but the phase is lower than
# draft, set it back to draft since it hasn't been exposed
# anywhere.
if commit.rev != node:
ctx = self.repo[node]
if ctx.phase() < phases.draft:
phases.retractboundary(self.repo, tr, phases.draft,
[ctx.node()])
text = "(octopus merge fixup)\n"
p2 = node
if self.filemapmode and nparents == 1:
man = self.repo.manifest
mnode = self.repo.changelog.read(bin(p2))[0]
closed = 'close' in commit.extra
if not closed and not man.cmp(m1node, man.revision(mnode)):
self.ui.status(_("filtering out empty revision\n"))
self.repo.rollback(force=True)
return parent
return p2
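# Descriptive note: puttags collects the existing .hgtags lines from all
# branch heads, compares them with the converted tag set, and, if any
# genuinely new tags exist, commits a fresh .hgtags on tagsbranch.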
def puttags(self, tags):
try:
parentctx = self.repo[self.tagsbranch]
tagparent = parentctx.node()
except error.RepoError:
parentctx = None
tagparent = nullid
oldlines = set()
for branch, heads in self.repo.branchmap().iteritems():
for h in heads:
if '.hgtags' in self.repo[h]:
oldlines.update(
set(self.repo[h]['.hgtags'].data().splitlines(True)))
oldlines = sorted(list(oldlines))
newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
if newlines == oldlines:
return None, None
# if the old and new tags match, then there is nothing to update
oldtags = set()
newtags = set()
for line in oldlines:
s = line.strip().split(' ', 1)
if len(s) != 2:
continue
oldtags.add(s[1])
for line in newlines:
s = line.strip().split(' ', 1)
if len(s) != 2:
continue
if s[1] not in oldtags:
newtags.add(s[1].strip())
if not newtags:
return None, None
data = "".join(newlines)
def getfilectx(repo, memctx, f):
return context.memfilectx(repo, f, data, False, False, None)
self.ui.status(_("updating tags\n"))
date = "%s 0" % int(time.mktime(time.gmtime()))
extra = {'branch': self.tagsbranch}
ctx = context.memctx(self.repo, (tagparent, None), "update tags",
[".hgtags"], getfilectx, "convert-repo", date,
extra)
node = self.repo.commitctx(ctx)
return hex(node), hex(tagparent)
def setfilemapmode(self, active):
self.filemapmode = active
def putbookmarks(self, updatedbookmark):
if not len(updatedbookmark):
return
wlock = lock = tr = None
try:
wlock = self.repo.wlock()
lock = self.repo.lock()
tr = self.repo.transaction('bookmark')
self.ui.status(_("updating bookmarks\n"))
destmarks = self.repo._bookmarks
for bookmark in updatedbookmark:
destmarks[bookmark] = bin(updatedbookmark[bookmark])
destmarks.recordchange(tr)
tr.close()
finally:
lockmod.release(lock, wlock, tr)
def hascommitfrommap(self, rev):
# the exact semantics of clonebranches is unclear so we can't say no
return rev in self.repo or self.clonebranches
def hascommitforsplicemap(self, rev):
if rev not in self.repo and self.clonebranches:
raise error.Abort(_('revision %s not found in destination '
'repository (lookups with clonebranches=true '
'are not implemented)') % rev)
return rev in self.repo
class mercurial_source(converter_source):
def __init__(self, ui, path, revs=None):
converter_source.__init__(self, ui, path, revs)
self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
self.ignored = set()
self.saverev = ui.configbool('convert', 'hg.saverev', False)
try:
self.repo = hg.repository(self.ui, path)
# try to provoke an exception if this isn't really a hg
# repo, but some other bogus compatible-looking url
if not self.repo.local():
raise error.RepoError
except error.RepoError:
ui.traceback()
raise NoRepo(_("%s is not a local Mercurial repository") % path)
self.lastrev = None
self.lastctx = None
self._changescache = None, None
self.convertfp = None
# Restrict converted revisions to startrev descendants
startnode = ui.config('convert', 'hg.startrev')
hgrevs = ui.config('convert', 'hg.revs')
if hgrevs is None:
if startnode is not None:
try:
startnode = self.repo.lookup(startnode)
except error.RepoError:
raise error.Abort(_('%s is not a valid start revision')
% startnode)
startrev = self.repo.changelog.rev(startnode)
children = {startnode: 1}
for r in self.repo.changelog.descendants([startrev]):
children[self.repo.changelog.node(r)] = 1
self.keep = children.__contains__
else:
self.keep = util.always
if revs:
self._heads = [self.repo[r].node() for r in revs]
else:
self._heads = self.repo.heads()
else:
if revs or startnode is not None:
raise error.Abort(_('hg.revs cannot be combined with '
'hg.startrev or --rev'))
nodes = set()
parents = set()
for r in scmutil.revrange(self.repo, [hgrevs]):
ctx = self.repo[r]
nodes.add(ctx.node())
parents.update(p.node() for p in ctx.parents())
self.keep = nodes.__contains__
self._heads = nodes - parents
def _changectx(self, rev):
if self.lastrev != rev:
self.lastctx = self.repo[rev]
self.lastrev = rev
return self.lastctx
def _parents(self, ctx):
return [p for p in ctx.parents() if p and self.keep(p.node())]
def getheads(self):
return [hex(h) for h in self._heads if self.keep(h)]
def getfile(self, name, rev):
try:
fctx = self._changectx(rev)[name]
return fctx.data(), fctx.flags()
except error.LookupError:
return None, None
def _changedfiles(self, ctx1, ctx2):
ma, r = [], []
maappend = ma.append
rappend = r.append
d = ctx1.manifest().diff(ctx2.manifest())
for f, ((node1, flag1), (node2, flag2)) in d.iteritems():
if node2 is None:
rappend(f)
else:
maappend(f)
return ma, r
def getchanges(self, rev, full):
ctx = self._changectx(rev)
parents = self._parents(ctx)
if full or not parents:
files = copyfiles = ctx.manifest()
if parents:
if self._changescache[0] == rev:
ma, r = self._changescache[1]
else:
ma, r = self._changedfiles(parents[0], ctx)
if not full:
files = ma + r
copyfiles = ma
# _getcopies() is also run for roots and before filtering so missing
# revlogs are detected early
copies = self._getcopies(ctx, parents, copyfiles)
cleanp2 = set()
if len(parents) == 2:
d = parents[1].manifest().diff(ctx.manifest(), clean=True)
for f, value in d.iteritems():
if value is None:
cleanp2.add(f)
changes = [(f, rev) for f in files if f not in self.ignored]
changes.sort()
return changes, copies, cleanp2
def _getcopies(self, ctx, parents, files):
copies = {}
for name in files:
if name in self.ignored:
continue
try:
copysource, _copynode = ctx.filectx(name).renamed()
if copysource in self.ignored:
continue
# Ignore copy sources not in parent revisions
found = False
for p in parents:
if copysource in p:
found = True
break
if not found:
continue
copies[name] = copysource
except TypeError:
pass
except error.LookupError as e:
if not self.ignoreerrors:
raise
self.ignored.add(name)
self.ui.warn(_('ignoring: %s\n') % e)
return copies
def getcommit(self, rev):
ctx = self._changectx(rev)
parents = [p.hex() for p in self._parents(ctx)]
crev = rev
return commit(author=ctx.user(),
date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
desc=ctx.description(), rev=crev, parents=parents,
branch=ctx.branch(), extra=ctx.extra(),
sortkey=ctx.rev(), saverev=self.saverev,
phase=ctx.phase())
def gettags(self):
# This will get written to .hgtags, filter non global tags out.
tags = [t for t in self.repo.tagslist()
if self.repo.tagtype(t[0]) == 'global']
return dict([(name, hex(node)) for name, node in tags
if self.keep(node)])
def getchangedfiles(self, rev, i):
ctx = self._changectx(rev)
parents = self._parents(ctx)
if not parents and i is None:
i = 0
ma, r = ctx.manifest().keys(), []
else:
i = i or 0
ma, r = self._changedfiles(parents[i], ctx)
ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]
if i == 0:
self._changescache = (rev, (ma, r))
return ma + r
def converted(self, rev, destrev):
if self.convertfp is None:
self.convertfp = open(self.repo.join('shamap'), 'a')
self.convertfp.write('%s %s\n' % (destrev, rev))
self.convertfp.flush()
def before(self):
self.ui.debug('run hg source pre-conversion action\n')
def after(self):
self.ui.debug('run hg source post-conversion action\n')
def hasnativeorder(self):
return True
def hasnativeclose(self):
return True
def lookuprev(self, rev):
try:
return hex(self.repo.lookup(rev))
except (error.RepoError, error.LookupError):
return None
def getbookmarks(self):
return bookmarks.listbookmarks(self.repo)
def checkrevformat(self, revstr, mapname='splicemap'):
""" Mercurial, revision string is a 40 byte hex """
self.checkhexformat(revstr, mapname)
|
seewindcn/tortoisehg
|
src/hgext/convert/hg.py
|
Python
|
gpl-2.0
| 23,506
|
[
"Octopus"
] |
07d48ab69061b16f92bfb18dad123e60959c59ff3d80b84c0607151bfdc0a64d
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
""" use the interface module to create bare slab """
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Poscar
from mpinterfaces.interface import Interface
if __name__ == '__main__':
# input structure file: bulk
fin = 'POSCAR.bulk'
# output file name
fout = 'POSCAR'
hkl = [0, 0, 1] # hkl wrt the input structure
min_thick = 15 # angstroms
min_vac = 30 # angstroms
# use ase to create an orthogonal slab
bulk = Structure.from_file(fin)
iface = Interface(bulk, hkl=hkl,
min_thick=min_thick, min_vac=min_vac,
primitive=False, from_ase=True)
iface.create_interface()
iface.sort()
# set selective dynamics flags
# 1 --> T and 0 --> F
sd_flags = [[1, 1, 1] for i in iface.sites]
iface_poscar = Poscar(iface, selective_dynamics=sd_flags)
# write to file
iface_poscar.write_file(fout)
|
henniggroup/MPInterfaces
|
examples/create_slab.py
|
Python
|
mit
| 1,117
|
[
"ASE",
"VASP",
"pymatgen"
] |
a1747a6936cf46651b0d309473a432ccf954636d5f6838ed53b4081e7f6e939a
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGetorganelle(PythonPackage):
"""Organelle Genome Assembly Toolkit (Chloroplast/Mitocondrial/ITS)"""
homepage = "https://github.com/Kinggerm/GetOrganelle"
url = "https://github.com/Kinggerm/GetOrganelle/archive/refs/tags/1.7.5.0.tar.gz"
maintainers = ['dorton21']
version('1.7.5.0', sha256='c498196737726cb4c0158f23037bf301a069f5028ece729bb4d09c7d915df93d')
depends_on('py-setuptools', type='build')
depends_on('py-numpy@1.16.4:', type=('build', 'run'))
depends_on('py-scipy@1.3.0:', type=('build', 'run'))
depends_on('py-sympy@1.4:', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('bowtie2', type='run')
depends_on('spades', type='run')
depends_on('blast-plus', type='run')
# Allow access to relevant runtime scripts
# I.e. get_organelle_config.py, get_organelle_from_reads.py, etc.
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix)
env.prepend_path('PATH', self.prefix.Utilities)
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-getorganelle/package.py
|
Python
|
lgpl-2.1
| 1,252
|
[
"BLAST"
] |
5205ecc6e948fb45d7dbfafbb9a53a38d8fee4b9b025d93c54cb2a4a4b7584fa
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
from tensorforce import TensorforceError
from tensorforce.agents import TensorforceAgent
class DeepQNetwork(TensorforceAgent):
"""
[Deep Q-Network](https://www.nature.com/articles/nature14236) agent (specification key: `dqn`).
Args:
states (specification): States specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create(...)`), arbitrarily nested dictionary of state
descriptions (usually taken from `Environment.states()`) with the following attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – state data type
(<span style="color:#00C000"><b>default</b></span>: "float").</li>
<li><b>shape</b> (<i>int | iter[int]</i>) – state shape
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete state values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum state value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
actions (specification): Actions specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create(...)`), arbitrarily nested dictionary of
action descriptions (usually taken from `Environment.actions()`) with the following
attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – action data type
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>shape</b> (<i>int > 0 | iter[int > 0]</i>) – action shape
(<span style="color:#00C000"><b>default</b></span>: scalar).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete action values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum action value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
max_episode_timesteps (int > 0): Upper bound for number of timesteps per episode
(<span style="color:#00C000"><b>default</b></span>: not given, better implicitly
specified via `environment` argument for `Agent.create(...)`).
memory (int > 0): Replay memory capacity, has to fit at least maximum batch_size + maximum
network/estimator horizon + 1 timesteps
(<span style="color:#C00000"><b>required</b></span>).
batch_size (<a href="../modules/parameters.html">parameter</a>, int > 0): Number of
timesteps per update batch
(<span style="color:#C00000"><b>required</b></span>).
network ("auto" | specification): Policy network configuration, see the
[networks documentation](../modules/networks.html)
(<span style="color:#00C000"><b>default</b></span>: "auto", automatically configured
network).
update_frequency ("never" | <a href="../modules/parameters.html">parameter</a>, int > 0 | 0.0 < float <= 1.0):
Frequency of updates, relative to batch_size if float
(<span style="color:#00C000"><b>default</b></span>: 0.25 * batch_size).
start_updating (<a href="../modules/parameters.html">parameter</a>, int >= batch_size):
Number of timesteps before first update
(<span style="color:#00C000"><b>default</b></span>: none).
learning_rate (<a href="../modules/parameters.html">parameter</a>, float > 0.0): Optimizer
learning rate
(<span style="color:#00C000"><b>default</b></span>: 1e-3).
huber_loss (<a href="../modules/parameters.html">parameter</a>, float > 0.0): Huber loss
threshold
(<span style="color:#00C000"><b>default</b></span>: no huber loss).
horizon (<a href="../modules/parameters.html">parameter</a>, int >= 1): n-step DQN, horizon
of discounted-sum reward estimation before target network estimate
(<span style="color:#00C000"><b>default</b></span>: 1).
discount (<a href="../modules/parameters.html">parameter</a>, 0.0 <= float <= 1.0): Discount
factor for future rewards of discounted-sum reward estimation
(<span style="color:#00C000"><b>default</b></span>: 0.99).
return_processing (specification): Return processing as layer or list of layers, see the
[preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no return processing).
predict_terminal_values (bool): Whether to predict the value of terminal states, usually
not required since max_episode_timesteps terminals are handled separately
(<span style="color:#00C000"><b>default</b></span>: false).
target_update_weight (<a href="../modules/parameters.html">parameter</a>, 0.0 < float <= 1.0):
Target network update weight
(<span style="color:#00C000"><b>default</b></span>: 1.0).
target_sync_frequency (<a href="../modules/parameters.html">parameter</a>, int >= 1):
Interval between target network updates
(<span style="color:#00C000"><b>default</b></span>: every update).
l2_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
L2 regularization loss weight
(<span style="color:#00C000"><b>default</b></span>: no L2 regularization).
entropy_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Entropy regularization loss weight, to discourage the policy distribution from being
"too certain"
(<span style="color:#00C000"><b>default</b></span>: no entropy regularization).
state_preprocessing (dict[specification]): State preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html),
specified per state-type or -name
(<span style="color:#00C000"><b>default</b></span>: linear normalization of bounded
float states to [-2.0, 2.0]).
reward_preprocessing (specification): Reward preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no reward preprocessing).
exploration (<a href="../modules/parameters.html">parameter</a> | dict[<a href="../modules/parameters.html">parameter</a>], float >= 0.0):
Exploration, defined as the probability for uniformly random output in case of `bool`
and `int` actions, and the standard deviation of Gaussian noise added to every output in
case of `float` actions, specified globally or per action-type or -name
(<span style="color:#00C000"><b>default</b></span>: no exploration).
variable_noise (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Add Gaussian noise with given standard deviation to all trainable variables, as
alternative exploration mechanism
(<span style="color:#00C000"><b>default</b></span>: no variable noise).<br/><br/>
>>>: For arguments below, see the [Tensorforce agent documentation](tensorforce.html).
parallel_interactions (int > 0)
config (specification)
saver (path | specification)
summarizer (path | specification)
tracking ("all" | iter[string])
recorder (path | specification)
"""
def __init__(
# Required
self, states, actions, memory, batch_size,
# Environment
max_episode_timesteps=None,
# Network
network='auto',
# Optimization
update_frequency=0.25, start_updating=None, learning_rate=1e-3, huber_loss=None,
# Reward estimation
horizon=1, discount=0.99, return_processing=None, predict_terminal_values=False,
# Target network
target_update_weight=1.0, target_sync_frequency=1,
# Preprocessing
state_preprocessing='linear_normalization', reward_preprocessing=None,
# Exploration
exploration=0.0, variable_noise=0.0,
# Regularization
l2_regularization=0.0, entropy_regularization=0.0,
# Parallel interactions
parallel_interactions=1,
# Config, saver, summarizer, tracking, recorder
config=None, saver=None, summarizer=None, tracking=None, recorder=None,
# Deprecated
**kwargs
):
if 'estimate_terminal' in kwargs:
raise TensorforceError.deprecated(
name='DQN', argument='estimate_terminal', replacement='predict_terminal_values'
)
self.spec = OrderedDict(
agent='dqn',
states=states, actions=actions, memory=memory, batch_size=batch_size,
max_episode_timesteps=max_episode_timesteps,
network=network,
update_frequency=update_frequency, start_updating=start_updating,
learning_rate=learning_rate, huber_loss=huber_loss,
horizon=horizon, discount=discount, return_processing=return_processing,
predict_terminal_values=predict_terminal_values,
target_update_weight=target_update_weight, target_sync_frequency=target_sync_frequency,
state_preprocessing=state_preprocessing, reward_preprocessing=reward_preprocessing,
exploration=exploration, variable_noise=variable_noise,
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
parallel_interactions=parallel_interactions,
config=config, saver=saver, summarizer=summarizer, tracking=tracking, recorder=recorder
)
policy = dict(
type='parametrized_value_policy', network=network, state_value_mode='implicit'
)
memory = dict(type='replay', capacity=memory)
update = dict(
unit='timesteps', batch_size=batch_size, frequency=update_frequency,
start=start_updating
)
optimizer = dict(type='adam', learning_rate=learning_rate)
objective = dict(type='action_value', huber_loss=huber_loss)
reward_estimation = dict(
horizon=horizon, discount=discount, predict_horizon_values='late',
estimate_advantage=False, predict_action_values=False,
return_processing=return_processing, predict_terminal_values=predict_terminal_values
)
baseline = policy
baseline_optimizer = dict(
type='synchronization', update_weight=target_update_weight,
sync_frequency=target_sync_frequency
)
baseline_objective = None
super().__init__(
# Agent
states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
parallel_interactions=parallel_interactions, config=config, recorder=recorder,
# TensorforceModel
policy=policy, memory=memory, update=update, optimizer=optimizer, objective=objective,
reward_estimation=reward_estimation,
baseline=baseline, baseline_optimizer=baseline_optimizer,
baseline_objective=baseline_objective,
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
state_preprocessing=state_preprocessing, reward_preprocessing=reward_preprocessing,
exploration=exploration, variable_noise=variable_noise,
saver=saver, summarizer=summarizer, tracking=tracking, **kwargs
)
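# A minimal usage sketch (illustrative, not part of this module; the
# environment name is an assumption):
#
#   from tensorforce import Agent, Environment
#   environment = Environment.create(environment='gym', level='CartPole-v1')
#   agent = Agent.create(agent='dqn', environment=environment,
#                        memory=10000, batch_size=32)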
|
reinforceio/tensorforce
|
tensorforce/agents/dqn.py
|
Python
|
apache-2.0
| 12,802
|
[
"Gaussian"
] |
727194e8c9db9f7aa7b9f2d4797d2579e4b292f6be29f69c040247d71498c89e
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# A script to test the mask filter.
# replaces a circle with a color
# Image pipeline
reader = vtk.vtkPNMReader()
reader.ReleaseDataFlagOff()
reader.SetFileName(VTK_DATA_ROOT + "/Data/earth.ppm")
reader.Update()
sphere = vtk.vtkImageEllipsoidSource()
sphere.SetWholeExtent(0,511,0,255,0,0)
sphere.SetCenter(128,128,0)
sphere.SetRadius(80,80,1)
sphere.Update()
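# vtkImageMask passes the image through where the mask is non-zero and
# writes MaskedOutputValue elsewhere; NotMaskOn() inverts that test, so
# here the ellipse interior gets replaced with the given colour, and
# SetMaskAlpha (used further below) blends instead of replacing outright.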
mask = vtk.vtkImageMask()
mask.SetImageInputData(reader.GetOutput())
mask.SetMaskInputData(sphere.GetOutput())
mask.SetMaskedOutputValue(100,128,200)
mask.NotMaskOn()
mask.ReleaseDataFlagOff()
mask.Update()
sphere2 = vtk.vtkImageEllipsoidSource()
sphere2.SetWholeExtent(0,511,0,255,0,0)
sphere2.SetCenter(328,128,0)
sphere2.SetRadius(80,50,1)
sphere2.Update()
# Test the wrapping of the output masked value
mask2 = vtk.vtkImageMask()
mask2.SetImageInputData(mask.GetOutput())
mask2.SetMaskInputData(sphere2.GetOutput())
mask2.SetMaskedOutputValue(100)
mask2.NotMaskOn()
mask2.ReleaseDataFlagOff()
mask2.Update()
sphere3 = vtk.vtkImageEllipsoidSource()
sphere3.SetWholeExtent(0,511,0,255,0,0)
sphere3.SetCenter(228,155,0)
sphere3.SetRadius(80,80,1)
sphere3.Update()
# Test the wrapping of the output masked value
mask3 = vtk.vtkImageMask()
mask3.SetImageInputData(mask2.GetOutput())
mask3.SetMaskInputData(sphere3.GetOutput())
mask3.SetMaskedOutputValue(255)
mask3.NotMaskOn()
mask3.SetMaskAlpha(0.5)
mask3.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(mask3.GetOutputPort())
viewer.SetColorWindow(255)
viewer.SetColorLevel(128)
#viewer DebugOn
viewer.Render()
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestMask2.py
|
Python
|
gpl-3.0
| 1,722
|
[
"VTK"
] |
e75a04c15ea00c59834e139f53fe51a3a196d376c14454eb865aedcf5c78d7ca
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import calendar
from collections import deque
from datetime import datetime
from google.protobuf.descriptor import FieldDescriptor
from google.protobuf.internal.containers import RepeatedScalarFieldContainer
from jormungandr import i_manager
import logging
import pytz
from jormungandr.exceptions import RegionNotFound
from navitiacommon import response_pb2
def str_to_time_stamp(str):
"""
convert a string to a posix timestamp
the string must be in the YYYYMMDDTHHMMSS format
like 20170504T124500
"""
date = datetime.strptime(str, "%Y%m%dT%H%M%S")
return date_to_timestamp(date)
def date_to_timestamp(date):
"""
convert a datetime object to a posix timestamp (number of seconds since 1970/1/1)
"""
return int(calendar.timegm(date.utctimetuple()))
class ResourceUtc:
def __init__(self):
self._tz = None
def tz(self):
if not self._tz:
instance = i_manager.instances.get(self.region, None)
if not instance:
raise RegionNotFound(self.region)
tz_name = instance.timezone # TODO store directly the tz?
if not tz_name:
logging.Logger(__name__).warn("unknown timezone for region {}"
.format(self.region))
return None
self._tz = (pytz.timezone(tz_name),)
return self._tz[0]
def convert_to_utc(self, original_datetime):
"""
convert the original_datetime in the args to UTC
for that we need to 'guess' the timezone wanted by the user
For the moment We only use the default instance timezone.
It won't obviously work for multi timezone instances, we'll have to do
something smarter.
We'll have to consider either the departure or the arrival of the journey
(depending on the `clockwise` param)
and fetch the tz of this point.
we'll have to store the tz for stop area and the coord for admin, poi, ...
"""
if self.tz() is None:
return original_datetime
utctime = self.tz().normalize(self.tz().localize(original_datetime)).astimezone(pytz.utc)
return utctime
def format(self, value):
dt = datetime.utcfromtimestamp(value)
if self.tz() is not None:
dt = pytz.utc.localize(dt)
dt = dt.astimezone(self.tz())
return dt.strftime("%Y%m%dT%H%M%S")
return None # for the moment I prefer not to display anything instead of something wrong
def walk_dict(tree, visitor):
"""
depth first search on a dict.
call the visit(elem) method on the visitor for each node
if the visitor returns True, stop the search
>>> bob = {'tutu': 1,
... 'tata': [1, 2],
... 'toto': {'bob':12, 'bobette': 13, 'nested_bob': {'bob': 3}},
... 'tete': ('tuple1', ['ltuple1', 'ltuple2']),
... 'titi': [{'a':1}, {'b':1}]}
>>> def my_visitor(name, val):
... print "{}={}".format(name, val)
>>> walk_dict(bob, my_visitor)
titi={'b': 1}
b=1
titi={'a': 1}
a=1
tete=ltuple2
tete=ltuple1
tete=tuple1
tutu=1
toto={'bobette': 13, 'bob': 12, 'nested_bob': {'bob': 3}}
nested_bob={'bob': 3}
bob=3
bob=12
bobette=13
tata=2
tata=1
>>> def my_stoper_visitor(name, val):
... print "{}={}".format(name, val)
... if name == 'tete':
... return True
>>> walk_dict(bob, my_stoper_visitor)
titi={'b': 1}
b=1
titi={'a': 1}
a=1
tete=ltuple2
"""
queue = deque()
def add_elt(name, elt, first=False):
if isinstance(elt, (list, tuple)):
for val in elt:
queue.append((name, val))
elif hasattr(elt, 'iteritems'):
for k, v in elt.iteritems():
queue.append((k, v))
elif first: # for the first elt, we add it even if it is no collection
queue.append((name, elt))
add_elt("main", tree, first=True)
while queue:
elem = queue.pop()
#we don't want to visit the list, we'll visit each node separately
if not isinstance(elem[1], (list, tuple)):
if visitor(elem[0], elem[1]) is True:
#we stop the search if the visitor returns True
break
#for list and tuple, the name is the parent's name
add_elt(elem[0], elem[1])
def walk_protobuf(pb_object, visitor):
"""
Walk on a protobuf and call the visitor for each nodes
>>> journeys = response_pb2.Response()
>>> journey_standard = journeys.journeys.add()
>>> journey_standard.type = "none"
>>> journey_standard.duration = 1
>>> journey_standard.nb_transfers = 2
>>> s = journey_standard.sections.add()
>>> s.duration = 3
>>> s = journey_standard.sections.add()
>>> s.duration = 4
>>> journey_rapid = journeys.journeys.add()
>>> journey_rapid.duration = 5
>>> journey_rapid.nb_transfers = 6
>>> s = journey_rapid.sections.add()
>>> s.duration = 7
>>>
>>> from collections import defaultdict
>>> types_counter = defaultdict(int)
>>> def visitor(name, val):
... types_counter[type(val)] +=1
>>>
>>> walk_protobuf(journeys, visitor)
>>> types_counter[response_pb2.Response]
1
>>> types_counter[response_pb2.Journey]
2
>>> types_counter[response_pb2.Section]
3
>>> types_counter[int] # and 7 int in all
7
"""
queue = deque()
def add_elt(name, elt):
try:
fields = elt.ListFields()
except AttributeError:
return
for field, value in fields:
if field.label == FieldDescriptor.LABEL_REPEATED:
for v in value:
queue.append((field.name, v))
else:
queue.append((field.name, value))
# add_elt("main", pb_object)
queue.append(('main', pb_object))
while queue:
elem = queue.pop()
visitor(elem[0], elem[1])
add_elt(elem[0], elem[1])
| prhod/navitia | source/jormungandr/jormungandr/utils.py | Python | agpl-3.0 | 7,289 | ["VisIt"] | 55910cc438e4d04f87519ce10af5dc92466dedb1978f4d0d9d6b2e7e05645ecb |
from dateutil.relativedelta import relativedelta
from edc_visit_schedule import VisitSchedule, Schedule, Visit
from edc_visit_schedule import FormsCollection, Crf, Requisition
from edc_visit_schedule.visit.requisition import Panel as BasePanel
app_label = 'edc_metadata'
class Panel(BasePanel):
def __init__(self, name):
super().__init__(
requisition_model='edc_metadata.subjectrequisition',
name=name)
crfs0 = FormsCollection(
Crf(show_order=1, model=f'{app_label}.crfone', required=True),
Crf(show_order=2, model=f'{app_label}.crftwo', required=True),
Crf(show_order=3, model=f'{app_label}.crfthree', required=True),
Crf(show_order=4, model=f'{app_label}.crffour', required=True),
Crf(show_order=5, model=f'{app_label}.crffive', required=True),
)
crfs1 = FormsCollection(
Crf(show_order=1, model=f'{app_label}.crffour', required=True),
Crf(show_order=2, model=f'{app_label}.crffive', required=True),
Crf(show_order=3, model=f'{app_label}.crfsix', required=True),
)
crfs2 = FormsCollection(
Crf(show_order=1, model=f'{app_label}.crfseven', required=True),
)
crfs_unscheduled = FormsCollection(
Crf(show_order=1, model=f'{app_label}.crftwo', required=True),
Crf(show_order=2, model=f'{app_label}.crfthree', required=True),
Crf(show_order=3, model=f'{app_label}.crffive', required=True),
)
requisitions = FormsCollection(
Requisition(
show_order=10,
panel=Panel('one'), required=True, additional=False),
Requisition(
show_order=20,
panel=Panel('two'), required=True, additional=False),
Requisition(
show_order=30,
panel=Panel('three'), required=True, additional=False),
Requisition(
show_order=40,
panel=Panel('four'), required=True, additional=False),
Requisition(
show_order=50,
panel=Panel('five'), required=True, additional=False),
Requisition(
show_order=60,
panel=Panel('six'), required=True, additional=False),
)
requisitions3000 = FormsCollection(
Requisition(
show_order=10,
panel=Panel('seven'), required=True, additional=False),
)
requisitions_unscheduled = FormsCollection(
Requisition(
show_order=10,
panel=Panel('one'), required=True, additional=False),
Requisition(
show_order=20,
panel=Panel('three'), required=True, additional=False),
Requisition(
show_order=30,
panel=Panel('five'), required=True, additional=False))
visit0 = Visit(
code='1000',
title='Day 1',
timepoint=0,
rbase=relativedelta(days=0),
rlower=relativedelta(days=0),
rupper=relativedelta(days=6),
requisitions=requisitions,
crfs=crfs0,
crfs_unscheduled=crfs_unscheduled,
requisitions_unscheduled=requisitions_unscheduled,
allow_unscheduled=True,
facility_name='5-day-clinic')
visit1 = Visit(
code='2000',
title='Day 2',
timepoint=1,
rbase=relativedelta(days=1),
rlower=relativedelta(days=0),
rupper=relativedelta(days=6),
requisitions=requisitions,
crfs=crfs1,
facility_name='5-day-clinic')
visit2 = Visit(
code='3000',
title='Day 3',
timepoint=2,
rbase=relativedelta(days=2),
rlower=relativedelta(days=0),
rupper=relativedelta(days=6),
requisitions=requisitions3000,
crfs=crfs2,
facility_name='5-day-clinic')
schedule = Schedule(
name='schedule',
onschedule_model='edc_metadata.onschedule',
offschedule_model='edc_metadata.offschedule',
consent_model='edc_metadata.subjectconsent',
appointment_model='edc_appointment.appointment')
schedule.add_visit(visit0)
schedule.add_visit(visit1)
schedule.add_visit(visit2)
visit_schedule = VisitSchedule(
name='visit_schedule',
offstudy_model='edc_metadata.subjectoffstudy',
death_report_model='edc_metadata.deathreport')
visit_schedule.add_schedule(schedule)
| botswana-harvard/edc-entry | edc_metadata/tests/visit_schedule.py | Python | gpl-2.0 | 3,940 | ["VisIt"] | bbe45af470759f64aebfd3290656a8261ff1990791377f233bb19ba590879c3a |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package implements modules for input and output to and from VASP. It
imports the key classes from both vasp_input and vasp_output so that most
classes can simply be called as, for example, pymatgen.io.vasp.Incar, to
retain backwards compatibility.
"""
from .inputs import Incar, Poscar, Potcar, Kpoints, PotcarSingle, VaspInput
from .outputs import Vasprun, BSVasprun, Outcar, VolumetricData, Locpot, Chgcar, Elfcar, Procar, Oszicar, Xdatcar, \
Dynmat, Wavecar, Wavederf, Waveder
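# Illustrative usage sketch (hypothetical file paths, not part of this module):
# from pymatgen.io.vasp import Incar, Poscar
# incar = Incar.from_file("INCAR")    # dict-like access to VASP input tags
# poscar = Poscar.from_file("POSCAR") # crystal structure from a POSCAR file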
| gVallverdu/pymatgen | pymatgen/io/vasp/__init__.py | Python | mit | 603 | ["VASP", "pymatgen"] | 7f518c4d013f83921e016148bbf04b5d9d62e5da1493a749f010065d51199653 |
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from ._empirical_covariance import (empirical_covariance,
EmpiricalCovariance,
log_likelihood)
from ._shrunk_covariance import (shrunk_covariance, ShrunkCovariance,
ledoit_wolf, ledoit_wolf_shrinkage,
LedoitWolf, oas, OAS)
from ._robust_covariance import fast_mcd, MinCovDet
from ._graph_lasso import graphical_lasso, GraphicalLasso, GraphicalLassoCV
from ._elliptic_envelope import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphicalLasso',
'GraphicalLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graphical_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
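# Illustrative usage sketch (not part of this module): fitting one of the
# estimators exported above on random data.
# import numpy as np
# from sklearn.covariance import LedoitWolf
# X = np.random.RandomState(0).randn(100, 5)
# lw = LedoitWolf().fit(X)
# lw.covariance_  # shrunk covariance estimate
# lw.precision_   # its inverse, the estimated precision matrix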
| glemaitre/scikit-learn | sklearn/covariance/__init__.py | Python | bsd-3-clause | 1,309 | ["Gaussian"] | d1e62db96312b9537006402cdbb574012e9fb4c40f33315b33de6f7ea292ebc1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: InfoGAN-mnist.py
# Author: Yuxin Wu
import cv2
import numpy as np
import tensorflow as tf
import os
import argparse
from tensorpack import *
from tensorpack.utils import viz
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope, under_name_scope
from tensorpack.tfutils import optimizer, summary, gradproc
from tensorpack.dataflow import dataset
from GAN import GANTrainer, GANModelDesc
"""
To train:
./InfoGAN-mnist.py
To visualize:
./InfoGAN-mnist.py --sample --load path/to/model
A pretrained model is at http://models.tensorpack.com/GAN/
"""
BATCH = 128
# latent space is cat(10) x uni(2) x noise(NOISE_DIM)
NUM_CLASS = 10
NUM_UNIFORM = 2
DIST_PARAM_DIM = NUM_CLASS + NUM_UNIFORM
NOISE_DIM = 62
# prior: our assumption of how the latent factors are distributed in the dataset
DIST_PRIOR_PARAM = [1.] * NUM_CLASS + [0.] * NUM_UNIFORM
def shapeless_placeholder(x, axis, name):
"""
Make the static shape of a tensor less specific.
    If you want to feed a tensor, the shape of the fed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared.
"""
shp = x.get_shape().as_list()
if not isinstance(axis, list):
axis = [axis]
for a in axis:
if shp[a] is None:
raise ValueError("Axis {} of shape {} is already unknown!".format(a, shp))
shp[a] = None
x = tf.placeholder_with_default(x, shape=shp, name=name)
return x
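# Illustrative sketch (hypothetical tensor, not part of the original script):
# x = tf.zeros([128, 10])
# y = shapeless_placeholder(x, 0, name='y_flex')
# y.get_shape()  # -> (?, 10): the batch axis can now be fed with any size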
def get_distributions(vec_cat, vec_uniform):
cat = tf.distributions.Categorical(logits=vec_cat, validate_args=True, name='cat')
uni = tf.distributions.Normal(vec_uniform, scale=1., validate_args=True, allow_nan_stats=False, name='uni_a')
return cat, uni
def entropy_from_samples(samples, vec):
"""
Estimate H(x|s) ~= -E_{x \sim P(x|s)}[\log Q(x|s)], where x are samples, and Q is parameterized by vec.
"""
samples_cat = tf.argmax(samples[:, :NUM_CLASS], axis=1, output_type=tf.int32)
samples_uniform = samples[:, NUM_CLASS:]
cat, uniform = get_distributions(vec[:, :NUM_CLASS], vec[:, NUM_CLASS:])
def neg_logprob(dist, sample, name):
nll = -dist.log_prob(sample)
# average over batch
return tf.reduce_sum(tf.reduce_mean(nll, axis=0), name=name)
entropies = [neg_logprob(cat, samples_cat, 'nll_cat'),
neg_logprob(uniform, samples_uniform, 'nll_uniform')]
return entropies
@under_name_scope()
def sample_prior(batch_size):
cat, _ = get_distributions(DIST_PRIOR_PARAM[:NUM_CLASS], DIST_PRIOR_PARAM[NUM_CLASS:])
sample_cat = tf.one_hot(cat.sample(batch_size), NUM_CLASS)
"""
    The official OpenAI code actually models the "uniform" latent code as
    a Gaussian distribution, but obtains the samples from a uniform distribution.
"""
sample_uni = tf.random_uniform([batch_size, NUM_UNIFORM], -1, 1)
samples = tf.concat([sample_cat, sample_uni], axis=1)
return samples
class Model(GANModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, (None, 28, 28), 'input')]
def generator(self, z):
l = FullyConnected('fc0', z, 1024, activation=BNReLU)
l = FullyConnected('fc1', l, 128 * 7 * 7, activation=BNReLU)
l = tf.reshape(l, [-1, 7, 7, 128])
l = Conv2DTranspose('deconv1', l, 64, 4, 2, activation=BNReLU)
l = Conv2DTranspose('deconv2', l, 1, 4, 2, activation=tf.identity)
l = tf.sigmoid(l, name='gen')
return l
@auto_reuse_variable_scope
def discriminator(self, imgs):
with argscope(Conv2D, kernel_size=4, strides=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', 64)
.tf.nn.leaky_relu()
.Conv2D('conv1', 128)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.FullyConnected('fc1', 1024)
.BatchNorm('bn2')
.tf.nn.leaky_relu()())
logits = FullyConnected('fct', l, 1)
encoder = (LinearWrap(l)
.FullyConnected('fce1', 128)
.BatchNorm('bne')
.tf.nn.leaky_relu()
.FullyConnected('fce-out', DIST_PARAM_DIM)())
return logits, encoder
def build_graph(self, real_sample):
real_sample = tf.expand_dims(real_sample, -1)
# sample the latent code:
zc = shapeless_placeholder(sample_prior(BATCH), 0, name='z_code')
z_noise = shapeless_placeholder(
tf.random_uniform([BATCH, NOISE_DIM], -1, 1), 0, name='z_noise')
z = tf.concat([zc, z_noise], 1, name='z')
with argscope([Conv2D, Conv2DTranspose, FullyConnected],
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
fake_sample = self.generator(z)
fake_sample_viz = tf.cast((fake_sample) * 255.0, tf.uint8, name='viz')
tf.summary.image('gen', fake_sample_viz, max_outputs=30)
# may need to investigate how bn stats should be updated across two discrim
with tf.variable_scope('discrim'):
real_pred, _ = self.discriminator(real_sample)
fake_pred, dist_param = self.discriminator(fake_sample)
"""
Mutual information between x (i.e. zc in this case) and some
information s (the generated samples in this case):
I(x;s) = H(x) - H(x|s)
= H(x) + E[\log P(x|s)]
The distribution from which zc is sampled, in this case, is set to a fixed prior already.
So the first term is a constant.
For the second term, we can maximize its variational lower bound:
E_{x \sim P(x|s)}[\log Q(x|s)]
where Q(x|s) is a proposal distribution to approximate P(x|s).
Here, Q(x|s) is assumed to be a distribution which shares the form
of P, and whose parameters are predicted by the discriminator network.
"""
with tf.name_scope("mutual_information"):
with tf.name_scope('prior_entropy'):
cat, uni = get_distributions(DIST_PRIOR_PARAM[:NUM_CLASS], DIST_PRIOR_PARAM[NUM_CLASS:])
ents = [cat.entropy(name='cat_entropy'), tf.reduce_sum(uni.entropy(), name='uni_entropy')]
entropy = tf.add_n(ents, name='total_entropy')
# Note that the entropy of prior is a constant. The paper mentioned it but didn't use it.
with tf.name_scope('conditional_entropy'):
cond_ents = entropy_from_samples(zc, dist_param)
cond_entropy = tf.add_n(cond_ents, name="total_entropy")
MI = tf.subtract(entropy, cond_entropy, name='mutual_information')
summary.add_moving_summary(entropy, cond_entropy, MI, *cond_ents)
# default GAN objective
self.build_losses(real_pred, fake_pred)
        # subtract the mutual information for the latent factors (we want to maximize it)
self.g_loss = tf.subtract(self.g_loss, MI, name='total_g_loss')
self.d_loss = tf.subtract(self.d_loss, MI, name='total_d_loss')
summary.add_moving_summary(self.g_loss, self.d_loss)
# distinguish between variables of generator and discriminator updates
self.collect_variables()
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, dtype=tf.float32, trainable=False)
opt = tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-6)
# generator learns 5 times faster
return optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(('gen/.*', 5))])
def get_data():
ds = ConcatData([dataset.Mnist('train'), dataset.Mnist('test')])
ds = BatchData(ds, BATCH)
ds = MapData(ds, lambda dp: [dp[0]]) # only use the image
return ds
def sample(model_path):
pred = OfflinePredictor(PredictConfig(
session_init=get_model_loader(model_path),
model=Model(),
input_names=['z_code', 'z_noise'],
output_names=['gen/viz']))
# sample all one-hot encodings (10 times)
z_cat = np.tile(np.eye(10), [10, 1])
    # sample continuous variables from -2 to +2 as mentioned in the paper
z_uni = np.linspace(-2.0, 2.0, num=100)
z_uni = z_uni[:, None]
IMG_SIZE = 400
while True:
# only categorical turned on
z_noise = np.random.uniform(-1, 1, (100, NOISE_DIM))
zc = np.concatenate((z_cat, z_uni * 0, z_uni * 0), axis=1)
o = pred(zc, z_noise)[0]
viz1 = viz.stack_patches(o, nr_row=10, nr_col=10)
viz1 = cv2.resize(viz1, (IMG_SIZE, IMG_SIZE))
        # show the effect of the first continuous variable with fixed noise
zc = np.concatenate((z_cat, z_uni, z_uni * 0), axis=1)
o = pred(zc, z_noise * 0)[0]
viz2 = viz.stack_patches(o, nr_row=10, nr_col=10)
viz2 = cv2.resize(viz2, (IMG_SIZE, IMG_SIZE))
        # show the effect of the second continuous variable with fixed noise
zc = np.concatenate((z_cat, z_uni * 0, z_uni), axis=1)
o = pred(zc, z_noise * 0)[0]
viz3 = viz.stack_patches(o, nr_row=10, nr_col=10)
viz3 = cv2.resize(viz3, (IMG_SIZE, IMG_SIZE))
canvas = viz.stack_patches(
[viz1, viz2, viz3],
nr_row=1, nr_col=3, border=5, bgcolor=(255, 0, 0))
viz.interactive_imshow(canvas)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='visualize the space of the 10 latent codes')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.sample:
BATCH = 100
sample(args.load)
else:
logger.auto_set_dir()
GANTrainer(QueueInput(get_data()),
Model()).train_with_defaults(
callbacks=[ModelSaver(keep_checkpoint_every_n_hours=0.1)],
steps_per_epoch=500,
max_epoch=100,
session_init=SaverRestore(args.load) if args.load else None
)
| eyaler/tensorpack | examples/GAN/InfoGAN-mnist.py | Python | apache-2.0 | 10,728 | ["Gaussian"] | 31fc32352df5ab4bdab17cd48597aa78733e605fdabd3347254a99aaf77585e5 |
import sys
from PyFastBDT import FastBDT
from PyFastBDT import utility
import numpy as np
import numpy
import numpy.linalg
import sklearn.metrics
import matplotlib.pyplot as plt
import matplotlib as mpl
def calculate_cdf_and_pdf(X):
"""
    Calculates the cdf and pdf of the given sample and adds under/overflow bins
@param X 1-d numpy.array
"""
pdf, bins = numpy.histogram(X, bins=100, density=True)
cdf = numpy.cumsum(pdf * (bins - numpy.roll(bins, 1))[1:])
return numpy.hstack([0.0, cdf, 1.0]), numpy.hstack([0.0, pdf, 0.0]), bins
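# Illustrative check (hypothetical sample, not part of the original script):
# cdf, pdf, bins = calculate_cdf_and_pdf(np.random.normal(size=1000))
# cdf[0] == 0.0 and cdf[-1] == 1.0  # under/overflow bins are added at both ends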
class Prior(object):
def __init__(self, signal, bckgrd):
self.signal_cdf, self.signal_pdf, self.signal_bins = calculate_cdf_and_pdf(signal)
self.bckgrd_cdf, self.bckgrd_pdf, self.bckgrd_bins = calculate_cdf_and_pdf(bckgrd)
# Avoid numerical instabilities
self.bckgrd_pdf[0] = self.bckgrd_pdf[-1] = 1
self.signal_yield = len(signal)
self.bckgrd_yield = len(bckgrd)
def get_signal_pdf(self, X):
return self.signal_pdf[numpy.digitize(X, bins=self.signal_bins)]
def get_bckgrd_pdf(self, X):
return self.bckgrd_pdf[numpy.digitize(X, bins=self.bckgrd_bins)]
def get_signal_cdf(self, X):
return self.signal_cdf[numpy.digitize(X, bins=self.signal_bins)]
def get_bckgrd_cdf(self, X):
return self.bckgrd_cdf[numpy.digitize(X, bins=self.bckgrd_bins)]
def get_prior(self, X):
return self.get_signal_pdf(X) / (self.get_signal_pdf(X) + self.get_bckgrd_pdf(X))
def combine_probabilities(p1, p2):
return p1*p2 / (p1*p2 + (1-p1)*(1-p2))
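# Illustrative check (not part of the original script): two independent 0.8
# signal probabilities combine to 0.8*0.8 / (0.8*0.8 + 0.2*0.2) ~= 0.941.
# combine_probabilities(0.8, 0.8)  # -> ~0.941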
def evaluation(label, X_test, y_test, p, p_prior):
print(label, utility.auc_roc(p, y_test), utility.flatness(p, X_test[:, 0], y_test, classes=[0]), utility.flatness(p, X_test[:, 0], y_test, classes=[1]))
print(label, sklearn.metrics.roc_auc_score(y_test, p))
print(label + " with prior", sklearn.metrics.roc_auc_score(y_test, combine_probabilities(p, p_prior)))
plt.scatter(X_test[y_test == 1, 0], p[y_test == 1], c='r', label=label + " (Signal)", alpha=0.2)
plt.scatter(X_test[y_test == 0, 0], p[y_test == 0], c='b', label=label + " (Background)", alpha=0.2)
plt.xlabel("Feature")
plt.ylabel("Probability")
plt.show()
if __name__ == '__main__':
    # Create some Monte Carlo data using a multidimensional Gaussian distribution
    # The 0th row of the covariance matrix describes the correlation to the target variable
mean = [0.5, 0.4, 0.4]
cov = [[1.0, 0.6, 0.6],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
mean2 = [-0.5, -0.4, -0.4]
cov2 = [[1.0, 0.6, 0.6],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
for i in range(len(mean)):
for j in range(i+1, len(mean)):
cov[j][i] = cov[i][j]
cov2[j][i] = cov2[i][j]
N_train, N_test = 100000, 2000
data = np.random.multivariate_normal(mean2, cov2, (N_train + N_test)//2)
data2 = np.random.multivariate_normal(mean, cov, (N_train + N_test)//2)
X_train, y_train = np.r_[data[:N_train//2], data2[:N_train//2]], np.r_[np.ones(N_train//2) == 1, np.ones(N_train//2) == 0]
X_test, y_test = np.r_[data[N_train//2:], data2[N_train//2:]], np.r_[np.ones(N_test//2) == 1, np.ones(N_test//2) == 0]
# First variable is the variable we want to have independent of our network output
prior = Prior(X_train[y_train == 1, 0], X_train[y_train == 0, 0])
p_prior = prior.get_prior(X_test[:, 0])
evaluation("Prior", X_test, y_test, p_prior, p_prior)
evaluation("Random", X_test, y_test, np.random.uniform(size=N_test), p_prior)
for i in [0.0, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0]:
p = FastBDT.Classifier(flatnessLoss=i, numberOfFlatnessFeatures=1).fit(X=np.c_[X_train[:, 1:], X_train[:, 0]], y=y_train).predict(X_test[:, 1:])
print("Flatness", i)
evaluation("UBoost", X_test, y_test, p, p_prior)
p = FastBDT.Classifier().fit(X=X_train, y=y_train).predict(X_test)
evaluation("Full", X_test, y_test, p, p_prior)
p = FastBDT.Classifier().fit(X=X_train[:, 1:], y=y_train).predict(X_test[:, 1:])
evaluation("Restricted", X_test, y_test, p, p_prior)
| thomaskeck/FastBDT | examples/ugboost.py | Python | gpl-3.0 | 4,224 | ["Gaussian"] | 3137b1219cea5086d41c71d9e876702b7fb266c08f90c5f6755d62ec7aeb16dc |
import pandas as pd
import time
import numpy as np
import json
import simplejson
import urllib
import config
import ast
import bs4
import geocoder
from helpers import *
from math import cos, radians
import us_state_abbrevation as abb
from bs4 import BeautifulSoup as BS
from pymongo import MongoClient
import re
import requests  # used by request_s() below
client = MongoClient()
db = client.zoeshrm
db.TripAdvisor
## import progressbar
abb2state_dict = abb.abb2state
with open('api_key_list.config') as key_file:
api_key_list = json.load(key_file)
api_key_list = api_key_list["goecoder_api_key_list"]  # keep only the list of geocoder API keys
def state_park_web(db_html):
poi_detail_state_park_df=pd.DataFrame(columns=['index','name','street_address','city', 'county','state_abb','state','postal_code','country','address','coord_lat','coord_long','num_reviews','review_score','ranking','tag','raw_visit_length','fee','description','url',"geo_content", "adjusted_visit_length", "area"])
error_message_df = pd.DataFrame(columns=['index','name','url','state_abb_error', 'state_error','address_error','geo_error','review_error','score_error','ranking_error','tag_error','county_error'])
search_visit_length = re.compile('Recommended length of visit:')
search_fee = re.compile('Fee:')
cnt = 0
name_lst = []
full_address_lst = []
api_i = 0
for page in db_html[len(poi_detail_state_park_df):]:
s = BS(page['html'], "html.parser")
#index
#name
input_list, error_message = [],[]
state_abb_error, state_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error, county_error = 0,0,0,0,0,0,0,0,0
latitude, longitude, geo_content = None, None, None
# print name
url = page['url']
name = s.find('h1', attrs = {'class':'heading_name'}).text.strip()
#street_address
street_address = s.find('span', attrs = {'class':'street-address'}).text.strip()
#city
city = s.find('span', attrs = {'property':'addressLocality'}).text.strip()
#state
state_abb = s.find('span', attrs = {'property':'addressRegion'}).text.strip()
if state_abb:
try:
# state = state_abb_dict.keys()[state_abb_dict.values().index(state_abb)]
state = abb2state_dict[state_abb]
except:
state_abb_error = 1
state = state_abb
else:
state_error =1
state_abb = None
state = None
#postal_code
postal_code = s.find('span', attrs = {'property':'postalCode'}).text.strip()
#country
if s.find('span', attrs = {'property':'addressCountry'}).get('content'):
country = s.find('span',{'property':'addressCountry'}).get('content')
elif s.find('span',{'property':'addressCountry'}).get('content') == None:
country = s.find('span',{'property':'addressCountry'}).text.strip()
else:
country = 'United States'
#address
if state:
full_address = street_address+', '+city+', '+state+', '+postal_code[:5]+', '+country
else:
address_error =1
full_address = street_address+', '+city+', '+postal_code[:5]+', '+country
# if (name in name_lst) and (full_address in full_address_lst):
# continue
# else:
# name_lst.append(name)
# full_address_lst.append(full_address)
#geo
        try:
            result_longlat = find_latlng(full_address, name, api_i)
            while result_longlat is False:
                api_i += 1
                result_longlat = find_latlng(full_address, name, api_i)
            # unpack inside the try so a geocoder failure cannot reference an undefined result
            [latitude, longitude, county, bbox, geo_content] = result_longlat
        except:
            geo_error = 1
            latitude, longitude, county, bbox, geo_content = None, None, None, None, None
# county
if county == None:
try:
county = find_county(state, city)
except:
county_error =1
#num_reviews
try:
num_reviews = s.find('div', attrs = {'class': 'rs rating'}).find('a').get('content')
if not num_reviews:
num_reviews = 0
except:
try:
num_reviews = s.find('a', {'property': "reviewCount"}).get('content')
if not num_reviews:
num_reviews = 0
except:
num_reviews = 0
review_error=1
#review_score
try:
review_score = s.find('div', attrs = {'class': 'heading_rating separator'}).find('img').get('content')
if not review_score:
review_score =0
except:
try:
review_score = s.find('span', {'property': "ratingValue"}).get('content')
if not review_score:
review_score =0
except:
review_score = 0
score_error =1
#ranking
try:
ranking = s.find('b', attrs = {'class':'rank_text wrap'}).text.strip().replace('#',"")
except:
ranking = 999
ranking_error=1
#tag
try:
tags = ", ".join(label.text.strip() for label in s.select('div.detail > a') + s.select('span.collapse.hidden > a'))
except:
tags = None
tag_error =1
#visit_length
if s.find('b', text =search_visit_length):
raw_visit_length = s.find('b', text =search_visit_length).next_sibling.strip()
adjusted_time = raw_to_adjust_time(raw_visit_length)
else:
raw_visit_length = None
adjusted_time = None
        # use the bounding-box area to estimate the time spent at the poi
        if bbox is not None:
            area = find_area(bbox)
            if adjusted_time is None:
                adjusted_time = area_to_adjust_time(area)
        else:
            area = None  # 'area' is written to the output row below, so default it
#fee
if s.find('b', text= search_fee):
fee = s.find('b',text= search_fee).next_sibling.strip()
else:
fee = 'Unknown'
#description
if s.find('div', attrs = {'class': "listing_details"}):
description = s.find('div', attrs = {'class': "listing_details"}).text.strip()
else:
description = None
error_message = [len(poi_detail_state_park_df), name, url,state_abb_error, state_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error, county_error]
error_message_df.loc[len(poi_detail_state_park_df)] =error_message
input_list = [len(poi_detail_state_park_df), name, street_address, city, county, state_abb, state, postal_code, country, full_address, latitude, longitude, num_reviews, review_score, ranking, tags, raw_visit_length, fee, description, url, geo_content, adjusted_time, area]
poi_detail_state_park_df.loc[len(poi_detail_state_park_df)] = input_list
# print cnt, name
# if cnt % 1000 == 0:
# poi_detail_state_park_df.to_csv('poi_detail_no_coords_%s.csv' %(cnt), index_col = None, encoding=('utf-8'))
# error_message_df.to_csv('poi_error_message_no_coords_%s.csv' %(cnt), index_col = None, encoding=('utf-8'))
# # time.sleep(1)
# cnt+=1
# poi_detail_state_park_df.to_csv('poi_detail_state_park_df3.csv',encoding=('utf-8'))
# error_message_df.to_csv('poi_error_message__state_park_df3.csv',encoding=('utf-8'))
return poi_detail_state_park_df, error_message_df
def find_latlng(full_address, name, i):
    g_address = geocoder.google(full_address, key=api_key_list[i])
if g_address.content['status'] == 'OVER_QUERY_LIMIT':
return False
if g_address.ok:
county= g_address.county.replace("County","").upper().encode('utf-8').strip()
return [g_address.lat, g_address.lng, county ,g_address.bbox ,g_address.content]
    g_name = geocoder.google(name, key=api_key_list[i])
if g_name.content['status'] == 'OVER_QUERY_LIMIT':
return False
if g_name.ok:
county= g_name.county.replace("County","").upper().encode('utf-8').strip()
return [g_name.lat, g_name.lng, county, g_name.bbox, g_name.content]
else:
        return [None, None, None, None, None]  # match the 5-element success result
def find_area(box):
    # To keep things simple we use the standard per-degree distances:
    # Latitude: 1 deg = 110.574 km
    # Longitude: 1 deg = 111.320*cos(latitude) km
    # If we need more accurate numbers, we need a different approach,
    # e.g. using Shapely to compute the polygon, or the WGS84 formula.
    lat = (box["southwest"][0] - box["northeast"][0]) * 110.574
    lng = (box["southwest"][1] - box["northeast"][1]) * 111.320 * cos(radians(box["southwest"][0]))
    return abs(lat * lng)
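# Illustrative check (hypothetical bounding box, not part of the original script):
# find_area({"southwest": [0.0, 0.0], "northeast": [0.1, 0.1]})
# # -> ~11.06 km * ~11.13 km ~= 123 km^2 near the equator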
def request_s(url):
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
r=requests.get(url,headers=headers)
s = BS(r.text, 'html.parser')
return s
def thing_to_do_in_national_park(s):
thing_to_do = pd.DataFrame(columns=["national_park_name","activate_name","url","num_reviews","score","ranking","tags"])
national_park_name = s.find('h1', {"id": "HEADING"}).text.strip('\n').replace("Things to Do in ","")
print "park name: ",national_park_name
for activate in s.findAll('div', {"class":"listing_title"}):
activate_name = activate.text.strip()
url ="https://www.tripadvisor.com"+ activate.find('a').get("href")
if activate.find_next('div', {"class":"rs rating"}) ==None:
score, num_reviews = 0, 0
else:
score = activate.find_next('div', {"class":"rs rating"}).find('span').get('alt').replace(" of 5 bubbles","")
num_reviews = activate.find_next('div', {"class":"rs rating"}).find('span', {'class': "more"}).text.strip().replace("reviews","")
ranking = activate.find_next('div', {'class':"popRanking wrap"}).text.strip().replace("#","")[0]
if activate.find_next('div',{'class':"tag_line"}).find('span') == None:
tags = None
else:
tags = activate.find_next('div',{'class':"tag_line"}).find('span').text
list_thing = [national_park_name, activate_name, url, num_reviews, score, ranking, tags]
thing_to_do.loc[len(thing_to_do)] = list_thing
return thing_to_do
def wiki_table(s):
#s is webpage in html
name, state =None, None
table = s.find('table', {"class" : "wikitable"})
# col_name = [x.text for x in table.findAll("th",{"scope":"col"})]
# num_col = len(col_name)
# wiki_table= pd.DataFrame(columns=col_name)
df = pd.DataFrame(columns = ["name","state","description"])
for row in table.findAll("tr")[1:]:
if row.find('th', {'scope':"row"}) != None:
name = row.find('th', {'scope':"row"}).next_element.get('title')
cells = row.findAll("td")
#For each "tr", assign each "td" to a variable.
#have 6 columns, we only need 1,5 (state, description)
if len(cells) == 6:
state = cells[1].find(text=True)
des = str("".join(cells[5].findAll(text=True)).encode('utf8'))
description = re.sub(r"\[\d+\]","",des)
df.loc[len(df)] = [name, state, description]
return df
def raw_to_adjust_time(raw):
adjusted_time =0
if raw == "1-2 hours":
adjusted_time = 120
if raw == "2-3 hours":
adjusted_time = 180
if raw == "More than 3 hours":
adjusted_time = 360
if raw == "<1 hour":
adjusted_time = 60
return adjusted_time
def area_to_adjust_time(area):
if area<34:
adjusted_time = 60
elif area<500:
adjusted_time = 120
elif area<2000:
adjusted_time = 180
else:
adjusted_time = 240
return adjusted_time
# Error flags:
# state_abb_error : state_abb cannot be converted to a state name; either it is already a full state name or it is not a US state
# state_error : no state can be found
# address_error : the address has no state (not in the USA) or is incomplete
# geo_error : geocoder API error, mainly due to the query limit being exceeded
# review_error / score_error / ranking_error : cannot get the review count / score / ranking; either there are no reviews yet or the page uses a new layout
# tag_error : cannot get tags; maybe a different page layout, or no tags for that location (please report this error)
# save into cvs
# error_message_df.to_csv('error_message.csv', encoding=('utf-8'))
# poi_detail_state_park.to_csv("poi_detail_state_park.csv", encoding=('utf-8'))
#
# remove geo_content before input to sql, normal sql cannot handle geo_content (we take out and put into mongodb)
#
# poi_additional_detail = poi_detail_state_park[['index','name','url','address','geo_content']]
# geo_content_detail=poi_detail_state_park.pop('geo_content')
#
# save into mongodb and sql
#
# db.geo_content.insert_many(poi_additional_detail.to_dict('records'))
# poi_detail_state_park.to_sql('poi_detail_state_park_table',engine, if_exists = "replace")
if __name__ == '__main__':
poi_pages = db.TripAdvisor.find()
state_park_web(poi_pages)
| kennethcc2005/travel_with_friends | web_scraping_tripadvisor.py | Python | mit | 13,608 | ["VisIt"] | 45c92a96f500b6d8524ac4175d9cd66928cf8618d568a7d7cb72319e92a8ec2c |
#
# Quru Image Server
#
# Document: tests.py
# Date started: 05 Apr 2011
# By: Matt Fozard
# Purpose: Contains the image server development test suite
# Requires:
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
# 18Aug2011 Matt Require test settings file, reset databases before running tests
# 05Oct2012 Matt Added API tests class
# 16Nov2012 Matt Added system permission tests
# 19Feb2013 Matt Added folder permission tests
# 13Aug2013 Matt Replaced file size checks with image comparison checks
# 28Sep2015 Matt Moved API tests into test_api.py
# 26Aug2016 Matt Moved web page tests into test_web_pages.py
# 07Jun2018 Matt Moved imaging tests into test_imaging.py, added Pillow tests
#
import json
import os
import shutil
import subprocess
import time
import timeit
import unittest
from unittest import mock
import flask
from werkzeug.http import http_date
# Do not use the base or any other current environment settings,
# as we'll be clearing down the database
TESTING_SETTINGS = 'unit_tests'
os.environ['QIS_SETTINGS'] = TESTING_SETTINGS
print("Importing imageserver libraries")
# Assign global managers, same as the main app uses
from imageserver.flask_app import app as flask_app
from imageserver.flask_app import logger as lm
from imageserver.flask_app import cache_engine as cm
from imageserver.flask_app import data_engine as dm
from imageserver.flask_app import image_engine as im
from imageserver.flask_app import stats_engine as sm
from imageserver.flask_app import task_engine as tm
from imageserver.flask_app import permissions_engine as pm
from imageserver.flask_app import launch_aux_processes, _stop_aux_processes
from imageserver.api_util import API_CODES
from imageserver.errors import AlreadyExistsError
from imageserver.filesystem_manager import (
    get_abs_path, copy_file, delete_dir, delete_file, make_dirs, path_exists
)
from imageserver.filesystem_sync import (
auto_sync_existing_file, auto_sync_folder
)
from imageserver.flask_util import internal_url_for
from imageserver.image_attrs import ImageAttrs
from imageserver.models import (
Folder, Group, User, Image, ImageHistory, ImageTemplate,
FolderPermission, SystemPermissions
)
from imageserver.permissions_manager import _trace_to_str
from imageserver.session_manager import get_session_user
from imageserver.scripts.cache_util import delete_image_ids
from imageserver.template_attrs import TemplateAttrs
from imageserver.util import secure_filename
from imageserver import imaging
# Module level setUp and tearDown
def setUpModule():
init_tests()
def tearDownModule():
cleanup_tests()
# Utility - resets the database and cache, and optionally starts the aux processes
def init_tests(launch_aux=True):
cm.clear()
reset_databases()
if launch_aux:
launch_aux_processes()
time.sleep(1)
# Utility - cleans up after tests have finished
def cleanup_tests():
_stop_aux_processes()
# The aux processes have a 1 second shutdown response
time.sleep(1.5)
# In case there are other test modules following on,
# tell the test app that its aux server connections are now invalid
lm._client_close()
sm._client_close()
# Utility - delete and re-create the internal databases
def reset_databases():
assert flask_app.config['TESTING'], \
'Testing settings have not been applied, not clearing the database!'
cm._drop_db()
dm._drop_db()
cm._init_db()
dm._init_db()
# Set the admin password to something known so we can log in
admin_user = dm.get_user(username='admin')
admin_user.set_password('admin')
dm.save_object(admin_user)
# Default the public and internal root folder permissions to allow View + Download
set_default_public_permission(FolderPermission.ACCESS_DOWNLOAD, False)
set_default_internal_permission(FolderPermission.ACCESS_DOWNLOAD, False)
# Set some standard image generation settings
reset_default_image_template(False)
# As we have wiped the data, invalidate the internal caches' data versions
pm._foliop_data_version = 0
pm._fp_data_version = 0
pm.reset()
im._templates._data_version = 0
im.reset_templates()
def set_default_public_permission(access, clear_cache=True):
default_fp = dm.get_object(FolderPermission, 1)
assert default_fp.group_id == Group.ID_PUBLIC
default_fp.access = access
dm.save_object(default_fp)
if clear_cache:
pm.reset_folder_permissions()
def set_default_internal_permission(access, clear_cache=True):
default_fp = dm.get_object(FolderPermission, 2)
assert default_fp.group_id == Group.ID_EVERYONE
default_fp.access = access
dm.save_object(default_fp)
if clear_cache:
pm.reset_folder_permissions()
def reset_default_image_template(clear_cache=True):
default_it = dm.get_image_template(tempname='Default')
# Use reasonably standard image defaults
image_defaults = [
('format', 'jpg'), ('quality', 75), ('colorspace', None),
('dpi_x', None), ('dpi_y', None), ('strip', False),
('record_stats', True),
('expiry_secs', 60 * 60 * 24 * 7)
]
# Clear default template then apply our defaults
default_it.template = {}
for (key, value) in image_defaults:
default_it.template[key] = {'value': value}
dm.save_object(default_it)
if clear_cache:
im.reset_templates()
# Utility - selects or switches the Pillow / ImageMagick imaging back end
def select_backend(back_end):
imaging.init(
back_end,
flask_app.config['GHOSTSCRIPT_PATH'],
flask_app.config['TEMP_DIR'],
flask_app.config['PDF_BURST_DPI']
)
# VERY IMPORTANT! Clear any images cached from the previous back end
cm.clear()
# The image manager does not expect the back end to change,
# we need to clear its internal caches too
im._memo_image_formats_all = None
im._memo_image_formats_supported = None
im._memo_supported_ops = None
# Utility - create/reset and return a test user having a certain system permission.
# The returned user is a member of the standard "everyone" group and also of
# a separate group that is used for setting the custom system permission.
def setup_user_account(login_name, user_type='none', allow_api=False):
db_session = dm.db_get_session()
try:
# Get or create user
testuser = dm.get_user(
username=login_name,
load_groups=True,
_db_session=db_session
)
if not testuser:
testuser = User(
'Kryten', 'Testing Droid', 'kryten@reddwarf.galaxy',
login_name, login_name,
User.AUTH_TYPE_PASSWORD,
allow_api,
User.STATUS_ACTIVE
)
dm.create_user(testuser, _db_session=db_session)
else:
testuser.allow_api = allow_api
testuser.status = User.STATUS_ACTIVE
# Wipe system permissions
test_group = dm.get_group(groupname=login_name+'-group', _db_session=db_session)
if not test_group:
test_group = Group(
login_name + '-group',
'Test group',
Group.GROUP_TYPE_LOCAL
)
dm.create_group(test_group, _db_session=db_session)
test_group.permissions = SystemPermissions(
test_group, False, False, False, False, False, False, False
)
# Wipe folder and folio permissions
del test_group.folder_permissions[:]
del test_group.folio_permissions[:]
# Apply permissions for requested test type
if user_type == 'none':
pass
elif user_type == 'folios':
test_group.permissions.folios = True
elif user_type == 'admin_users':
test_group.permissions.admin_users = True
elif user_type == 'admin_files':
test_group.permissions.admin_files = True
elif user_type == 'admin_folios':
test_group.permissions.admin_folios = True
elif user_type == 'admin_permissions':
test_group.permissions.admin_users = True
test_group.permissions.admin_permissions = True
elif user_type == 'admin_all':
test_group.permissions.admin_all = True
else:
raise ValueError('Unimplemented test user type ' + user_type)
dm.save_object(test_group, _db_session=db_session)
everyone_group = dm.get_group(Group.ID_EVERYONE, _db_session=db_session)
testuser.groups = [everyone_group, test_group]
dm.save_object(testuser, _db_session=db_session)
db_session.commit()
return testuser
finally:
db_session.close()
# Clear old cached user permissions
pm.reset()
class FlaskTestCase(unittest.TestCase):
def setUp(self):
# Reset the app configuration before each test and create a Flask test client
self.reset_settings()
self.app = flask_app.test_client()
def reset_settings(self):
flask_app.config.from_object(flask.Flask.default_config)
flask_app.config.from_object('imageserver.conf.base_settings')
flask_app.config.from_object('imageserver.conf.' + TESTING_SETTINGS)
class BaseTestCase(FlaskTestCase):
# Utility - perform a log in via the web page
def login(self, usr, pwd):
rv = self.app.post('/login/', data={
'username': usr,
'password': pwd
})
# 302 = success redirect, 200 = login page with error message
self.assertEqual(
rv.status_code, 302,
'Login failed with response: ' + self.get_login_error(rv.data.decode('utf8'))
)
# Utility - gets an API token
def api_login(self, usr, pwd):
rv = self.app.post('/api/token/', data={
'username': usr,
'password': pwd
})
# 200 = success, other = error
self.assertEqual(rv.status_code, 200)
obj = json.loads(rv.data.decode('utf8'))
return obj['data']['token']
# Utility - perform a log out
def logout(self):
rv = self.app.get('/logout/')
# 302 = success redirect
self.assertEqual(rv.status_code, 302)
# Utility - uploads a file (provide the full path) to an image server
# folder via the file upload API. Returns the app.post() return value.
def file_upload(self, app, src_file_path, dest_folder, overwrite='1'):
return self.multi_file_upload(app, [src_file_path], dest_folder, overwrite)
# Utility - uploads multiple files (provide the full paths) to an image server
# folder via the file upload API. Returns the app.post() return value.
def multi_file_upload(self, app, src_file_paths, dest_folder, overwrite='1'):
files = []
try:
files = [open(path, 'rb') for path in src_file_paths]
return app.post('/api/upload/', data={
'files': files,
'path': dest_folder,
'overwrite': overwrite
})
finally:
for fp in files:
fp.close()
# Utility - deletes a file from the image repository, and purges its data record
# from the database. If the path does not exist or there is no data record then
# the function continues silently without raising an error.
def delete_image_and_data(self, rel_path):
delete_file(rel_path)
db_img = dm.get_image(src=rel_path)
if db_img is not None:
dm.delete_image(db_img, True)
# Utility - waits for a file path (relative to IMAGES_BASE_DIR else absolute)
# to exist, or else raises a timeout and fails the test
def wait_for_path_existence(self, fpath, path_relative, timeout_secs):
abs_path = get_abs_path(fpath) if path_relative else fpath
start_time = time.time()
while not os.path.exists(abs_path) and time.time() < (start_time + timeout_secs):
time.sleep(1)
self.assertTrue(
os.path.exists(abs_path),
'Path \'%s\' was not created within %.1f seconds' % (fpath, timeout_secs)
)
# Utility - return the interesting bit of a failed login page
def get_login_error(self, html):
fromidx = html.find("<div class=\"error")
toidx = html.find("</div>", fromidx)
return html[fromidx:toidx + 6]
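# Illustrative sketch (hypothetical test, not part of the original suite):
# a test built on the BaseTestCase utilities above might look like this.
#
# class ExampleLoginTests(BaseTestCase):
#     def test_plain_user_can_log_in(self):
#         setup_user_account('kryten', 'none')
#         self.login('kryten', 'kryten')  # setup_user_account sets the password to the username
#         self.logout()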
class ImageServerBackgroundTaskTests(BaseTestCase):
# Test xref parameter
def test_xref_parameter(self):
inner_server = None
try:
# Make sure we have no test image A at width 50
test_img = auto_sync_existing_file('test_images/cathedral.jpg', dm, tm)
test_image_attrs = ImageAttrs(
test_img.src, test_img.id, width=50,
strip=False, dpi=0, iformat='jpg', quality=75,
colorspace='rgb'
)
im.finalise_image_attrs(test_image_attrs)
cache_img = cm.get(test_image_attrs.get_cache_key())
assert cache_img is None, 'Test image was already in cache - cannot run test!'
# Create a subprocess to handle the xref-generated http request
temp_env = os.environ
temp_env['QIS_SETTINGS'] = TESTING_SETTINGS
temp_env['FLASK_ENV'] = 'production'
rs_path = 'src/runserver.py' if os.path.exists('src/runserver.py') else 'runserver.py'
inner_server = subprocess.Popen('python ' + rs_path, cwd='.', shell=True, env=temp_env)
time.sleep(5)
# Set the xref base URL so that we will generate image A if we pass 50 as width
flask_app.config['XREF_TRACKING_URL'] = \
'http://127.0.0.1:5000' + \
'/image?src=test_images/cathedral.jpg&strip=0&dpi=0&format=jpg&quality=75&colorspace=rgb&width='
# Call a different image B passing in the xref of 50
rv = self.app.get('/image?src=test_images/dorset.jpg&xref=50')
assert rv.status_code == 200
# Wait a little for the background xref handling thread to complete
time.sleep(2)
# Now the test image A should have been created
cache_img = cm.get(test_image_attrs.get_cache_key())
assert cache_img is not None, 'Failed to find ' + test_image_attrs.get_cache_key() + '. xref URL did not appear to be invoked.'
finally:
# Kill the temporary server subprocess
if inner_server:
inner_server.terminate()
time.sleep(1)
# Tests that the anonymous stats upload task works
def test_upload_usage_stats(self):
from imageserver.tasks import upload_usage_stats
flask_app.config['USAGE_DATA_URL'] = 'http://dummy.url/'
host_id_1 = ''
with mock.patch('requests.post') as mockpost:
upload_usage_stats()
mockpost.assert_called_once_with(mock.ANY, data=mock.ANY, timeout=mock.ANY)
mock_args = mockpost.call_args
stats = json.loads(mock_args[1]['data'])
self.assertIn('version', stats)
self.assertIn('host_id', stats)
self.assertIn('stats', stats)
self.assertIn('time', stats)
self.assertIn('hash', stats)
host_id_1 = stats['host_id']
# v4.1.3 Host ID should be generated and stay the same
self.assertGreater(len(host_id_1), 0)
with mock.patch('requests.post') as mockpost:
upload_usage_stats()
mockpost.assert_called_once_with(mock.ANY, data=mock.ANY, timeout=mock.ANY)
mock_args = mockpost.call_args
stats = json.loads(mock_args[1]['data'])
host_id_2 = stats['host_id']
self.assertEqual(host_id_1, host_id_2)
# Similar to test_db_auto_population, but with the emphasis
# on auto-detecting external changes to the file system
def test_db_auto_sync(self):
temp_folder = 'test_auto_sync'
temp_file_1 = temp_folder + '/image1.jpg'
temp_file_2 = temp_folder + '/image2.jpg'
try:
# Create a new folder and copy 2 images into it
make_dirs(temp_folder)
copy_file('test_images/cathedral.jpg', temp_file_1)
copy_file('test_images/dorset.jpg', temp_file_2)
# View the images
rv = self.app.get('/image?src=' + temp_file_1)
assert rv.status_code == 200
rv = self.app.get('/image?src=' + temp_file_2)
assert rv.status_code == 200
# We should now have a folder db record, 2x image db records
db_folder = dm.get_folder(folder_path=temp_folder)
db_file_1 = dm.get_image(src=temp_file_1, load_history=True)
db_file_2 = dm.get_image(src=temp_file_2, load_history=True)
assert db_folder and db_file_1 and db_file_2
assert db_folder.status == Folder.STATUS_ACTIVE and \
db_file_1.status == Image.STATUS_ACTIVE and \
db_file_2.status == Image.STATUS_ACTIVE
# Should have image creation history
assert len(db_file_1.history) == 1
assert len(db_file_2.history) == 1
# Save the IDs for checking later
prev_folder_id = db_folder.id
prev_image_id_1 = db_file_1.id
prev_image_id_2 = db_file_2.id
# Delete the folder from disk
delete_dir(temp_folder, recursive=True)
# View 1 image original to trigger a disk read
rv = self.app.get('/original?src=' + temp_file_1)
assert rv.status_code == 404
# This should have triggered a background task to delete all data for temp_folder.
# Wait a short time for the task to complete.
time.sleep(15)
# The db records should all now be present but with status deleted,
# including the folder and other image
db_folder = dm.get_folder(folder_path=temp_folder)
db_file_1 = dm.get_image(src=temp_file_1, load_history=True)
db_file_2 = dm.get_image(src=temp_file_2, load_history=True)
assert db_folder and db_file_1 and db_file_2
assert db_folder.status == Folder.STATUS_DELETED and \
db_file_1.status == Image.STATUS_DELETED and \
db_file_2.status == Image.STATUS_DELETED
# Also we should have image deletion history
assert len(db_file_1.history) == 2
assert len(db_file_2.history) == 2
# Check we get 404 for images (that the cached images are cleared)
rv = self.app.get('/image?src=' + temp_file_1)
assert rv.status_code == 404
rv = self.app.get('/image?src=' + temp_file_2)
assert rv.status_code == 404
# Restore the folder and one image
make_dirs(temp_folder)
copy_file('test_images/cathedral.jpg', temp_file_1)
# View the image (this may actually be a 404 but should detect the disk changes)
self.app.get('/image?src=' + temp_file_1)
# These db records should now be status active (same records with same IDs)
db_folder = dm.get_folder(folder_path=temp_folder)
db_file_1 = dm.get_image(src=temp_file_1, load_history=True)
db_file_2 = dm.get_image(src=temp_file_2, load_history=True)
assert db_folder and db_file_1 and db_file_2
assert db_folder.id == prev_folder_id and \
db_file_1.id == prev_image_id_1 and \
db_file_2.id == prev_image_id_2
assert db_folder.status == Folder.STATUS_ACTIVE and \
db_file_1.status == Image.STATUS_ACTIVE
# And with image re-creation history
assert len(db_file_1.history) == 3
# But with the unviewed image still deleted at present
assert db_file_2.status == Image.STATUS_DELETED
assert len(db_file_2.history) == 2
finally:
delete_dir(temp_folder, recursive=True)
# Test the auto-pyramid generation, which is really a specialist case of
# test_base_image_detection with the base image generated as a background task
def test_auto_pyramid(self):
image_obj = auto_sync_existing_file('test_images/dorset.jpg', dm, tm)
orig_attrs = ImageAttrs(image_obj.src, image_obj.id)
im.finalise_image_attrs(orig_attrs)
w500_attrs = ImageAttrs(image_obj.src, image_obj.id, width=500)
im.finalise_image_attrs(w500_attrs)
# Clean
cm.clear()
im.reset_image(orig_attrs)
# Get the original
rv = self.app.get('/image?src=test_images/dorset.jpg')
assert rv.status_code == 200
orig_len = len(rv.data)
# Only the original should come back as base for a 500 version
base = im._get_base_image(w500_attrs)
assert base is None or len(base.data()) == orig_len
# Set the minimum auto-pyramid threshold and get a tile of the image
flask_app.config["AUTO_PYRAMID_THRESHOLD"] = 1000000
rv = self.app.get('/image?src=test_images/dorset.jpg&tile=1:4')
assert rv.status_code == 200
# Wait a bit for the pyramiding to finish
time.sleep(15)
# Now check the cache again for a base for the 500 version
base = im._get_base_image(w500_attrs)
assert base is not None, 'Auto-pyramid did not generate a smaller image'
# And it shouldn't be the original image either
assert len(base.data()) < orig_len
assert base.attrs().width() is not None
assert base.attrs().width() < 1600 and base.attrs().width() >= 500
class ImageServerMiscTests(BaseTestCase):
# Test basic caching
def test_basic_caching(self):
test_url = '/image?src=test_images/cathedral.jpg&width=413&format=png'
rv = self.app.get(test_url)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'False')
t1_usec = int(rv.headers['X-Time-Taken'])
def get_from_cache():
self.app.get(test_url)
t2 = timeit.Timer(get_from_cache).timeit(1)
self.assertLess(t2, 0.050, 'Cached png took more than 50ms to return')
rv = self.app.get(test_url)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'True')
t3_usec = int(rv.headers['X-Time-Taken'])
# Cached internal time should be quicker than generated internal time
self.assertLess(t3_usec, t1_usec)
# Internal time taken (cached) should be lte total request time (cached)
self.assertLessEqual(t3_usec / 1000000.0, t2)
# Test cache parameter
# v1.34 now only supported when logged in
def test_cache_param(self):
test_url = '/image?src=test_images/cathedral.jpg&width=123'
test_img = auto_sync_existing_file('test_images/cathedral.jpg', dm, tm)
test_attrs = ImageAttrs(test_img.src, test_img.id, width=123)
im.finalise_image_attrs(test_attrs)
# v1.34 when not logged in this param should be ignored
cm.clear()
rv = self.app.get(test_url + '&cache=0')
self.assertEqual(rv.status_code, 200)
cached_image = cm.get(test_attrs.get_cache_key())
self.assertIsNotNone(cached_image)
# When logged in the param should be respected
cm.clear()
setup_user_account('kryten', 'none')
self.login('kryten', 'kryten')
rv = self.app.get(test_url + '&cache=0')
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'False')
# Should not yet be in cache
cached_image = cm.get(test_attrs.get_cache_key())
self.assertIsNone(cached_image, 'Image was cached when cache=0 was specified')
# Request with cache=1 (default)
rv = self.app.get(test_url)
self.assertEqual(rv.status_code, 200)
# Should be in cache now
cached_image = cm.get(test_attrs.get_cache_key())
self.assertIsNotNone(cached_image)
# Test re-cache parameter
# v1.34 re-cache is only enabled with BENCHMARKING=True
def test_recache_param(self):
# So in v1.34 the param should be ignored by default
url = '/image?src=test_images/dorset.jpg&width=50'
rv = self.app.get(url)
self.assertEqual(rv.status_code, 200)
rv = self.app.get(url + '&recache=1')
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'True') # not re-cached
# Now enable recache and get on with the test
flask_app.config['BENCHMARKING'] = True
# Create a new test image to use
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file = get_abs_path('test_images/test_image.jpg')
shutil.copy(src_file, dst_file)
i = None
try:
# Get an image
url = '/image?src=test_images/test_image.jpg&width=500'
rv = self.app.get(url)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'False')
# Get it again
rv = self.app.get(url)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'True')
# Get it again with re-cache
rv = self.app.get(url + '&recache=1')
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'False')
# Get it again
rv = self.app.get(url)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.headers['X-From-Cache'], 'True')
# Delete the file
os.remove(dst_file)
# Get it again (should actually work, returned from cache)
rv = self.app.get(url)
self.assertEqual(rv.status_code, 200)
# Get it again with re-cache (delete should now be detected)
rv = self.app.get(url + '&recache=1')
self.assertEqual(rv.status_code, 404)
# Get it again (check the cache was cleared)
rv = self.app.get(url)
self.assertEqual(rv.status_code, 404)
# Check that the database knows it's deleted
i = dm.get_image(src='test_images/test_image.jpg', load_history=True)
self.assertIsNotNone(i)
self.assertEqual(i.status, Image.STATUS_DELETED)
self.assertGreater(len(i.history), 0)
self.assertEqual(i.history[-1].action, ImageHistory.ACTION_DELETED)
finally:
self.delete_image_and_data('test_images/test_image.jpg')
# Test no params
def test_no_src(self):
rv = self.app.get('/image')
assert rv.status_code == 400
rv = self.app.get('/original')
assert rv.status_code == 400
# Test 403 on insecure src param
def test_insecure_src(self):
# Should be permission denied unless running as root
rv = self.app.get('/image?src=~root/../../../etc/passwd')
self.assertEqual(rv.status_code, 403)
rv = self.app.get('/original?src=~root/../../../etc/passwd')
self.assertEqual(rv.status_code, 403)
# Might be readable by anyone
rv = self.app.get('/image?src=../../../../../../../../../etc/passwd')
self.assertEqual(rv.status_code, 403)
rv = self.app.get('/original?src=../../../../../../../../../etc/passwd')
self.assertEqual(rv.status_code, 403)
rv = self.app.get('/image?src=%2Fetc%2Fpasswd')
self.assertEqual(rv.status_code, 404)
rv = self.app.get('/original?src=%2Fetc%2Fpasswd')
self.assertEqual(rv.status_code, 404)
# Also try leading // to see if the code only strips one of them
rv = self.app.get('/image?src=//etc/passwd')
self.assertEqual(rv.status_code, 404)
rv = self.app.get('/original?src=//etc/passwd')
self.assertEqual(rv.status_code, 404)
rv = self.app.get('/image?src=%2F%2Fetc%2Fpasswd')
self.assertEqual(rv.status_code, 404)
rv = self.app.get('/original?src=%2F%2Fetc%2Fpasswd')
self.assertEqual(rv.status_code, 404)
# Test 403 on insecure unicode src param
def test_unicode_insecure_src(self):
# Should be permission denied unless running as root
rv = self.app.get('/image?src=~root/../../../etc/pâßßwd')
self.assertEqual(rv.status_code, 403)
rv = self.app.get('/original?src=~root/../../../etc/pâßßwd')
self.assertEqual(rv.status_code, 403)
# Might be readable by anyone
rv = self.app.get('/image?src=../../../../../../../../../etc/pâßßwd')
self.assertEqual(rv.status_code, 403)
rv = self.app.get('/original?src=../../../../../../../../../etc/pâßßwd')
self.assertEqual(rv.status_code, 403)
# Test 404 on invalid src param
def test_404_src(self):
rv = self.app.get('/image?src=test_images/none_existent.jpg')
self.assertEqual(rv.status_code, 404)
rv = self.app.get('/original?src=test_images/none_existent.jpg')
self.assertEqual(rv.status_code, 404)
# #1864 Ensure unicode garbage URLs return 404 (thanks script kiddies)
def test_unicode_404_src(self):
rv = self.app.get('/image?src=swëdish/dørset.jpg')
self.assertEqual(rv.status_code, 404)
self.assertIn('swëdish/dørset.jpg', rv.data.decode('utf8'))
rv = self.app.get('/original?src=swëdish/dørset.jpg')
self.assertEqual(rv.status_code, 404)
self.assertIn('swëdish/dørset.jpg', rv.data.decode('utf8'))
# #2517 Test that a/b.jpg is /a/b.jpg is /a//b.jpg
# #2517 Test that /a/b/c.jpg is /a//b/c.jpg is /a///b/c.jpg
def test_src_dup_seps(self):
test_cases = [
{
'try_images': [
'test_images/cathedral.jpg',
'/test_images/cathedral.jpg',
'/test_images//cathedral.jpg'
],
'try_folders': [
'test_images',
'/test_images',
'test_images//'
]
},
{
'try_images': [
'test_images/subfolder/cathedral.jpg',
'/test_images//subfolder/cathedral.jpg',
'/test_images///subfolder/cathedral.jpg'
],
'try_folders': [
'test_images/subfolder',
'test_images//subfolder',
'test_images///subfolder'
]
}
]
# Create test_images/subfolder/cathedral.jpg
make_dirs('test_images/subfolder')
copy_file('test_images/cathedral.jpg', 'test_images/subfolder/cathedral.jpg')
# Run tests
try:
for test_case in test_cases:
image_ids = []
folder_ids = []
for image_src in test_case['try_images']:
rv = self.app.get('/image?src=' + image_src)
self.assertEqual(rv.status_code, 200)
rv = self.app.get('/api/v1/details/?src=' + image_src)
self.assertEqual(rv.status_code, 200)
obj = json.loads(rv.data.decode('utf8'))
image_ids.append(obj['data']['id'])
for folder_path in test_case['try_folders']:
db_folder = dm.get_folder(folder_path=folder_path)
self.assertIsNotNone(db_folder)
folder_ids.append(db_folder.id)
# Image IDs should all be the same
self.assertEqual(len(image_ids), 3)
self.assertEqual(image_ids[0], image_ids[1])
self.assertEqual(image_ids[1], image_ids[2])
# Folder IDs should all be the same
self.assertEqual(len(folder_ids), 3)
self.assertEqual(folder_ids[0], folder_ids[1])
self.assertEqual(folder_ids[1], folder_ids[2])
finally:
delete_dir('test_images/subfolder', recursive=True)
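# A sketch of the separator normalisation that makes the equivalences in the
# test above hold. This is an assumption for illustration, not the server's
# own code: collapse duplicate separators and strip the outer ones before
# the database lookup.
def test_src_dup_seps_sketch(self):
    import re
    def normalise_src(src):
        return re.sub('/+', '/', src).strip('/')
    self.assertEqual(normalise_src('/test_images//cathedral.jpg'),
                     'test_images/cathedral.jpg')
    self.assertEqual(normalise_src('test_images///subfolder'),
                     'test_images/subfolder')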
# Test buffer overflow protection on string params
def test_overflow_params(self):
buf = 'a' * 1025
rv = self.app.get('/image?src=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/original?src=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&overlay=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
buf = 'a' * 257
rv = self.app.get('/image?src=test_images/test.jpg&format=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&tmp=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&angle=1&fill=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&icc=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&ovpos=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
# #1864 Test buffer overflow protection on unicode string params (thanks script kiddies)
def test_unicode_overflow_params(self):
buf = 'ø' * 1025
rv = self.app.get('/image?src=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/original?src=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&overlay=' + buf)
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
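# The length limits implied by the two tests above (1024 characters for
# path-like params, 256 for name-like params) can be sketched as a simple
# validator. The limits and the helper are assumptions for illustration,
# not the server's parameter handling code.
def test_param_length_sketch(self):
    def validate_length(value, max_len):
        # Reject over-long values rather than truncating them
        if len(value) > max_len:
            raise ValueError('Parameter value out of range')
        return value
    self.assertRaises(ValueError, validate_length, 'a' * 1025, 1024)
    self.assertRaises(ValueError, validate_length, 'a' * 257, 256)
    self.assertEqual(validate_length('a' * 1024, 1024), 'a' * 1024)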
# Test bad params
def test_bad_params(self):
rv = self.app.get('/image?src=test_images/test.jpg&width=99999')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&height=-10')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&top=1.1')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/test.jpg&bottom=-0.5')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&format=eggs')
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&width=500&height=500&fill=spam')
self.assertEqual(rv.status_code, 415)
self.assertIn('unsupported fill colour', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&tmp=eggs_and_spam')
self.assertEqual(rv.status_code, 400)
rv = self.app.get('/image?src=test_images/cathedral.jpg&icc=eggs_and_spam')
self.assertEqual(rv.status_code, 400)
rv = self.app.get('/image?src=test_images/cathedral.jpg&overlay=eggs_and_spam')
self.assertEqual(rv.status_code, 404)
self.assertIn('not found', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&ovopacity=1.1')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&ovsize=-0.5')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&icc=AdobeRGB1998&intent=perceptive')
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&tile=5')
self.assertEqual(rv.status_code, 400)
self.assertIn('invalid format', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&tile=1:400')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&tile=1:12')
self.assertEqual(rv.status_code, 400)
self.assertIn('not square', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&tile=0:9')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&tile=10:9')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&page=-1')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&page=1024768')
self.assertEqual(rv.status_code, 400)
self.assertIn('out of range', rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg&flip=x')
self.assertEqual(rv.status_code, 400)
self.assertIn('not a valid choice', rv.data.decode('utf8'))
# #2590 Some clients request "x=1&amp;y=2" instead of "x=1&y=2"
def test_bad_query_string(self):
# Get an image correctly
rv = self.app.get('/image?src=test_images/cathedral.jpg&width=100&height=100&format=png')
self.assertEqual(rv.status_code, 200)
self.assertIn('image/png', rv.headers['Content-Type'])
self.assertEqual(rv.headers['X-From-Cache'], 'False')
image_len = len(rv.data)
# This incorrect URL should now retrieve the same image
rv = self.app.get('/image?src=test_images/cathedral.jpg&amp;width=100&amp;height=100&amp;format=png')
self.assertEqual(rv.status_code, 200)
self.assertIn('image/png', rv.headers['Content-Type'])
self.assertEqual(rv.headers['X-From-Cache'], 'True')
self.assertEqual(len(rv.data), image_len)
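# A sketch of the repair behind the test above. The rule is an assumption
# for illustration: treat a literal '&amp;' in the raw query string as '&'
# before parsing the parameters.
def test_bad_query_string_sketch(self):
    raw = 'src=test_images/cathedral.jpg&amp;width=100&amp;height=100'
    repaired = raw.replace('&amp;', '&')
    self.assertEqual(repaired, 'src=test_images/cathedral.jpg&width=100&height=100')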
# Ensure that cache key values are normalised and handled properly
def test_cache_keys(self):
# Floats should be rounded to 0.0000x
f = 0.123456789
i = ImageAttrs(
'', 1,
rotation=f, top=f, left=f, bottom=f, right=f,
overlay_size=f, overlay_opacity=f,
overlay_src='ovsrc123' # So the overlay params get kept
)
ov_hash = str(hash('ovsrc123'))
self.assertEqual(
i.get_cache_key(),
'IMG:1,O0.12346,T0.12346,L0.12346,B0.12346,R0.12346,Y' + ov_hash + ',YO0.12346,YS0.12346'
)
# Smallest numbers should be 0.00001 not 1e-05
f = 0.00001
i = ImageAttrs('', 1, rotation=f, top=f, left=f, bottom=f, right=f)
self.assertEqual(i.get_cache_key(), 'IMG:1,O0.00001,T0.00001,L0.00001,B0.00001,R0.00001')
# Tiny values should be rounded to 0 then removed
f = 0.0000001
i = ImageAttrs('', 1, rotation=f, top=f, left=f, bottom=f, right=f)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
# Currently we encode 1.0 as "1.0" rather than "1"
i = ImageAttrs('', 1, rotation=1.0)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1,O1.0')
# Blank strings should be left out
i = ImageAttrs(
'', 1, iformat='', template='', align_h='', align_v='', flip='',
fill='', overlay_src='', icc_profile='', icc_intent='',
colorspace='', tile_spec=''
)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
# 0 and 1 crop marks should be left out
i = ImageAttrs('', 1, top=0, left=0, bottom=1, right=1)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
# Rotation 0 or 360 should be left out
i = ImageAttrs('', 1, rotation=0)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
i = ImageAttrs('', 1, rotation=360)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
# Fill should be left out when not resizing both dimensions
i = ImageAttrs('', 1, width=200, height=200, fill='#0000ff')
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1,W200,H200,I#0000ff')
i = ImageAttrs('', 1, width=200, fill='#0000ff')
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1,W200')
# Hidden or 0 size overlay cancels overlay
ov_hash = str(hash('ovsrc123'))
i = ImageAttrs('', 1, overlay_src='ovsrc123', overlay_opacity=0.9, overlay_size=0.9)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1,Y' + ov_hash + ',YO0.9,YS0.9')
i = ImageAttrs('', 1, overlay_src='ovsrc123', overlay_opacity=0, overlay_size=1)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
i = ImageAttrs('', 1, overlay_src='ovsrc123', overlay_opacity=1, overlay_size=0)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
# No ICC name cancels ICC params
i = ImageAttrs('', 1, icc_profile='icc123', icc_intent='relative', icc_bpc=True)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1,Picc123,Nrelative,C')
i = ImageAttrs('', 1, icc_profile='', icc_intent='relative', icc_bpc=True)
i.normalise_values()
self.assertEqual(i.get_cache_key(), 'IMG:1')
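# For reference, a minimal sketch of the float encoding rule exercised
# above. This is an illustrative assumption, not the ImageAttrs code:
# round to 5 decimal places, then fall back to fixed-point formatting
# whenever repr() would produce scientific notation.
def test_cache_key_float_format_sketch(self):
    def format_cache_float(f):
        r = round(f, 5)
        s = repr(r)
        # '%.5f' avoids '1e-05' style output for very small values
        return ('%.5f' % r) if 'e' in s else s
    self.assertEqual(format_cache_float(0.123456789), '0.12346')
    self.assertEqual(format_cache_float(0.00001), '0.00001')
    self.assertEqual(format_cache_float(1.0), '1.0')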
# Test requested image attributes get applied and processed properly
def test_image_attrs_precedence(self):
ia = ImageAttrs('myimage.png', -1, width=789, fill='auto')
# Apply a fictional template
ia.apply_dict({
'width': 100,
'rotation': 360,
'fill': 'red',
'dpi_x': 300,
'dpi_y': 300},
override_values=False,
normalise=False
)
# Ensure the template params are there
self.assertEqual(ia.rotation(), 360)
self.assertEqual(ia.dpi(), 300)
# Ensure the initial parameters override the template
self.assertEqual(ia.width(), 789)
self.assertEqual(ia.fill(), 'auto')
# Ensure the net parameters look good
ia.normalise_values()
self.assertIsNone(ia.format_raw())
self.assertEqual(ia.format(), 'png') # from filename, not params
self.assertEqual(ia.width(), 789)
self.assertEqual(ia.dpi(), 300)
self.assertIsNone(ia.rotation()) # 360 would have no effect
self.assertIsNone(ia.fill()) # As there's no rotation and no other filling to do
# Check a 0 DPI does override the template
ia = ImageAttrs('myimage.png', -1, dpi=0)
ia.apply_dict({
'dpi_x': 300,
'dpi_y': 300},
override_values=False,
normalise=True
)
self.assertIsNone(ia.dpi())
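# A minimal sketch of the precedence rule exercised above. This is an
# assumption for illustration, not the apply_dict() implementation: with
# override_values=False, values already set on the request win and the
# template only fills in the gaps.
def test_attrs_precedence_sketch(self):
    def apply_template(requested, template):
        merged = dict(template)
        # Only unset (None) request values fall back to the template
        merged.update({k: v for k, v in requested.items() if v is not None})
        return merged
    merged = apply_template(
        {'width': 789, 'fill': 'auto', 'rotation': None},
        {'width': 100, 'rotation': 360, 'fill': 'red'}
    )
    self.assertEqual(merged['width'], 789)     # request wins
    self.assertEqual(merged['fill'], 'auto')   # request wins
    self.assertEqual(merged['rotation'], 360)  # template fills the gap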
# Test the identification of suitable base images in cache
def test_base_image_detection(self):
image_obj = auto_sync_existing_file('test_images/dorset.jpg', dm, tm)
image_id = image_obj.id
# Clean
orig_attrs = ImageAttrs('test_images/dorset.jpg', image_id)
im.reset_image(orig_attrs)
# Set up tests
w1000_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=1000, rotation=90)
w500_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, rotation=90)
w100_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=100, rotation=90)
base = im._get_base_image(im.finalise_image_attrs(w1000_attrs))
assert base is None, 'Found an existing base image for ' + str(w1000_attrs)
# Get an 1100 image, should provide the base for 1000
rv = self.app.get('/image?src=test_images/dorset.jpg&format=png&width=1100&angle=90')
assert rv.status_code == 200
base = im._get_base_image(im.finalise_image_attrs(w1000_attrs))
assert base is not None and base.attrs().width() == 1100
# Get 1000 image, should provide the base for 500
rv = self.app.get('/image?src=test_images/dorset.jpg&format=png&width=1000&angle=90')
assert rv.status_code == 200
base = im._get_base_image(im.finalise_image_attrs(w500_attrs))
assert base is not None and base.attrs().width() == 1000
# Get 500 image, should provide the base for 100
rv = self.app.get('/image?src=test_images/dorset.jpg&format=png&width=500&angle=90')
assert rv.status_code == 200
base = im._get_base_image(im.finalise_image_attrs(w100_attrs))
assert base is not None and base.attrs().width() == 500
# Make sure none of these come back for incompatible image requests
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500) # No rotation
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='gif', width=500, rotation=90) # Format
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, height=500, rotation=90) # Aspect ratio
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, rotation=90, fill='#ff0000') # Fill
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
# But if we want to sharpen the 500px version that should be OK
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, rotation=90, sharpen=200)
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# Adding an overlay should be OK
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, rotation=90, overlay_src='test_images/quru110.png')
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# Tiling!
# Creating a tile of the 500px version should use the same as a base
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, rotation=90, tile_spec=(3,9))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# Create that tile
rv = self.app.get('/image?src=test_images/dorset.jpg&format=png&width=500&angle=90&tile=3:9')
assert rv.status_code == 200
# A different format of the tile should not use the cached tile as a base
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='jpg', width=500, rotation=90, tile_spec=(3,9))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
# But a stripped version of the same tile should
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, rotation=90, tile_spec=(3,9), strip=True)
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().tile_spec() == (3, 9)
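# A sketch of the compatibility rule suggested by the assertions above.
# This is an illustrative assumption, not the _get_base_image() code: a
# cached image can act as a base only when it is at least as large as the
# target and every attribute matches, except for operations (such as
# sharpen or overlay) that are applied after the base image is resized.
def test_base_image_rule_sketch(self):
    POST_BASE_OPS = {'sharpen', 'overlay_src'}  # assumed post-resize ops
    def can_be_base(candidate, target):
        c = {k: v for k, v in candidate.items() if k not in POST_BASE_OPS}
        t = {k: v for k, v in target.items() if k not in POST_BASE_OPS}
        return c.pop('width') >= t.pop('width') and c == t
    base = {'format': 'png', 'rotation': 90, 'width': 1100}
    self.assertTrue(can_be_base(base, {'format': 'png', 'rotation': 90, 'width': 1000}))
    self.assertFalse(can_be_base(base, {'format': 'gif', 'rotation': 90, 'width': 1000}))
    self.assertTrue(can_be_base(
        base, {'format': 'png', 'rotation': 90, 'width': 1000, 'sharpen': 200}))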
# Test the identification of suitable base images in cache (overlays)
def test_overlay_base_image_detection(self):
image_obj = auto_sync_existing_file('test_images/dorset.jpg', dm, tm)
image_id = image_obj.id
# Clean
orig_attrs = ImageAttrs('test_images/dorset.jpg', image_id)
im.reset_image(orig_attrs)
#
# Overlays - We cannot allow an overlayed image to be used as a base, because:
# a) After resizing, the resulting overlay size might not be correct
# b) When cropping, rotating, blurring, flipping etc, the operation would already
# include the overlay, while normally (without a base) the overlay is done last
#
# The only exception is tiling, which can (and should!) use the exact same image as a base
#
w1000_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=1000, overlay_src='test_images/quru110.png')
im.finalise_image_attrs(w1000_attrs)
base = im._get_base_image(w1000_attrs)
assert base is None, 'Found an existing base image for ' + str(w1000_attrs)
# Get an 1100 image, which should NOT provide the base for 1000
rv = self.app.get('/image?src=test_images/dorset.jpg&format=png&width=1100&overlay=test_images/quru110.png')
assert rv.status_code == 200
base = im._get_base_image(w1000_attrs)
assert base is None
# Get a 500 image, we should be able to tile from it
rv = self.app.get('/image?src=test_images/dorset.jpg&format=png&width=500&overlay=test_images/quru110.png')
assert rv.status_code == 200
try_attrs = ImageAttrs('test_images/dorset.jpg', image_id, iformat='png', width=500, overlay_src='test_images/quru110.png', tile_spec=(1,4))
im.finalise_image_attrs(try_attrs)
base = im._get_base_image(try_attrs)
assert base is not None and base.attrs().width() == 500
# Test the identification of suitable base images in cache (flip and page)
def test_flip_base_image_detection(self):
#
# Run some similar tests for newer parameters (flip and page)
#
image_obj = auto_sync_existing_file('test_images/multipage.tif', dm, tm)
image_id = image_obj.id
# Clean
orig_attrs = ImageAttrs('test_images/multipage.tif', image_id)
im.reset_image(orig_attrs)
# Set up tests
w500_attrs = ImageAttrs('test_images/multipage.tif', image_id, page=2, iformat='png', width=500, flip='v')
im.finalise_image_attrs(w500_attrs)
base = im._get_base_image(w500_attrs)
assert base is None, 'Found an existing base image for ' + str(w500_attrs)
# Get an 800 image, p2, flip v
rv = self.app.get('/image?src=test_images/multipage.tif&format=png&width=800&page=2&flip=v')
assert rv.status_code == 200
# Should now be able to use the 800 as a base for a 500
base = im._get_base_image(w500_attrs)
assert base is not None and base.attrs().width() == 800
# Generate the 500
rv = self.app.get('/image?src=test_images/multipage.tif&format=png&width=500&page=2&flip=v')
assert rv.status_code == 200
# Make sure none of these come back for incompatible image requests
try_attrs = ImageAttrs('test_images/multipage.tif', image_id, iformat='png', width=500) # No page
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
try_attrs = ImageAttrs('test_images/multipage.tif', image_id, page=2, iformat='png', width=500, flip='h') # Wrong flip
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
try_attrs = ImageAttrs('test_images/multipage.tif', image_id, page=3, iformat='png', width=500, flip='v') # Wrong page
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is None
# But if we want to sharpen the 500px version that should be OK
try_attrs = ImageAttrs('test_images/multipage.tif', image_id, page=2, iformat='png', width=500, flip='v', sharpen=200)
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# Adding an overlay should be OK
try_attrs = ImageAttrs('test_images/multipage.tif', image_id, page=2, iformat='png', width=500, flip='v', overlay_src='test_images/quru110.png')
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# Creating a tile of the 500px version should use the same as a base
try_attrs = ImageAttrs('test_images/multipage.tif', image_id, page=2, iformat='png', width=500, flip='v', tile_spec=(3,9))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# There was a bug where "cmyk.jpg&colorspace=rgb" would be used as a base image
# for "cmyk.jpg&icc=some_icc&colorspace=rgb" but this was incorrect because the
# base image is then RGB instead of CMYK.
def test_base_image_colorspaces(self):
# Clean
image_obj = auto_sync_existing_file('test_images/picture-cmyk.jpg', dm, tm)
image_id = image_obj.id
orig_attrs = ImageAttrs('test_images/picture-cmyk.jpg', image_id)
im.reset_image(orig_attrs)
# Set up tests
w200_attrs = ImageAttrs('test_images/picture-cmyk.jpg',
image_id, iformat='jpg', width=200, colorspace='rgb')
icc_attrs_1 = ImageAttrs('test_images/picture-cmyk.jpg',
image_id, iformat='jpg', width=500, icc_profile='CoatedGRACoL2006')
icc_attrs_2 = ImageAttrs('test_images/picture-cmyk.jpg',
image_id, iformat='jpg', width=500, icc_profile='CoatedGRACoL2006',
colorspace='rgb')
cspace_attrs = ImageAttrs('test_images/picture-cmyk.jpg',
image_id, iformat='jpg', width=500, colorspace='gray')
# Get the orig_attrs image
rv = self.app.get('/image?src=test_images/picture-cmyk.jpg&format=jpg&width=500&colorspace=rgb')
self.assertEqual(rv.status_code, 200)
# Now getting a width 200 of that should be OK
base = im._get_base_image(im.finalise_image_attrs(w200_attrs))
self.assertIsNotNone(base)
# Getting an ICC version should not use the RGB base
base = im._get_base_image(im.finalise_image_attrs(icc_attrs_1))
self.assertIsNone(base)
# Getting an RGB of the ICC version should not use the RGB base either
base = im._get_base_image(im.finalise_image_attrs(icc_attrs_2))
self.assertIsNone(base)
# Getting a GRAY version should not use the RGB base
base = im._get_base_image(im.finalise_image_attrs(cspace_attrs))
self.assertIsNone(base)
# Test the correct base images are used when creating tiles
def test_tile_base_images(self):
orig_img = auto_sync_existing_file('test_images/cathedral.jpg', dm, tm)
orig_attrs = ImageAttrs(orig_img.src, orig_img.id)
orig_id = orig_img.id
# Clean
im.reset_image(orig_attrs)
# Generate 2 base images
rv = self.app.get('/image?src=test_images/cathedral.jpg&width=1000&strip=0')
assert rv.status_code == 200
rv = self.app.get('/image?src=test_images/cathedral.jpg&width=500&strip=0')
assert rv.status_code == 200
# Test base image detection
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=800, tile_spec=(2,4))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 1000
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=800, rotation=180, flip='v', tile_spec=(2,4))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 1000
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=800, height=800, size_fit=True, strip=True, tile_spec=(2,4))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 1000
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=800, height=800, size_fit=True, strip=True, overlay_src='test_images/quru110.png', tile_spec=(2,4))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 1000
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=500, tile_spec=(18,36))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=500, rotation=180, tile_spec=(18,36))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=500, rotation=180, flip='v', tile_spec=(18,36))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=500, height=500, size_fit=True, strip=True, tile_spec=(18,36))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
try_attrs = ImageAttrs('test_images/cathedral.jpg', orig_id, width=500, height=500, size_fit=True, strip=True, overlay_src='test_images/quru110.png', tile_spec=(18,36))
base = im._get_base_image(im.finalise_image_attrs(try_attrs))
assert base is not None and base.attrs().width() == 500
# Test changing the default template values
def test_default_template_settings(self):
try:
db_def_temp = dm.get_image_template(tempname='Default')
# Get img1 with explicit format and quality params
img1 = self.app.get('/image?src=test_images/dorset.jpg&width=800&format=png&quality=50')
self.assertEqual(img1.status_code, 200)
self.assertIn('image/png', img1.headers['Content-Type'])
# Get img2 with no params but with defaults set to be the same as img1
db_def_temp.template['format']['value'] = 'png'
db_def_temp.template['quality']['value'] = 50
dm.save_object(db_def_temp)
im.reset_templates()
img2 = self.app.get('/image?src=test_images/dorset.jpg&width=800')
self.assertEqual(img2.status_code, 200)
self.assertIn('image/png', img2.headers['Content-Type'])
self.assertEqual(len(img1.data), len(img2.data))
# Test keeping the original image format
db_def_temp.template['format']['value'] = ''
db_def_temp.template['quality']['value'] = 75
dm.save_object(db_def_temp)
im.reset_templates()
img3 = self.app.get('/image?src=test_images/dorset.jpg&width=805')
self.assertEqual(img3.status_code, 200)
self.assertIn('image/jpeg', img3.headers['Content-Type'])
img3_size = len(img3.data)
# Test strip
db_def_temp.template['strip']['value'] = True
dm.save_object(db_def_temp)
im.reset_templates()
img4 = self.app.get('/image?src=test_images/dorset.jpg&width=805')
self.assertEqual(img4.status_code, 200)
self.assertLess(len(img4.data), img3_size)
finally:
reset_default_image_template()
# Test image templates
def test_templates(self):
try:
template = 'Unit test template'
# Utility to update a template in the database and reset the template manager cache
def update_db_template(db_obj, update_dict):
db_obj.template.update(update_dict)
dm.save_object(db_obj)
im.reset_templates()
# Get the default template
db_default_template = dm.get_image_template(tempname='Default')
# Create a temporary template to work with
db_template = ImageTemplate(template, 'Temporary template for unit testing', {})
db_template = dm.save_object(db_template, refresh=True)
# Test format from template
update_db_template(db_template, {'format': {'value': 'png'}})
self.assertIn(template, im.get_template_names())
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.status_code, 200)
self.assertIn('image/png', rv.headers['Content-Type'])
# Switch back to JPG as Pillow support for strip=1/0 with PNG isn't there yet
update_db_template(db_template, {'format': {'value': ''}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.status_code, 200)
self.assertIn('image/jpeg', rv.headers['Content-Type'])
original_len = len(rv.data)
# Test cropping from template makes it smaller
update_db_template(db_template, {'top': {'value': 0.1}, 'left': {'value': 0.1},
'bottom': {'value': 0.9}, 'right': {'value': 0.9}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.status_code, 200)
cropped_len = len(rv.data)
self.assertLess(cropped_len, original_len)
# Test stripping the EXIF data makes it smaller again (2)
update_db_template(db_template, {'strip': {'value': True}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.status_code, 200)
stripped_len = len(rv.data)
self.assertLess(stripped_len, cropped_len)
# Test resizing it small makes it smaller again (3)
update_db_template(db_template, {'width': {'value': 500}, 'height': {'value': 500}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.status_code, 200)
resized_len = len(rv.data)
self.assertLess(resized_len, stripped_len)
# And that auto-fitting the crop then makes it slightly larger
update_db_template(db_template, {'crop_fit': {'value': True}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.status_code, 200)
autofit_crop_len = len(rv.data)
self.assertGreater(autofit_crop_len, resized_len)
# Test expiry settings - from the default template first
update_db_template(db_default_template, {'expiry_secs': {'value': 99}})
rv = self.app.get('/image?src=test_images/thames.jpg')
self.assertEqual(rv.headers.get('Expires'), http_date(int(time.time() + 99)))
# Test expiry settings from template
update_db_template(db_template, {'expiry_secs': {'value': -1}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertEqual(rv.headers.get('Expires'), http_date(0))
# Test attachment settings from template
update_db_template(db_template, {'attachment': {'value': True}})
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template)
self.assertIsNotNone(rv.headers.get('Content-Disposition'))
self.assertIn('attachment', rv.headers['Content-Disposition'])
# Test that URL params override the template
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template+'&format=gif&attach=0')
self.assertEqual(rv.status_code, 200)
self.assertIn('image/gif', rv.headers['Content-Type'])
self.assertIsNone(rv.headers.get('Content-Disposition'))
template_gif_len = len(rv.data) # at 500x500
rv = self.app.get('/image?src=test_images/thames.jpg&tmp='+template+'&format=gif&width=600&height=600&attach=0')
self.assertEqual(rv.status_code, 200)
template_gif_len2 = len(rv.data) # at 600x600
self.assertGreater(template_gif_len2, template_gif_len)
finally:
reset_default_image_template()
# Test spaces in file names - serving and caching
def test_filename_spaces(self):
# Test serving and cache store
rv = self.app.get('/image?src=test_images/blue%20bells.jpg')
assert rv.status_code == 200, 'Filename with spaces was not served'
assert 'image/jpeg' in rv.headers['Content-Type']
# Checking the exact length relies on the 'keep original' values in the settings
assert len(rv.data) == 904256
# Test retrieval from cache
blue_img = auto_sync_existing_file('test_images/blue bells.jpg', dm, tm)
blue_attrs = ImageAttrs(blue_img.src, blue_img.id)
im.finalise_image_attrs(blue_attrs)
blue_image = cm.get(blue_attrs.get_cache_key())
assert blue_image is not None, 'Filename with spaces was not retrieved from cache'
assert len(blue_image) == 904256
# Test attachment filename
rv = self.app.get('/original?src=test_images/blue%20bells.jpg&attach=1')
assert rv.status_code == 200
assert rv.headers.get('Content-Disposition') is not None
assert 'attachment' in rv.headers['Content-Disposition']
assert 'filename="blue bells.jpg"' in rv.headers['Content-Disposition']
# Test spaces in overlay images
rv = self.app.get('/image?src=test_images/dorset.jpg&width=500&overlay=test_images/blue%20bells.jpg&ovsize=0.5')
assert rv.status_code == 200, 'Overlay with spaces was not served'
# Test the original URL won't serve up non-image files
def test_original_serving_bad_files(self):
temp_file = get_abs_path('php.ini')
try:
# Create a php.ini
with open(temp_file, 'w') as tfile:
tfile.write('QIS TEST. This is my php.ini file containing interesting info.')
# Test we can't now serve that up
rv = self.app.get('/original?src=php.ini')
self.assertEqual(rv.status_code, 415)
self.assertIn('not a supported image', rv.data.decode('utf8'))
finally:
os.remove(temp_file)
# Image management database tests
def test_db_auto_population(self):
folder_path = 'test_images'
image_path = folder_path + '/cathedral.jpg'
i = dm.get_image(src=image_path)
if i: dm.delete_image(i, True)
# Check db auto-populates from image URL
rv = self.app.get('/image?src=' + image_path)
assert rv.status_code == 200
# Test folder now exists
f = dm.get_folder(folder_path=folder_path)
assert f is not None
assert f.path == '/'+folder_path
# Test image record now exists and has correct folder
i = dm.get_image(src=image_path)
assert i is not None
assert i.src == image_path
assert i.folder.id == f.id
assert i.width == 1600
assert i.height == 1200
# Reset database for i
dm.delete_image(i, True)
assert dm.get_image(src=image_path) is None
# Check db auto-populates from details API
rv = self.app.get('/api/details/?src=' + image_path)
assert rv.status_code == 200
i = dm.get_image(src=image_path, load_history=True)
assert i is not None and i.width == 1600 and i.height == 1200, 'db has '+str(i)
# Image history should be written for this one
assert len(i.history) == 1
assert i.history[0].action == ImageHistory.ACTION_CREATED
# Reset
dm.delete_image(i, True)
assert dm.get_image(src=image_path) is None
# Check db auto-populates from original URL
rv = self.app.get('/original?src=' + image_path)
assert rv.status_code == 200
i = dm.get_image(src=image_path)
assert i is not None and i.width == 1600 and i.height == 1200, 'db has '+str(i)
# Reset
dm.delete_image(i, True)
assert dm.get_image(src=image_path) is None
# Log in
self.login('admin', 'admin')
# Check db auto-populates from details page
rv = self.app.get('/details/?src=' + image_path)
assert rv.status_code == 200, 'Details page returned status ' + str(rv.status_code)
i = dm.get_image(src=image_path)
assert i is not None and i.width == 1600 and i.height == 1200, 'db has '+str(i)
# Reset
dm.delete_image(i, True)
assert dm.get_image(src=image_path) is None
# Check db auto-populates from an image upload
temp_file = '/tmp/qis_uploadfile.jpg'
image_path = 'test_images/qis_uploadfile.jpg'
try:
i = dm.get_image(src=image_path)
assert i is None
# Create image to upload, upload it
src_file = get_abs_path('test_images/cathedral.jpg')
shutil.copy(src_file, temp_file)
rv = self.file_upload(self.app, temp_file, 'test_images')
self.assertEqual(rv.status_code, 200)
i = dm.get_image(src=image_path, load_history=True)
assert i is not None and i.width == 1600 and i.height == 1200, 'after upload, db has '+str(i)
uploaded_id = i.id
# Check image history
assert len(i.history) == 1
assert i.history[0].action == ImageHistory.ACTION_CREATED
assert i.history[0].user is not None
assert i.history[0].user.username == 'admin'
# Get an image and ensure it adds a cache entry for it
rv = self.app.get('/image?src=' + image_path)
assert rv.status_code == 200
cache_entries = cm.search(searchfield1__eq=uploaded_id)
assert len(cache_entries) > 0
# Check db re-populates from a replacement image upload
src_file = get_abs_path('test_images/dorset.jpg')
shutil.copy(src_file, temp_file)
rv = self.file_upload(self.app, temp_file, 'test_images')
self.assertEqual(rv.status_code, 200)
i = dm.get_image(src=image_path, load_history=True)
assert i is not None and i.id == uploaded_id, 'db returned different record after re-upload: '+str(i)
assert i.width == 1200 and i.height == 1600, 'after re-upload, db has '+str(i)
# Check image history
assert len(i.history) == 2
assert i.history[1].action == ImageHistory.ACTION_REPLACED
# Check that the cache was cleared too
cache_entries = cm.search(searchfield1__eq=uploaded_id)
assert len(cache_entries) == 0
finally:
# Delete temp file and uploaded file
if os.path.exists(temp_file): os.remove(temp_file)
delete_file(image_path)
# Check db auto-populates for the now-deleted uploaded file
im.reset_image(ImageAttrs(image_path)) # Anything to trigger auto-populate
i = dm.get_image(src=image_path, load_history=True)
assert i is not None
assert i.status == Image.STATUS_DELETED
assert len(i.history) == 3
assert i.history[2].action == ImageHistory.ACTION_DELETED
# Clean up
dm.delete_image(i, True)
# # A basic test to look for obvious memory leaks
# def test_memory_leaks(self):
# import gc
# gc.collect()
# rv = self.app.get('/image?src=test_images/cathedral.jpg&width=500&flip=v&cache=0')
# assert rv.status_code == 200
# rv = self.app.get('/image?src=test_images/cathedral.jpg&width=500&flip=v&cache=1')
# assert rv.status_code == 200
# rv = self.app.get('/original?src=test_images/cathedral.jpg')
# assert rv.status_code == 200
# rv = self.app.get('/image?src=test_images/multipage.tif&format=png&width=500&strip=1&page=2')
# assert rv.status_code == 200
# rv = self.app.get('/image?src=test_images/multipage.tif&format=png&width=500&strip=1&page=3')
# assert rv.status_code == 200
# gc.collect()
# unreach = gc.collect()
# assert unreach == 0, str(unreach) + ' unreachable'
# File upload
def test_file_upload(self):
self.login('admin', 'admin')
# Copy a test file to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file = '/tmp/qis_uploadfile.jpg'
shutil.copy(src_file, dst_file)
try:
# Upload
rv = self.file_upload(self.app, dst_file, 'test_images', '0')
self.assertEqual(rv.status_code, 200)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 1)
self.assertIn('/tmp/qis_uploadfile.jpg', obj)
imgdata = obj['/tmp/qis_uploadfile.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile.jpg')
self.assertGreater(imgdata['id'], 0)
self.assertNotIn('history', imgdata) # v4.1
# Make sure it works
rv = self.app.get('/image?src=test_images/qis_uploadfile.jpg')
self.assertEqual(rv.status_code, 200)
db_img = dm.get_image(src='test_images/qis_uploadfile.jpg')
self.assertIsNotNone(db_img, 'Upload did not create image data')
finally:
# Remove the test files
os.remove(dst_file)
self.delete_image_and_data('test_images/qis_uploadfile.jpg')
# v2.7.1 File upload - with overwrite as rename
def test_file_upload_overwrite_rename(self):
self.login('admin', 'admin')
# Copy a test file to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file = '/tmp/qis_uploadfile.jpg'
shutil.copy(src_file, dst_file)
try:
# Have the upload file exist already
copy_file('test_images/cathedral.jpg', 'test_images/qis_uploadfile.jpg')
# Upload
rv = self.file_upload(self.app, dst_file, 'test_images', 'rename')
self.assertEqual(rv.status_code, 200)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 1)
self.assertIn('/tmp/qis_uploadfile.jpg', obj)
imgdata = obj['/tmp/qis_uploadfile.jpg']
self.assertGreater(imgdata['id'], 0)
# Make sure it was renamed with -001 appended
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile-001.jpg')
# Make sure it works
rv = self.app.get('/image?src=test_images/qis_uploadfile-001.jpg')
self.assertEqual(rv.status_code, 200)
finally:
# Remove the test files
os.remove(dst_file)
self.delete_image_and_data('test_images/qis_uploadfile.jpg')
self.delete_image_and_data('test_images/qis_uploadfile-001.jpg')
# Multiple file uploads - expecting success
def test_file_upload_multi(self):
self.login('admin', 'admin')
# Copy test files to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file1 = '/tmp/qis_uploadfile1.jpg'
dst_file2 = '/tmp/qis_uploadfile2.jpg'
shutil.copy(src_file, dst_file1)
shutil.copy(src_file, dst_file2)
try:
rv = self.multi_file_upload(
self.app,
[dst_file1, dst_file2],
'test_images',
'0'
)
self.assertEqual(rv.status_code, 200)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 2)
imgdata = obj['/tmp/qis_uploadfile1.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile1.jpg')
self.assertGreater(imgdata['id'], 0)
self.assertTrue(path_exists('test_images/qis_uploadfile1.jpg'))
imgdata = obj['/tmp/qis_uploadfile2.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile2.jpg')
self.assertGreater(imgdata['id'], 0)
self.assertTrue(path_exists('test_images/qis_uploadfile2.jpg'))
finally:
# Remove the test files
os.remove(dst_file1)
os.remove(dst_file2)
self.delete_image_and_data('test_images/qis_uploadfile1.jpg')
self.delete_image_and_data('test_images/qis_uploadfile2.jpg')
# Multiple file uploads - with overwrite no
def test_file_upload_multi_overwrite_no(self):
self.login('admin', 'admin')
# Copy test files to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file1 = '/tmp/qis_uploadfile1.jpg'
dst_file2 = '/tmp/qis_uploadfile2.jpg'
shutil.copy(src_file, dst_file1)
shutil.copy(src_file, dst_file2)
try:
# Make one of the files exist already
copy_file('test_images/cathedral.jpg', 'test_images/qis_uploadfile2.jpg')
# Test 1 file success, 1 file failure
rv = self.multi_file_upload(
self.app,
[dst_file1, dst_file2],
'test_images',
'0' # This will break now on dst_file2
)
self.assertEqual(rv.status_code, API_CODES.ALREADY_EXISTS)
obj = json.loads(rv.data.decode('utf8'))
self.assertEqual(obj['status'], API_CODES.ALREADY_EXISTS)
obj = obj['data']
self.assertEqual(len(obj), 2)
# First entry should be image info
imgdata = obj['/tmp/qis_uploadfile1.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile1.jpg')
self.assertGreater(imgdata['id'], 0)
# Second entry should be error info
imgdata = obj['/tmp/qis_uploadfile2.jpg']
self.assertNotIn('id', imgdata)
self.assertIn('error', imgdata)
self.assertEqual(imgdata['error']['status'], API_CODES.ALREADY_EXISTS)
self.assertIn('already exists', imgdata['error']['message'])
finally:
# Remove the test files
os.remove(dst_file1)
os.remove(dst_file2)
self.delete_image_and_data('test_images/qis_uploadfile1.jpg')
self.delete_image_and_data('test_images/qis_uploadfile2.jpg')
# Multiple file uploads - with overwrite yes
def test_file_upload_multi_overwrite_yes(self):
self.login('admin', 'admin')
# Copy test files to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file1 = '/tmp/qis_uploadfile1.jpg'
dst_file2 = '/tmp/qis_uploadfile2.jpg'
shutil.copy(src_file, dst_file1)
shutil.copy(src_file, dst_file2)
try:
# Make both files exist already
copy_file('test_images/cathedral.jpg', 'test_images/qis_uploadfile1.jpg')
copy_file('test_images/cathedral.jpg', 'test_images/qis_uploadfile2.jpg')
# Test overwrites
rv = self.multi_file_upload(
self.app,
[dst_file1, dst_file2],
'test_images',
'1'
)
self.assertEqual(rv.status_code, API_CODES.SUCCESS)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 2)
# Both entries should be image info
imgdata = obj['/tmp/qis_uploadfile1.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile1.jpg')
self.assertGreater(imgdata['id'], 0)
imgdata = obj['/tmp/qis_uploadfile2.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile2.jpg')
self.assertGreater(imgdata['id'], 0)
finally:
# Remove the test files
os.remove(dst_file1)
os.remove(dst_file2)
self.delete_image_and_data('test_images/qis_uploadfile1.jpg')
self.delete_image_and_data('test_images/qis_uploadfile2.jpg')
# v2.7.1 Multiple file uploads - with dupe filenames (from different source directories)
def test_file_upload_multi_dupe_filenames(self):
self.login('admin', 'admin')
# Copy test files to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_dir1 = '/tmp/qis_temp1'
dst_dir2 = '/tmp/qis_temp2'
dst_file1 = dst_dir1 + '/qis_uploadfile.jpg' # } same
dst_file2 = dst_dir2 + '/qis_uploadfile.jpg' # } filenames
os.mkdir(dst_dir1)
os.mkdir(dst_dir2)
shutil.copy(src_file, dst_file1)
shutil.copy(src_file, dst_file2)
try:
rv = self.multi_file_upload(
self.app,
[dst_file1, dst_file2],
'test_images',
'0'
)
self.assertEqual(rv.status_code, API_CODES.SUCCESS)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 2)
# Both entries should be image info with one file renamed
imgdata = obj[dst_file1]
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile.jpg')
self.assertGreater(imgdata['id'], 0)
imgdata = obj[dst_file2]
self.assertEqual(imgdata['src'], 'test_images/qis_uploadfile-001.jpg')
self.assertGreater(imgdata['id'], 0)
finally:
# Remove the test files
shutil.rmtree(dst_dir1)
shutil.rmtree(dst_dir2)
self.delete_image_and_data('test_images/qis_uploadfile.jpg')
self.delete_image_and_data('test_images/qis_uploadfile-001.jpg')
# v2.7.1 Multiple file uploads - with filenames that accidentally get converted to dupes
def test_file_upload_multi_dupe_filenames_via_chars(self):
self.login('admin', 'admin')
# Copy test files to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file1 = '/tmp/qis_upload.jpg'
dst_file2 = '/tmp/?qis_upload?.jpg'
shutil.copy(src_file, dst_file1)
shutil.copy(src_file, dst_file2)
# Check that secure_filename() will make filenames the same during upload
self.assertEqual(secure_filename(dst_file1), secure_filename(dst_file2))
try:
rv = self.multi_file_upload(
self.app,
[dst_file1, dst_file2],
'test_images',
'0'
)
self.assertEqual(rv.status_code, API_CODES.SUCCESS)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 2)
# Both entries should be image info with one file renamed
imgdata = obj['/tmp/qis_upload.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_upload.jpg')
self.assertGreater(imgdata['id'], 0)
imgdata = obj['/tmp/?qis_upload?.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis_upload-001.jpg')
self.assertGreater(imgdata['id'], 0)
finally:
# Remove the test files
os.remove(dst_file1)
os.remove(dst_file2)
self.delete_image_and_data('test_images/qis_upload.jpg')
self.delete_image_and_data('test_images/qis_upload-001.jpg')
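# A sketch of the rename-on-collision behaviour shown in the tests above.
# The numbering rule is an assumption for illustration: append -001, -002,
# ... before the extension until the name is unique.
def test_rename_on_collision_sketch(self):
    existing = {'qis_upload.jpg', 'qis_upload-001.jpg'}
    def unique_name(filename):
        name, ext = os.path.splitext(filename)
        candidate, n = filename, 0
        while candidate in existing:
            n += 1
            candidate = '%s-%03d%s' % (name, n, ext)
        return candidate
    self.assertEqual(unique_name('qis_upload.jpg'), 'qis_upload-002.jpg')
    self.assertEqual(unique_name('other.jpg'), 'other.jpg')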
# File uploads
def test_file_upload_unicode(self):
self.login('admin', 'admin')
# Copy a test file to upload
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file = '/tmp/qis uplo\xe4d f\xefle.jpg'
shutil.copy(src_file, dst_file)
try:
# Upload
rv = self.file_upload(self.app, dst_file, 'test_images', '0')
self.assertEqual(rv.status_code, 200)
obj = json.loads(rv.data.decode('utf8'))['data']
self.assertEqual(len(obj), 1)
self.assertIn('/tmp/qis uplo\xe4d f\xefle.jpg', obj)
imgdata = obj['/tmp/qis uplo\xe4d f\xefle.jpg']
self.assertEqual(imgdata['src'], 'test_images/qis uplo\xe4d f\xefle.jpg')
self.assertGreater(imgdata['id'], 0)
# Make sure it works
rv = self.app.get('/image?src=test_images/qis uplo\xe4d f\xefle.jpg')
self.assertEqual(rv.status_code, 200)
db_img = dm.get_image(src='test_images/qis uplo\xe4d f\xefle.jpg')
self.assertIsNotNone(db_img, 'Upload did not create image data')
finally:
# Remove the test files
os.remove(dst_file)
self.delete_image_and_data('test_images/qis uplo\xe4d f\xefle.jpg')
# File uploads expected failures
def test_bad_file_uploads(self):
# Should fail if not logged in
rv = self.app.post('/api/upload/', data={
'files': None,
'path': 'test_images',
'overwrite': '1'
})
self.assertEqual(rv.status_code, 401)
self.login('admin', 'admin')
# Non-image file upload should fail (1)
rv = self.file_upload(self.app, '/etc/hosts', 'test_images')
self.assertEqual(rv.status_code, 400)
# Non-image file upload should fail (2)
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file = '/tmp/qis_uploadfile.doc'
shutil.copy(src_file, dst_file)
try:
rv = self.file_upload(self.app, dst_file, 'test_images')
self.assertEqual(rv.status_code, 415)
finally:
if os.path.exists(dst_file):
os.remove(dst_file)
# Too large a file should fail
src_file = get_abs_path('test_images/cathedral.jpg')
dst_file = '/tmp/qis_uploadfile.jpg'
shutil.copy(src_file, dst_file)
old_MAX_CONTENT_LENGTH = flask_app.config['MAX_CONTENT_LENGTH']
try:
flask_app.config['MAX_CONTENT_LENGTH'] = 100
rv = self.file_upload(self.app, dst_file, 'test_images')
self.assertEqual(rv.status_code, 413)
finally:
flask_app.config['MAX_CONTENT_LENGTH'] = old_MAX_CONTENT_LENGTH
if os.path.exists(dst_file):
os.remove(dst_file)
# Test unicode characters in filenames, especially dashes!
def test_unicode_filenames(self):
temp_dir = '\u00e2 te\u00dft \u2014 of \u00e7har\u0292'
temp_file = os.path.join(temp_dir, temp_dir + '.jpg')
try:
with flask_app.test_request_context():
image_url = internal_url_for('image', src=temp_file)
original_url = internal_url_for('original', src=temp_file)
overlayed_image_url = internal_url_for('image', src='test_images/cathedral.jpg', width=500, overlay_src=temp_file, overlay_size=0.5)
list_url = internal_url_for('browse', path=temp_dir)
details_url = internal_url_for('details', src=temp_file)
fp_admin_url = internal_url_for('admin.folder_permissions', path=temp_dir)
fp_trace_url = internal_url_for('admin.trace_permissions', path=temp_dir)
# Create test folder and file
make_dirs(temp_dir)
copy_file('test_images/thames.jpg', temp_file)
# Test plain image views
rv = self.app.get(image_url)
self.assertEqual(rv.status_code, 200)
rv = self.app.get(original_url)
self.assertEqual(rv.status_code, 200)
# Test image with a unicode overlay name
rv = self.app.get(overlayed_image_url)
self.assertEqual(rv.status_code, 200)
# Test directory listing
self.login('admin', 'admin')
rv = self.app.get(list_url)
self.assertEqual(rv.status_code, 200)
self.assertNotIn('class="error', rv.data.decode('utf8'))
# Test viewing details
rv = self.app.get(details_url)
self.assertEqual(rv.status_code, 200)
self.assertNotIn('class="error', rv.data.decode('utf8'))
# Test folder permission admin
rv = self.app.get(fp_admin_url)
self.assertEqual(rv.status_code, 200)
self.assertNotIn('class="error', rv.data.decode('utf8'))
# Test permissions tracing
rv = self.app.get(fp_trace_url)
self.assertEqual(rv.status_code, 200)
self.assertNotIn('class="error', rv.data.decode('utf8'))
finally:
delete_dir(temp_dir, recursive=True)
# Test that there are no database accesses under optimal conditions
def test_db_accesses(self):
test_image = 'test_images/cathedral.jpg'
sql_info = { 'count': 0, 'last': '' }
# Install an SQL event listener
def on_sql(sql):
sql_info['count'] += 1
sql_info['last'] = sql
dm._add_sql_listener(on_sql)
# Check that the listener works
dm.get_group(Group.ID_PUBLIC)
assert sql_info['count'] == 1
# Clear out the image caches and permissions caches
im.reset_image(ImageAttrs(test_image))
delete_image_ids()
pm.reset_folder_permissions()
# Viewing an image will trigger SQL for the image record and folder permissions reads
rv = self.app.get('/image?src=' + test_image)
assert rv.status_code == 200
last_sql_count = sql_info['count']
assert last_sql_count > 1
# Viewing it again should use cached data with no SQL
rv = self.app.get('/image?src=' + test_image)
assert rv.status_code == 200
assert sql_info['count'] == last_sql_count, 'Unexpected SQL: ' + sql_info['last']
# Viewing a smaller version of the same thing
rv = self.app.get('/image?src=' + test_image + '&width=200')
assert rv.status_code == 200
# We expect SQL:
# 1) Cache miss looking for an exact cached version (issues a delete)
# 2) Cache search looking for a base image to resize (issues a select)
# 3) Cache addition of the resized version (issues a select then an insert)
EXPECT_SQL = 4
assert sql_info['count'] == last_sql_count + EXPECT_SQL
last_sql_count = sql_info['count']
# Viewing that again should use cached data with no SQL
rv = self.app.get('/image?src=' + test_image + '&width=200')
assert rv.status_code == 200
assert sql_info['count'] == last_sql_count, 'Unexpected SQL: ' + sql_info['last']
# Test folder permission hierarchy / inheritance
def test_folder_permissions_hierarchy(self):
temp_file = '/rootfile.jpg'
try:
# Reset the default permission to None
set_default_public_permission(FolderPermission.ACCESS_NONE)
set_default_internal_permission(FolderPermission.ACCESS_NONE)
# test_images should not be viewable
rv = self.app.get('/image?src=test_images/cathedral.jpg')
assert rv.status_code == API_CODES.UNAUTHORISED
# Set a user's group to allow view for root folder
setup_user_account('kryten', 'none')
db_group = dm.get_group(groupname='kryten-group')
db_folder = dm.get_folder(folder_path='')
dm.save_object(FolderPermission(db_folder, db_group, FolderPermission.ACCESS_VIEW))
pm.reset_folder_permissions()
# Log in, test_images should be viewable now
self.login('kryten', 'kryten')
rv = self.app.get('/image?src=test_images/cathedral.jpg')
assert rv.status_code == 200
# But download should be denied
rv = self.app.get('/original?src=test_images/cathedral.jpg')
assert rv.status_code == API_CODES.UNAUTHORISED
# Update test group permission to allow download for test_images folder
db_folder = dm.get_folder(folder_path='test_images')
dm.save_object(FolderPermission(db_folder, db_group, FolderPermission.ACCESS_DOWNLOAD))
pm.reset_folder_permissions()
# Download should be denied for root, but now allowed for test_images
copy_file('test_images/cathedral.jpg', temp_file)
rv = self.app.get('/original?src=' + temp_file)
assert rv.status_code == API_CODES.UNAUTHORISED
rv = self.app.get('/original?src=test_images/cathedral.jpg')
assert rv.status_code == 200
# For theoretical new sub-folders, /newfolder should now allow view
# and /test_images/newfolder should allow download. This test is for
# the upload page, which has to create new folders (e.g. the daily
# uploads folder) and so needs to calculate their permissions in advance.
with self.app as this_session:
this_session.get('/')
assert pm.calculate_folder_permissions('/newfolder',
get_session_user(), folder_must_exist=False) == FolderPermission.ACCESS_VIEW
assert pm.calculate_folder_permissions('/test_images/newfolder',
get_session_user(), folder_must_exist=False) == FolderPermission.ACCESS_DOWNLOAD
# Log out, test_image should not be viewable again
self.logout()
rv = self.app.get('/image?src=test_images/cathedral.jpg')
assert rv.status_code == API_CODES.UNAUTHORISED
rv = self.app.get('/original?src=test_images/cathedral.jpg')
assert rv.status_code == API_CODES.UNAUTHORISED
# Set the default public permission to View
set_default_public_permission(FolderPermission.ACCESS_VIEW)
# test_image should be viewable now
rv = self.app.get('/image?src=test_images/cathedral.jpg')
assert rv.status_code == 200
finally:
delete_file(temp_file)
set_default_public_permission(FolderPermission.ACCESS_DOWNLOAD)
set_default_internal_permission(FolderPermission.ACCESS_DOWNLOAD)
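# A sketch of the inheritance rule exercised above. This is an assumption
# for illustration, not the permissions engine: a folder's effective
# permission comes from the closest ancestor (or the folder itself) that
# has an explicit permission entry.
def test_permission_inheritance_sketch(self):
    perms = {'/': 'view', '/test_images': 'download'}  # hypothetical entries
    def effective_permission(path):
        # Walk up the path until an explicit entry is found
        while path not in perms and path != '/':
            path = path.rsplit('/', 1)[0] or '/'
        return perms.get(path)
    self.assertEqual(effective_permission('/newfolder'), 'view')
    self.assertEqual(effective_permission('/test_images/newfolder'), 'download')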
# Test image and page access (folder permissions)
def test_folder_permissions(self):
temp_file = '/tmp/qis_uploadfile.jpg'
temp_image_path = 'test_images/qis_uploadfile.jpg'
try:
# 1 Folder browse page requires view permission
# 2 Image details page requires view permission
# 3 Image view requires view permission
# 4 Image download requires download permission
# 5 Image edit page requires edit permission
# 6 Image upload requires upload permission
def test_pages(expect_pass):
rv = self.app.get('/list/') #1
assert rv.status_code == 200
assert ('test_images</a>' in rv.data.decode('utf8')) if expect_pass[0] else ('permission is required' in rv.data.decode('utf8'))
rv = self.app.get('/details/?src=test_images/cathedral.jpg') #2
assert rv.status_code == 200
assert ('Image width' in rv.data.decode('utf8')) if expect_pass[1] else ('permission is required' in rv.data.decode('utf8'))
rv = self.app.get('/image?src=test_images/cathedral.jpg') #3
assert (rv.status_code == 200) if expect_pass[2] else (rv.status_code == 403)
rv = self.app.get('/original?src=test_images/cathedral.jpg') #4
assert (rv.status_code == 200) if expect_pass[3] else (rv.status_code == 403)
rv = self.app.get('/edit/?src=test_images/cathedral.jpg') #5
assert rv.status_code == 200
assert ('Title:' in rv.data.decode('utf8')) if expect_pass[4] else ('permission is required' in rv.data.decode('utf8'))
rv = self.file_upload(self.app, temp_file, 'test_images') #6
assert (rv.status_code == 200) if expect_pass[5] else (rv.status_code != 200)
# Create temp file for uploads
src_file = get_abs_path('test_images/cathedral.jpg')
shutil.copy(src_file, temp_file)
# Reset the default permission to None
set_default_public_permission(FolderPermission.ACCESS_NONE)
set_default_internal_permission(FolderPermission.ACCESS_NONE)
# Create test user with no permission overrides, log in
setup_user_account('kryten', 'none')
self.login('kryten', 'kryten')
db_group = dm.get_group(groupname='kryten-group')
db_folder = dm.get_folder(folder_path='')
db_test_folder = dm.get_folder(folder_path='test_images')
# Run numbered tests - first with no permission
fp = FolderPermission(db_folder, db_group, FolderPermission.ACCESS_NONE)
fp = dm.save_object(fp, refresh=True)
pm.reset_folder_permissions()
test_pages((False, False, False, False, False, False))
# Also test permission tracing (ATPT)
with self.app as this_session:
this_session.get('/')
ptrace = pm._trace_folder_permissions(db_test_folder, get_session_user(), check_consistency=True)
assert ptrace['access'] == FolderPermission.ACCESS_NONE, 'Trace is ' + _trace_to_str(ptrace)
# With view permission
fp.access = FolderPermission.ACCESS_VIEW
dm.save_object(fp)
pm.reset_folder_permissions()
test_pages((True, True, True, False, False, False))
# ATPT
with self.app as this_session:
this_session.get('/')
ptrace = pm._trace_folder_permissions(db_test_folder, get_session_user(), check_consistency=True)
assert ptrace['access'] == FolderPermission.ACCESS_VIEW, 'Trace is ' + _trace_to_str(ptrace)
# With download permission
fp.access = FolderPermission.ACCESS_DOWNLOAD
dm.save_object(fp)
pm.reset_folder_permissions()
test_pages((True, True, True, True, False, False))
# ATPT
with self.app as this_session:
this_session.get('/')
ptrace = pm._trace_folder_permissions(db_test_folder, get_session_user(), check_consistency=True)
assert ptrace['access'] == FolderPermission.ACCESS_DOWNLOAD, 'Trace is ' + _trace_to_str(ptrace)
# With edit permission
fp.access = FolderPermission.ACCESS_EDIT
dm.save_object(fp)
pm.reset_folder_permissions()
test_pages((True, True, True, True, True, False))
# ATPT
with self.app as this_session:
this_session.get('/')
ptrace = pm._trace_folder_permissions(db_test_folder, get_session_user(), check_consistency=True)
assert ptrace['access'] == FolderPermission.ACCESS_EDIT, 'Trace is ' + _trace_to_str(ptrace)
# With upload permission
fp.access = FolderPermission.ACCESS_UPLOAD
dm.save_object(fp)
pm.reset_folder_permissions()
test_pages((True, True, True, True, True, True))
# ATPT
with self.app as this_session:
this_session.get('/')
ptrace = pm._trace_folder_permissions(db_test_folder, get_session_user(), check_consistency=True)
assert ptrace['access'] == FolderPermission.ACCESS_UPLOAD, 'Trace is ' + _trace_to_str(ptrace)
# Check that the permissions trace is updated when admin permission is granted
setup_user_account('kryten', 'admin_files')
self.login('kryten', 'kryten')
with self.app as this_session:
this_session.get('/')
ptrace = pm._trace_folder_permissions(db_test_folder, get_session_user(), check_consistency=True)
assert ptrace['access'] == FolderPermission.ACCESS_ALL, 'Trace is ' + _trace_to_str(ptrace)
finally:
# Delete temp file and uploaded file
if os.path.exists(temp_file): os.remove(temp_file)
self.delete_image_and_data(temp_image_path)
set_default_public_permission(FolderPermission.ACCESS_DOWNLOAD)
set_default_internal_permission(FolderPermission.ACCESS_DOWNLOAD)
# Test that overlays obey the folder permission rules
def test_overlay_permissions(self):
ov_folder = 'test_overlays/'
ov_path = ov_folder + 'overlay.png'
try:
# Create an overlay folder and image
make_dirs(ov_folder)
copy_file('test_images/quru110.png', ov_path)
# Set the folder permissions to deny view on overlay folder
db_ov_folder = auto_sync_folder(ov_folder, dm, tm, False)
db_group = dm.get_group(Group.ID_PUBLIC)
dm.save_object(FolderPermission(db_ov_folder, db_group, FolderPermission.ACCESS_NONE))
# Check we can view our test image and NOT the overlay image
rv = self.app.get('/image?src=test_images/cathedral.jpg')
assert rv.status_code == 200
rv = self.app.get('/image?src=' + ov_path)
assert rv.status_code == 403
# Now see if we can view the overlay inside the test image (hopefully not)
rv = self.app.get('/image?src=test_images/cathedral.jpg&overlay=' + ov_path)
assert rv.status_code == 403
finally:
delete_dir(ov_folder, True)
# Test that the browser cache expiry headers work
def test_caching_expiry_settings(self):
try:
# Utility to update the default template
db_def_temp = dm.get_image_template(tempname='Default')
def set_default_expiry(value):
db_def_temp.template['expiry_secs']['value'] = value
dm.save_object(db_def_temp)
im.reset_templates()
# This should work with both 'image' and 'original'
for api in ['image', 'original']:
img_url = '/' + api + '?src=test_images/dorset.jpg&width=800'
set_default_expiry(-1)
img = self.app.get(img_url)
self.assertEqual(img.headers.get('Expires'), http_date(0))
self.assertIn(img.headers.get('Cache-Control'), ['no-cache, public', 'public, no-cache'])
set_default_expiry(0)
img = self.app.get(img_url)
self.assertIsNone(img.headers.get('Expires'))
self.assertIsNone(img.headers.get('Cache-Control'))
set_default_expiry(60)
img = self.app.get(img_url)
self.assertEqual(img.headers.get('Expires'), http_date(int(time.time() + 60)))
self.assertIn(img.headers.get('Cache-Control'), ['public, max-age=60', 'max-age=60, public'])
finally:
reset_default_image_template()
# Test that the browser cache validation headers work
def test_etags(self):
# Check that client caching is enabled
self.assertGreater(im.get_default_template().expiry_secs(), 0)
# Setup
img_url = '/image?src=test_images/dorset.jpg&width=440&angle=90&top=0.2&tile=3:4'
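# X-From-Cache is a diagnostic response header: 'False' means the image was
# newly generated, 'True' means it was served from the image cache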
rv = self.app.get(img_url)
assert rv.headers['X-From-Cache'] == 'False'
assert rv.headers.get('ETag') is not None
etag = rv.headers.get('ETag')
# Etag should stay the same for the same cached image
rv = self.app.get(img_url)
assert rv.headers['X-From-Cache'] == 'True'
assert rv.headers.get('ETag') == etag
# Etag should be updated when the image is re-generated
im.reset_image(ImageAttrs('test_images/dorset.jpg'))
rv = self.app.get(img_url)
assert rv.headers['X-From-Cache'] == 'False'
assert rv.headers.get('ETag') is not None
new_etag = rv.headers.get('ETag')
assert new_etag != etag
# Etag should stay the same for the same cached image
rv = self.app.get(img_url)
assert rv.headers['X-From-Cache'] == 'True'
assert rv.headers.get('ETag') == new_etag
# Test the ETag header behaves as it should
def test_etag_variations(self):
rv = self.app.get('/image?src=test_images/thames.jpg&width=800')
assert rv.headers.get('ETag') is not None
etag_800 = rv.headers.get('ETag')
# Test same image
rv = self.app.get('/image?src=test_images/thames.jpg&width=800')
assert rv.headers.get('ETag') == etag_800
# Test equivalent image
rv = self.app.get('/image?src=test_images/thames.jpg&width=800&angle=360&format=jpg&left=0&right=1')
assert rv.headers.get('ETag') == etag_800
# Test slightly different image
rv = self.app.get('/image?src=test_images/thames.jpg&width=810')
assert rv.headers.get('ETag') != etag_800
etag_810 = rv.headers.get('ETag')
rv = self.app.get('/image?src=test_images/thames.jpg')
assert rv.headers.get('ETag') != etag_800
assert rv.headers.get('ETag') != etag_810
etag_thames = rv.headers.get('ETag')
# Test very different image
rv = self.app.get('/image?src=test_images/cathedral.jpg')
assert rv.headers.get('ETag') != etag_800
assert rv.headers.get('ETag') != etag_810
assert rv.headers.get('ETag') != etag_thames
# Test that browser caching still works when server side caching is off
def test_no_server_caching_etags(self):
# Check the expected default expires header value
self.assertEqual(im.get_default_template().expiry_secs(), 604800)
# Setup
setup_user_account('kryten', 'none')
self.login('kryten', 'kryten') # Login to allow cache=0
img_url = '/image?src=test_images/dorset.jpg&width=250&cache=0'
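# With cache=0 the generated image is not stored in the server-side cache,
# so every request below is re-generated (X-From-Cache stays 'False')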
rv = self.app.get(img_url)
self.assertEqual(rv.headers['X-From-Cache'], 'False')
self.assertIsNotNone(rv.headers.get('ETag'))
self.assertEqual(rv.headers.get('Expires'), http_date(int(time.time() + 604800)))
self.assertIn(rv.headers.get('Cache-Control'), ['public, max-age=604800', 'max-age=604800, public'])
etag = rv.headers.get('ETag')
# Etag should stay the same for the same re-generated image
rv = self.app.get(img_url)
self.assertEqual(rv.headers['X-From-Cache'], 'False')
self.assertEqual(rv.headers.get('Expires'), http_date(int(time.time() + 604800)))
self.assertIn(rv.headers.get('Cache-Control'), ['public, max-age=604800', 'max-age=604800, public'])
self.assertEqual(rv.headers.get('ETag'), etag)
# Test that etags are removed when client side caching is off
def test_no_client_caching_etags(self):
try:
# Turn off client side caching in the default template
db_def_temp = dm.get_image_template(tempname='Default')
db_def_temp.template['expiry_secs']['value'] = -1
dm.save_object(db_def_temp)
im.reset_templates()
# Run test
for api in ['image', 'original']:
img_url = '/' + api + '?src=test_images/dorset.jpg'
img = self.app.get(img_url)
self.assertIsNone(img.headers.get('ETag'))
finally:
reset_default_image_template()
# Test that ETags are all different for different images with the
# same parameters and for the same images with different parameters
def test_etag_collisions(self):
# Check that client caching is enabled
self.assertGreater(im.get_default_template().expiry_secs(), 0)
# Generate a suite of URLs
url_list = [
'/image?src=test_images/dorset.jpg',
'/image?src=test_images/cathedral.jpg',
'/image?src=test_images/blue bells.jpg',
'/image?src=test_images/quru470.png',
]
url_list2 = []
for url in url_list:
url_list2.append(url)
url_list2.append(url + '&page=2')
url_list2.append(url + '&page=2&width=200')
url_list2.append(url + '&page=2&width=200&flip=h')
url_list2.append(url + '&width=200&height=200')
url_list2.append(url + '&width=200&height=200&fill=red')
etags_list = []
# Request all the URLs
for url in url_list2:
rv = self.app.get(url)
assert rv.status_code == 200
assert rv.headers.get('ETag') is not None
etags_list.append(rv.headers['ETag'])
# There should be no dupes
assert len(etags_list) == len(set(etags_list))
# Test that clients with an up-to-date cached image don't have to download it again
def test_304_Not_Modified(self):
# Check the expected default expires header value
self.assertEqual(im.get_default_template().expiry_secs(), 604800)
# This should work with both 'image' and 'original'
for api in ['image', 'original']:
# Setup
img_url = '/' + api + '?src=test_images/dorset.jpg&width=440&angle=90&top=0.2&tile=3:4'
rv = self.app.get(img_url)
self.assertEqual(rv.status_code, 200)
etag = rv.headers.get('ETag')
# Client sending an Etag should get a 304 Not Modified if the Etag is still valid
rv = self.app.get(img_url, headers={
'If-None-Match': etag
})
self.assertEqual(rv.status_code, 304)
# http://stackoverflow.com/a/4393499/1671320
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
self.assertEqual(rv.headers.get('ETag'), etag)
self.assertIsNotNone(rv.headers.get('Date'))
self.assertIn(
rv.headers.get('Expires'), [
http_date(int(time.time() + 604800 - 1)), # If time() has wrapped to the next second
http_date(int(time.time() + 604800)) # Expected
]
)
self.assertIn(rv.headers.get('Cache-Control'), ['public, max-age=604800', 'max-age=604800, public'])
# Flask bug? A Content-Type header appears here under the test client but is correctly absent outside of unit tests
# self.assertIsNone(rv.headers.get('Content-Type'))
self.assertIsNone(rv.headers.get('Content-Length'))
self.assertIsNone(rv.headers.get('X-From-Cache'))
self.assertIsNone(rv.headers.get('Content-Disposition'))
self.assertEqual(rv.data, b'')
# Now reset the image
if api == 'image':
im.reset_image(ImageAttrs('test_images/dorset.jpg'))
else:
os.utime(get_abs_path('test_images/dorset.jpg'), None) # Touch
# Client should get a new image and Etag when the old one is no longer valid
rv = self.app.get(img_url, headers={
'If-None-Match': etag
})
self.assertEqual(rv.status_code, 200)
self.assertNotEqual(rv.headers.get('ETag'), etag)
self.assertGreater(len(rv.data), 0)
# #2668 Make sure things work properly behind a reverse proxy / load balancer
def test_proxy_server(self):
from imageserver.flask_ext import add_proxy_server_support
# Set standard settings
flask_app.config['PROXY_SERVERS'] = 0
flask_app.config['INTERNAL_BROWSING_SSL'] = True
flask_app.config['SESSION_COOKIE_SECURE'] = True
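# INTERNAL_BROWSING_SSL=True means the login page requires HTTPS, so a plain HTTP
# request is redirected unless a trusted X-Forwarded-Proto header reports the
# original request as already being HTTPS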
# As standard, expect the X-Forwarded-For and X-Forwarded-Proto headers to be ignored
rv = self.app.get('/login/', headers={'X-Forwarded-Proto': 'https'})
# Should be redirecting us to HTTPS
self.assertEqual(rv.status_code, 302)
self.assertIn('https://', rv.data.decode('utf8'))
# Should log localhost as the IP
with mock.patch('imageserver.flask_app.logger.error') as mocklog:
self.app.get(
'/image?src=../../../notallowed',
headers={'X-Forwarded-For': '1.2.3.4'},
environ_base={'REMOTE_ADDR': '127.0.0.1'}
)
mocklog.assert_called_once_with(mock.ANY)
self.assertIn('IP 127.0.0.1', mocklog.call_args[0][0])
# With proxy support enabled, expect the headers to be respected
flask_app.config['PROXY_SERVERS'] = 1
add_proxy_server_support(flask_app, flask_app.config['PROXY_SERVERS'])
rv = self.app.get('/login/', headers={'X-Forwarded-Proto': 'https'})
# Should now just serve the login page
self.assertEqual(rv.status_code, 200)
# Should now log 1.2.3.4 as the IP
with mock.patch('imageserver.flask_app.logger.error') as mocklog:
self.app.get(
'/image?src=../../../notallowed',
headers={'X-Forwarded-For': '1.2.3.4'},
environ_base={'REMOTE_ADDR': '127.0.0.1'}
)
mocklog.assert_called_once_with(mock.ANY)
self.assertIn('IP 1.2.3.4', mocklog.call_args[0][0])
# #2799 User names should be case insensitive
def test_username_case(self):
try:
# Get 2 identical user objects, but with username in different case
newuser = User(
'Jango', 'Fett', 'jango@bountyhunters.info', 'jangofett', 'Tipoca',
User.AUTH_TYPE_PASSWORD, False, User.STATUS_ACTIVE
)
cloneuser = User(
'Jango', 'Fett', 'jango@bountyhunters.info', 'JangoFett', 'Tipoca',
User.AUTH_TYPE_PASSWORD, False, User.STATUS_ACTIVE
)
# Create the new user
dm.create_user(newuser)
# We should be able to read this back with username in any case
u = dm.get_user(username='jangofett')
self.assertIsNotNone(u)
u = dm.get_user(username='JangoFett')
self.assertIsNotNone(u)
# Creating the clone user should fail
self.assertRaises(AlreadyExistsError, dm.create_user, cloneuser)
finally:
# Tidy up
u = dm.get_user(username='jangofett')
if u: dm.delete_object(u)
u2 = dm.get_user(username='JangoFett')
if u2: dm.delete_object(u2)
# v4.1 #11 Make an attempt to filter out secrets from error messages
def test_error_message_redaction(self):
import imageserver.views_util
imageserver.views_util._safe_error_str_replacements = None
# The XREF setting is just one example of a value that could be sensitive; it should not appear in error messages
flask_app.config['XREF_TRACKING_URL'] = 'https://my.internal.service/'
dummy_error = 'Failed to invoke URL ' + flask_app.config['XREF_TRACKING_URL']
for api in ['/image', '/original']:
with mock.patch('imageserver.views.invoke_http_async') as mockhttp:
mockhttp.side_effect = Exception(dummy_error)
rv = self.app.get(api + '?src=test_images/cathedral.jpg&xref=1')
self.assertEqual(rv.status_code, 500)
err_msg = rv.data.decode('utf8')
self.assertIn('Failed to invoke URL', err_msg)
# The setting value should have been replaced with the setting name
self.assertNotIn(flask_app.config['XREF_TRACKING_URL'], err_msg)
self.assertIn('XREF_TRACKING_URL', err_msg)
class ImageServerCacheTests(BaseTestCase):
# Test basic cache
def test_cache_engine_raw(self):
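# The raw_* API reads and writes objects directly by key; unlike the managed
# API tested below, it attaches no searchable metadata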
ret = cm.raw_get('knight')
self.assertIsNone(ret, 'Test object already in cache - reset cache and re-run tests')
ret = cm.raw_put('knight', ImageAttrs('round/table.jpg', 1000))
self.assertTrue(ret)
ret = cm.raw_get('knight')
self.assertIsNotNone(ret)
self.assertIsInstance(ret, ImageAttrs)
self.assertEqual(ret.filename(), 'round/table.jpg')
self.assertEqual(ret.database_id(), 1000)
ret = cm.raw_delete('knight')
self.assertTrue(ret)
ret = cm.raw_get('knight')
self.assertIsNone(ret)
# Test managed cache
def test_cache_engine(self):
ret = cm.get('grail')
self.assertIsNone(ret, 'Test object already in cache - reset cache and re-run tests')
ok = cm.put('grail', 'the knights who say Ni', 0, {
'searchfield1': -1, 'searchfield2': 100, 'searchfield3': 100,
'searchfield4': None, 'searchfield5': None, 'metadata': 'Rockery'
})
self.assertTrue(ok)
ret = cm.get('grail')
self.assertIsNotNone(ret, 'Failed to retrieve object from cache')
self.assertEqual(ret, 'the knights who say Ni', 'Wrong object retrieved from cache')
# Add something else to filter out from the search
ok = cm.put('elderberry', 'Go away', 0, {
'searchfield1': -1, 'searchfield2': 50, 'searchfield3': 100,
'searchfield4': None, 'searchfield5': None, 'metadata': 'Hamsters'
})
self.assertTrue(ok)
# Test search
ret = cm.search(order=None, max_rows=1, searchfield1__eq=-1, searchfield2__gt=99, searchfield3__lt=101)
self.assertEqual(len(ret), 1, 'Failed to search cache')
result = ret[0]
self.assertEqual(result['key'], 'grail', 'Wrong key from cache search')
self.assertEqual(result['metadata'], 'Rockery', 'Wrong metadata from cache search')
ok = cm.delete('grail')
self.assertTrue(ok)
ret = cm.get('grail')
self.assertIsNone(ret, 'Failed to delete object from cache')
# Test that no one has tinkered incorrectly with the caching slot allocation code
def test_cache_slot_headers(self):
from imageserver.cache_manager import SLOT_HEADER_SIZE
from imageserver.cache_manager import MAX_OBJECT_SLOTS
header1 = cm._get_slot_header(1, True)
self.assertEqual(len(header1), SLOT_HEADER_SIZE)
header2 = cm._get_slot_header(MAX_OBJECT_SLOTS, True)
self.assertEqual(len(header2), SLOT_HEADER_SIZE)
class UtilityTests(unittest.TestCase):
def test_image_attrs_serialisation(self):
ia = ImageAttrs('some/path', -1, page=2, iformat='gif', template='smalljpeg',
width=2000, height=1000, size_fit=False,
fill='black', colorspace='rgb', strip=False,
tile_spec=(3, 16))
ia_dict = ia.to_dict()
self.assertEqual(ia_dict['template'], 'smalljpeg')
self.assertEqual(ia_dict['fill'], 'black')
self.assertEqual(ia_dict['page'], 2)
self.assertEqual(ia_dict['size_fit'], False)
self.assertEqual(ia_dict['format'], 'gif')
self.assertEqual(ia_dict['colorspace'], 'rgb')
self.assertEqual(ia_dict['width'], 2000)
self.assertEqual(ia_dict['height'], 1000)
self.assertEqual(ia_dict['tile'], (3, 16))
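# Round trip: deserialising the dict and serialising it again should reproduce it exactly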
rev = ImageAttrs.from_dict(ia_dict)
rev_dict = rev.to_dict()
self.assertEqual(ia_dict, rev_dict)
def test_image_attrs_bad_serialisation(self):
bad_dict = {
'filename': 'some/path',
'format': 'potato'
}
self.assertRaises(ValueError, ImageAttrs.from_dict, bad_dict)
bad_dict = {
'filename': 'some/path',
'width': '-1'
}
self.assertRaises(ValueError, ImageAttrs.from_dict, bad_dict)
bad_dict = {
'filename': ''
}
self.assertRaises(ValueError, ImageAttrs.from_dict, bad_dict)
def test_template_attrs_serialisation(self):
# Test the standard constructor
ta = TemplateAttrs('abcdef', {
'width': {'value': 2000},
'height': {'value': 1000},
'tile': {'value': (3, 16)},
'expiry_secs': {'value': 50000},
'record_stats': {'value': True}
})
self.assertEqual(ta.name(), 'abcdef')
self.assertEqual(ta.get_image_attrs().filename(), 'abcdef')
self.assertEqual(ta.get_image_attrs().tile_spec(), (3, 16))
self.assertEqual(ta.expiry_secs(), 50000)
self.assertEqual(ta.record_stats(), True)
self.assertIsNone(ta.attachment())
# Test values dict
ta_dict = ta.get_values_dict()
self.assertEqual(ta_dict['filename'], 'abcdef')
self.assertEqual(ta_dict['width'], 2000)
self.assertEqual(ta_dict['height'], 1000)
self.assertEqual(ta_dict['tile'], (3, 16))
self.assertEqual(ta_dict['expiry_secs'], 50000)
self.assertEqual(ta_dict['record_stats'], True)
def test_template_attrs_bad_serialisation(self):
# Old dict format
bad_dict = {
'filename': 'some/path',
'expiry_secs': 50000
}
self.assertRaises(ValueError, TemplateAttrs, 'badtemplate', bad_dict)
# New dict format, bad values
bad_dict = {
'filename': {'value': 'some/path'},
'expiry_secs': {'value': -2}
}
self.assertRaises(ValueError, TemplateAttrs, 'badtemplate', bad_dict)
bad_dict = {
'filename': {'value': 'some/path'},
'expiry_secs': {'value': 'not an int'}
}
self.assertRaises(ValueError, TemplateAttrs, 'badtemplate', bad_dict)
bad_dict = {
'filename': {'value': 'some/path'},
'expiry_secs': {'value': 50000},
'record_stats': {'value': 'not a bool'}
}
self.assertRaises(ValueError, TemplateAttrs, 'badtemplate', bad_dict)
| quru/qis | src/tests/tests.py | Python | agpl-3.0 | 123,814 | ["Galaxy"] | b6e2346384c48c190053fd74d6353e68bc958083c991d568b803c137835d9505 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RAgdex(RPackage):
"""A tool to evaluate agreement of differential expression
for cross-species genomics."""
homepage = "http://bioconductor.org/packages/AGDEX/"
url = "https://git.bioconductor.org/packages/AGDEX"
version('1.24.0', git='https://git.bioconductor.org/packages/AGDEX', commit='29c6bcfa6919a5c6d8bcb36b44e75145a60ce7b5')
depends_on('r@3.4.0:3.4.9', when='@1.24.0')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-gseabase', type=('build', 'run'))
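# Usage sketch (assuming a working Spack installation): "spack install r-agdex@1.24.0"
# builds this package along with its r, r-biobase and r-gseabase dependencies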
| lgarren/spack | var/spack/repos/builtin/packages/r-agdex/package.py | Python | lgpl-2.1 | 1,774 | ["Bioconductor"] | d005eeb93d1a2c9468e5a21a9b8e79ce67ad182f93c8e87d2df669bb6656e1f6 |