| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| string (12-1.05M chars) | string (5-86 chars) | string (4-191 chars) | string (1 class) | string (15 classes) | int32 (12-1.05M) | list (1-23 items) | string (64 chars) |
import json
import os
import unittest
import warnings
from sympy import Number, Symbol
from pymatgen.analysis.surface_analysis import (
NanoscaleStability,
SlabEntry,
SurfaceEnergyPlotter,
WorkFunctionAnalyzer,
)
from pymatgen.analysis.wulff import WulffShape
from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.util.testing import PymatgenTest
__author__ = "Richard Tran"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Richard Tran"
__email__ = "rit001@eng.ucsd.edu"
__date__ = "Aug 24, 2017"
def get_path(path_str):
cwd = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(cwd, "..", "..", "..", "test_files", "surface_tests", path_str)
return path
class SlabEntryTest(PymatgenTest):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with open(os.path.join(get_path(""), "ucell_entries.txt")) as ucell_entries:
ucell_entries = json.loads(ucell_entries.read())
self.ucell_entries = ucell_entries
# Load objects for O adsorption tests
self.metals_O_entry_dict = load_O_adsorption()
# Load objects for Cu test
self.Cu_entry_dict = get_entry_dict(os.path.join(get_path(""), "Cu_entries.txt"))
self.assertEqual(len(self.Cu_entry_dict.keys()), 13)
self.Cu_ucell_entry = ComputedStructureEntry.from_dict(self.ucell_entries["Cu"])
# Load dummy MgO slab entries
self.MgO_ucell_entry = ComputedStructureEntry.from_dict(self.ucell_entries["MgO"])
self.Mg_ucell_entry = ComputedStructureEntry.from_dict(self.ucell_entries["Mg"])
self.MgO_slab_entry_dict = get_entry_dict(os.path.join(get_path(""), "MgO_slab_entries.txt"))
def test_properties(self):
# Test cases for getting adsorption-related quantities for a 1/4
# monolayer adsorption of O on the low-MMI surfaces of Pt, Ni and Rh
for el in self.metals_O_entry_dict.keys():
el_ucell = ComputedStructureEntry.from_dict(self.ucell_entries[el])
for hkl in self.metals_O_entry_dict[el].keys():
for clean in self.metals_O_entry_dict[el][hkl].keys():
for ads in self.metals_O_entry_dict[el][hkl][clean]:
ml = ads.get_unit_primitive_area
self.assertAlmostEqual(ml, 4, 2)
self.assertAlmostEqual(ads.get_monolayer, 1 / 4, 2)
Nads = ads.Nads_in_slab
self.assertEqual(Nads, 1)
self.assertEqual(ads.Nsurfs_ads_in_slab, 1)
# Determine the correct binding energy
with open(os.path.join(get_path(""), "isolated_O_entry.txt")) as isolated_O_entry:
isolated_O_entry = json.loads(isolated_O_entry.read())
O = ComputedStructureEntry.from_dict(isolated_O_entry)
gbind = (ads.energy - ml * clean.energy) / Nads - O.energy_per_atom
self.assertEqual(gbind, ads.gibbs_binding_energy())
# Determine the correct Gibbs adsorption energy
eads = Nads * gbind
self.assertEqual(eads, ads.gibbs_binding_energy(eads=True))
se = ads.surface_energy(el_ucell)
self.assertAlmostEqual(
se.as_coefficients_dict()[Symbol("delu_O")],
(-1 / 2) * ads.surface_area ** (-1),
)
def test_create_slab_label(self):
for el in self.metals_O_entry_dict.keys():
for hkl in self.metals_O_entry_dict[el].keys():
# Test WulffShape for adsorbed surfaces
for clean in self.metals_O_entry_dict[el][hkl]:
label = clean.create_slab_label
comp = str(clean.composition.reduced_composition)
self.assertEqual(str(hkl) + " %s" % (comp), label)
for ads in self.metals_O_entry_dict[el][hkl][clean]:
label = ads.create_slab_label
self.assertEqual(label, str(hkl) + " %s+O, 0.250 ML" % (comp))
def test_surface_energy(self):
# For a nonstoichiometric case, the chemical potentials do not
# cancel out; they serve as a reservoir for any missing atoms
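# Rough sketch of the idea (not the exact pymatgen expression): for a slab with
# an excess or deficit of Mg relative to bulk MgO,
#   gamma ~ [E_slab - n_MgO * E_MgO_bulk - n_excess_Mg * (E_Mg_bulk + delu_Mg)] / (2 * A),
# so delu_Mg enters the surface energy as a linear term, which is what the
# coefficient check below asserts.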
for slab_entry in self.MgO_slab_entry_dict[(1, 1, 1)].keys():
se = slab_entry.surface_energy(self.MgO_ucell_entry, ref_entries=[self.Mg_ucell_entry])
self.assertEqual(tuple(se.as_coefficients_dict().keys()), (Number(1), Symbol("delu_Mg")))
# For the case of a clean, stoichiometric slab, the surface energy
# should be a constant, independent of chemical potential.
all_se = []
ECu = self.Cu_ucell_entry.energy_per_atom
for hkl in self.Cu_entry_dict.keys():
slab_entry = list(self.Cu_entry_dict[hkl].keys())[0]
se = slab_entry.surface_energy(self.Cu_ucell_entry)
all_se.append(se)
# Manually calculate surface energy
manual_se = (slab_entry.energy - ECu * len(slab_entry.structure)) / (2 * slab_entry.surface_area)
self.assertArrayAlmostEqual(float(se), manual_se, 10)
# The (111) facet should be the most stable
clean111_entry = list(self.Cu_entry_dict[(1, 1, 1)].keys())[0]
se_Cu111 = clean111_entry.surface_energy(self.Cu_ucell_entry)
self.assertEqual(min(all_se), se_Cu111)
def test_cleaned_up_slab(self):
# The cleaned up slab should have the same reduced formula as a clean slab
for el in self.metals_O_entry_dict.keys():
for hkl in self.metals_O_entry_dict[el].keys():
for clean in self.metals_O_entry_dict[el][hkl].keys():
for ads in self.metals_O_entry_dict[el][hkl][clean]:
s = ads.cleaned_up_slab
self.assertEqual(
s.composition.reduced_composition,
clean.composition.reduced_composition,
)
class SurfaceEnergyPlotterTest(PymatgenTest):
def setUp(self):
entry_dict = get_entry_dict(os.path.join(get_path(""), "Cu_entries.txt"))
self.Cu_entry_dict = entry_dict
with open(os.path.join(get_path(""), "ucell_entries.txt")) as ucell_entries:
ucell_entries = json.loads(ucell_entries.read())
self.Cu_ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Cu"])
self.Cu_analyzer = SurfaceEnergyPlotter(entry_dict, self.Cu_ucell_entry)
self.metals_O_entry_dict = load_O_adsorption()
ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Pt"])
self.Pt_analyzer = SurfaceEnergyPlotter(self.metals_O_entry_dict["Pt"], ucell_entry)
ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Ni"])
self.Ni_analyzer = SurfaceEnergyPlotter(self.metals_O_entry_dict["Ni"], ucell_entry)
ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Rh"])
self.Rh_analyzer = SurfaceEnergyPlotter(self.metals_O_entry_dict["Rh"], ucell_entry)
self.Oads_analyzer_dict = {
"Pt": self.Pt_analyzer,
"Ni": self.Ni_analyzer,
"Rh": self.Rh_analyzer,
}
def test_get_stable_entry_at_u(self):
for el in self.Oads_analyzer_dict.keys():
plotter = self.Oads_analyzer_dict[el]
for hkl in plotter.all_slab_entries.keys():
# Test that the stable surface stays clean (same entry, same gamma) over a low chempot range
entry1, gamma1 = plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): -7})
entry2, gamma2 = plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): -6})
self.assertEqual(gamma1, gamma2)
self.assertEqual(entry1.label, entry2.label)
# Now test that for a high chempot, adsorption
# occurs and gamma is not equal to clean gamma
entry3, gamma3 = plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): -1})
self.assertNotEqual(entry3.label, entry2.label)
self.assertNotEqual(gamma3, gamma2)
# For any chempot greater than -6, surface energy should vary
# but the configuration should remain the same
entry4, gamma4 = plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): 0})
self.assertEqual(entry3.label, entry4.label)
self.assertNotEqual(gamma3, gamma4)
def test_wulff_from_chempot(self):
# Test that a Wulff shape is generated and that all
# expected facets of the Cu Wulff shape are present.
Cu_wulff = self.Cu_analyzer.wulff_from_chempot()
area_frac_dict = Cu_wulff.area_fraction_dict
facets_hkl = [
(1, 1, 1),
(3, 3, 1),
(3, 1, 0),
(1, 0, 0),
(3, 1, 1),
(2, 1, 0),
(2, 2, 1),
]
for hkl in area_frac_dict.keys():
if hkl in facets_hkl:
self.assertNotEqual(area_frac_dict[hkl], 0)
else:
self.assertEqual(area_frac_dict[hkl], 0)
for el in self.Oads_analyzer_dict.keys():
# Test WulffShape for adsorbed surfaces
analyzer = self.Oads_analyzer_dict[el]
# chempot = analyzer.max_adsorption_chempot_range(0)
wulff = analyzer.wulff_from_chempot(delu_default=-6)
se = wulff.weighted_surface_energy
# Test if a different Wulff shape is generated
# for Ni when adsorption comes into play
wulff_neg7 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-7)
wulff_neg6 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-6)
self.assertEqual(wulff_neg7.weighted_surface_energy, wulff_neg6.weighted_surface_energy)
wulff_neg55 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-5.5)
self.assertNotEqual(wulff_neg55.weighted_surface_energy, wulff_neg6.weighted_surface_energy)
wulff_neg525 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-5.25)
self.assertNotEqual(wulff_neg55.weighted_surface_energy, wulff_neg525.weighted_surface_energy)
def test_color_palette_dict(self):
for el in self.metals_O_entry_dict.keys():
analyzer = self.Oads_analyzer_dict[el]
color_dict = analyzer.color_palette_dict()
for hkl in self.metals_O_entry_dict[el].keys():
for clean in self.metals_O_entry_dict[el][hkl].keys():
color = color_dict[clean]
for ads in self.metals_O_entry_dict[el][hkl][clean]:
color = color_dict[ads]
def test_get_surface_equilibrium(self):
# For a clean, stoichiometric system, the two equations should
# be parallel because each surface energy is a constant. Then
# get_surface_equilibrium should return None.
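# In other words, gamma_111(delu) = c1 and gamma_100(delu) = c2 are two constant
# (horizontal) lines in (delu, gamma) space, so there is no unique intersection
# for the solver to return.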
clean111_entry = list(self.Cu_entry_dict[(1, 1, 1)].keys())[0]
clean100_entry = list(self.Cu_entry_dict[(1, 0, 0)].keys())[0]
soln = self.Cu_analyzer.get_surface_equilibrium([clean111_entry, clean100_entry])
self.assertFalse(soln)
# For adsorbed system, we should find one intercept
Pt_entries = self.metals_O_entry_dict["Pt"]
clean = list(Pt_entries[(1, 1, 1)].keys())[0]
ads = Pt_entries[(1, 1, 1)][clean][0]
Pt_analyzer = self.Oads_analyzer_dict["Pt"]
soln = Pt_analyzer.get_surface_equilibrium([clean, ads])
self.assertNotEqual(list(soln.values())[0], list(soln.values())[1])
# Check that the number of parameters for adsorption is correct
self.assertEqual((Symbol("delu_O"), Symbol("gamma")), tuple(soln.keys()))
# Adsorbed systems have a b2=(-1*Nads) / (Nsurfs * Aads)
se = ads.surface_energy(Pt_analyzer.ucell_entry, Pt_analyzer.ref_entries)
self.assertAlmostEqual(se.as_coefficients_dict()[Symbol("delu_O")], -1 / (2 * ads.surface_area))
def test_stable_u_range_dict(self):
for el in self.Oads_analyzer_dict.keys():
analyzer = self.Oads_analyzer_dict[el]
stable_u_range = analyzer.stable_u_range_dict([-1, 0], Symbol("delu_O"), no_doped=False)
all_u = []
for entry in stable_u_range.keys():
all_u.extend(stable_u_range[entry])
self.assertGreater(len(all_u), 1)
def test_entry_dict_from_list(self):
# Plug in a list of entries to see if it works
all_Pt_slab_entries = []
Pt_entries = self.Pt_analyzer.all_slab_entries
for hkl in Pt_entries.keys():
for clean in Pt_entries[hkl].keys():
all_Pt_slab_entries.append(clean)
all_Pt_slab_entries.extend(Pt_entries[hkl][clean])
a = SurfaceEnergyPlotter(all_Pt_slab_entries, self.Pt_analyzer.ucell_entry)
self.assertEqual(type(a).__name__, "SurfaceEnergyPlotter")
# def test_monolayer_vs_BE(self):
# for el in self.Oads_analyzer_dict.keys():
# # Test WulffShape for adsorbed surfaces
# analyzer = self.Oads_analyzer_dict[el]
# plt = analyzer.monolayer_vs_BE()
#
# def test_area_frac_vs_chempot_plot(self):
#
# for el in self.Oads_analyzer_dict.keys():
# # Test WulffShape for adsorbed surfaces
# analyzer = self.Oads_analyzer_dict[el]
# plt = analyzer.area_frac_vs_chempot_plot(x_is_u_ads=True)
#
# def test_chempot_vs_gamma_clean(self):
#
# plt = self.Cu_analyzer.chempot_vs_gamma_clean()
# for el in self.Oads_analyzer_dict.keys():
# # Test WulffShape for adsorbed surfaces
# analyzer = self.Oads_analyzer_dict[el]
# plt = analyzer.chempot_vs_gamma_clean(x_is_u_ads=True)
#
# def test_chempot_vs_gamma_facet(self):
#
# for el in self.metals_O_entry_dict.keys():
# for hkl in self.metals_O_entry_dict[el].keys():
# # Test WulffShape for adsorbed surfaces
# analyzer = self.Oads_analyzer_dict[el]
# plt = analyzer.chempot_vs_gamma_facet(hkl)
# def test_surface_chempot_range_map(self):
#
# for el in self.metals_O_entry_dict.keys():
# for hkl in self.metals_O_entry_dict[el].keys():
# # Test WulffShape for adsorbed surfaces
# analyzer = self.Oads_analyzer_dict[el]
# plt = analyzer.chempot_vs_gamma_facet(hkl)
class WorkfunctionAnalyzerTest(PymatgenTest):
def setUp(self):
self.kwargs = {
"poscar_filename": get_path("CONTCAR.relax1.gz"),
"locpot_filename": get_path("LOCPOT.gz"),
"outcar_filename": get_path("OUTCAR.relax1.gz"),
}
self.wf_analyzer = WorkFunctionAnalyzer.from_files(**self.kwargs)
def test_shift(self):
wf_analyzer_shift = WorkFunctionAnalyzer.from_files(shift=-0.25, blength=3.7, **self.kwargs)
self.assertEqual(
"%.f" % (self.wf_analyzer.ave_bulk_p),
"%.f" % (wf_analyzer_shift.ave_bulk_p),
)
def test_is_converged(self):
self.assertTrue(self.wf_analyzer.is_converged())
class NanoscaleStabilityTest(PymatgenTest):
def setUp(self):
# Load all entries
La_hcp_entry_dict = get_entry_dict(os.path.join(get_path(""), "La_hcp_entries.txt"))
La_fcc_entry_dict = get_entry_dict(os.path.join(get_path(""), "La_fcc_entries.txt"))
with open(os.path.join(get_path(""), "ucell_entries.txt")) as ucell_entries:
ucell_entries = json.loads(ucell_entries.read())
La_hcp_ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["La_hcp"])
La_fcc_ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["La_fcc"])
# Set up the NanoscaleStabilityClass
self.La_hcp_analyzer = SurfaceEnergyPlotter(La_hcp_entry_dict, La_hcp_ucell_entry)
self.La_fcc_analyzer = SurfaceEnergyPlotter(La_fcc_entry_dict, La_fcc_ucell_entry)
self.nanoscale_stability = NanoscaleStability([self.La_fcc_analyzer, self.La_hcp_analyzer])
def test_stability_at_r(self):
# Check that a different polymorph is stable below and
# above the equilibrium particle size
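# Rough sketch of the reasoning (sphere approximation, not the exact pymatgen
# expression): the free energy of a particle of radius r goes roughly as
#   G(r) ~ (4/3) * pi * r**3 * g_bulk_per_volume + 4 * pi * r**2 * gamma_weighted,
# so the surface term dominates at small r and the bulk term at large r, giving
# a crossover radius between the two polymorphs.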
r = self.nanoscale_stability.solve_equilibrium_point(self.La_hcp_analyzer, self.La_fcc_analyzer) * 10
# The hcp phase of the La particle should be the stable
# polymorph above the equilibrium radius
hcp_wulff = self.La_hcp_analyzer.wulff_from_chempot()
bulk = self.La_hcp_analyzer.ucell_entry
ghcp, rhcp = self.nanoscale_stability.wulff_gform_and_r(hcp_wulff, bulk, r + 10, from_sphere_area=True)
fcc_wulff = self.La_fcc_analyzer.wulff_from_chempot()
bulk = self.La_fcc_analyzer.ucell_entry
gfcc, rfcc = self.nanoscale_stability.wulff_gform_and_r(fcc_wulff, bulk, r + 10, from_sphere_area=True)
self.assertGreater(gfcc, ghcp)
# The fcc phase of the La particle should be the stable
# polymorph below the equilibrium radius
hcp_wulff = self.La_hcp_analyzer.wulff_from_chempot()
bulk = self.La_hcp_analyzer.ucell_entry
ghcp, rhcp = self.nanoscale_stability.wulff_gform_and_r(hcp_wulff, bulk, r - 10, from_sphere_area=True)
fcc_wulff = self.La_fcc_analyzer.wulff_from_chempot()
bulk = self.La_fcc_analyzer.ucell_entry
gfcc, rfcc = self.nanoscale_stability.wulff_gform_and_r(fcc_wulff, bulk, r - 10, from_sphere_area=True)
self.assertLess(gfcc, ghcp)
def test_scaled_wulff(self):
# Ensure that, for a given radius, the effective radii of the
# scaled Wulff shapes are the same (i.e. correctly scaled)
hcp_wulff = self.La_hcp_analyzer.wulff_from_chempot()
fcc_wulff = self.La_fcc_analyzer.wulff_from_chempot()
w1 = self.nanoscale_stability.scaled_wulff(hcp_wulff, 10)
w2 = self.nanoscale_stability.scaled_wulff(fcc_wulff, 10)
self.assertAlmostEqual(w1.effective_radius, w2.effective_radius)
self.assertAlmostEqual(w1.effective_radius, 10)
self.assertAlmostEqual(10, w2.effective_radius)
def get_entry_dict(filename):
# Helper to generate an entry_dict of SlabEntry objects keyed by Miller index
entry_dict = {}
with open(filename) as entries:
entries = json.loads(entries.read())
for k in entries.keys():
n = k[25:]
miller_index = []
for i, s in enumerate(n):
if s == "_":
break
if s == "-":
continue
t = int(s)
if n[i - 1] == "-":
t *= -1
miller_index.append(t)
hkl = tuple(miller_index)
if hkl not in entry_dict.keys():
entry_dict[hkl] = {}
entry = ComputedStructureEntry.from_dict(entries[k])
entry_dict[hkl][SlabEntry(entry.structure, entry.energy, hkl, label=k)] = []
return entry_dict
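# Note on get_entry_dict's key parsing (illustrative, assumed key format): the
# Miller index is read starting at character 26 of each key, one digit at a
# time up to the first "_", with a "-" flagging the following digit as
# negative. A hypothetical key whose characters 26 onward begin with "1-10_"
# would therefore be parsed as hkl == (1, -1, 0).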
def load_O_adsorption():
# Load the dictionary of clean and O-adsorbed Rh, Pt, and Ni entries
# Load the adsorbate as an entry
with open(os.path.join(get_path(""), "isolated_O_entry.txt")) as isolated_O_entry:
isolated_O_entry = json.loads(isolated_O_entry.read())
O = ComputedStructureEntry.from_dict(isolated_O_entry)
# entry_dict for the adsorption case: O adsorption on Ni, Rh and Pt
metals_O_entry_dict = {
"Ni": {(1, 1, 1): {}, (1, 0, 0): {}},
"Pt": {(1, 1, 1): {}},
"Rh": {(1, 0, 0): {}},
}
with open(os.path.join(get_path(""), "csentries_slabs.json")) as entries:
entries = json.loads(entries.read())
for k in entries.keys():
entry = ComputedStructureEntry.from_dict(entries[k])
for el in metals_O_entry_dict.keys():
if el in k:
if "111" in k:
clean = SlabEntry(entry.structure, entry.energy, (1, 1, 1), label=k + "_clean")
metals_O_entry_dict[el][(1, 1, 1)][clean] = []
if "110" in k:
clean = SlabEntry(entry.structure, entry.energy, (1, 1, 0), label=k + "_clean")
metals_O_entry_dict[el][(1, 1, 0)][clean] = []
if "100" in k:
clean = SlabEntry(entry.structure, entry.energy, (1, 0, 0), label=k + "_clean")
metals_O_entry_dict[el][(1, 0, 0)][clean] = []
with open(os.path.join(get_path(""), "csentries_o_ads.json")) as entries:
entries = json.loads(entries.read())
for k in entries.keys():
entry = ComputedStructureEntry.from_dict(entries[k])
for el in metals_O_entry_dict.keys():
if el in k:
if "111" in k:
clean = list(metals_O_entry_dict[el][(1, 1, 1)].keys())[0]
ads = SlabEntry(
entry.structure,
entry.energy,
(1, 1, 1),
label=k + "_O",
adsorbates=[O],
clean_entry=clean,
)
metals_O_entry_dict[el][(1, 1, 1)][clean] = [ads]
if "110" in k:
clean = list(metals_O_entry_dict[el][(1, 1, 0)].keys())[0]
ads = SlabEntry(
entry.structure,
entry.energy,
(1, 1, 0),
label=k + "_O",
adsorbates=[O],
clean_entry=clean,
)
metals_O_entry_dict[el][(1, 1, 0)][clean] = [ads]
if "100" in k:
clean = list(metals_O_entry_dict[el][(1, 0, 0)].keys())[0]
ads = SlabEntry(
entry.structure,
entry.energy,
(1, 0, 0),
label=k + "_O",
adsorbates=[O],
clean_entry=clean,
)
metals_O_entry_dict[el][(1, 0, 0)][clean] = [ads]
return metals_O_entry_dict
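# Note: load_O_adsorption returns a nested structure of the form (illustrative):
#   {"Pt": {(1, 1, 1): {clean_SlabEntry: [adsorbed_SlabEntry, ...]}}, ...}
# i.e. element -> Miller index -> clean SlabEntry -> list of adsorbed SlabEntry objects.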
if __name__ == "__main__":
unittest.main()
repo_name: gmatteo/pymatgen
path: pymatgen/analysis/tests/test_surface_analysis.py
language: Python
license: mit
size: 22,561
keyword: ["pymatgen"]
text_hash: 2c02bf7fa5c170338812b9eb9da8297495b8997b7a7dae561f8fce72407a3b4a
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package, where they work on arbitrarily sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
sem
zmap
zscore
gstd
iqr
median_absolute_deviation
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
multiscale_graphcorr
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
epps_singleton_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
Statistical Distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
Support Functions
-----------------
.. autosummary::
:toctree: generated/
rankdata
rvs_ratio_uniforms
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from scipy.spatial.distance import cdist
from scipy.ndimage import measurements
from scipy._lib._util import (_lazywhere, check_random_state, MapWrapper,
rng_integers)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import mstats_basic
from .mstats_basic import _contains_nan
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from ._rvs_sampling import rvs_ratio_uniforms
from ._hypotests import epps_singleton_2samp
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway', 'F_onewayConstantInputWarning',
'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
'pearsonr', 'fisher_exact', 'SpearmanRConstantInputWarning',
'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata', 'rvs_ratio_uniforms',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'epps_singleton_2samp']
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Return the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
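# The geometric mean is evaluated as exp(mean(log(a))); note that zero or
# negative entries in `a` will therefore yield -inf/nan in log_a.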
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculate the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a >= 0):
# Harmonic mean is only defined if all elements are greater than or equal to zero.
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
with np.errstate(divide='ignore'):
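# Note: with divide errors ignored, any zero element contributes 1/0 == inf to
# the sum, so the harmonic mean of data containing a zero evaluates to 0.0.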
return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mode1D(a):
vals, cnts = np.unique(a, return_counts=True)
return vals[cnts.argmax()], cnts.max()
# np.apply_along_axis will convert the _mode1D tuples to a numpy array, casting types in the process
# This recreates the results without that issue
# View of a, rotated so the requested axis is last
in_dims = list(range(a.ndim))
a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])
inds = np.ndindex(a_view.shape[:-1])
modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
counts = np.zeros(a_view.shape[:-1], dtype=int)  # np.int is deprecated/removed in recent NumPy
for ind in inds:
modes[ind], counts[ind] = _mode1D(a_view[ind])
newshape = list(a.shape)
newshape[axis] = 1
return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
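# Illustration: _mask_to_limits(np.arange(5), (1, 3), (True, False)) keeps only
# [1, 2]; 0 falls below the (inclusive) lower limit, and 3, 4 are masked
# because the upper limit is exclusive here.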
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum.
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations; no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.full(np.asarray(moment).shape, np.nan, dtype=np.float64)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
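# Illustrative trace: for moment=5 the loop above yields n_list == [5, 2], so
# starting from s = a_zero_mean**2 the squaring loop below computes
# (a_zero_mean**2)**2 * a_zero_mean == a_zero_mean**5 in three multiplications.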
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Compute the coefficient of variation.
The coefficient of variation is the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5])
0.47140452079103173
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""
Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and :math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
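# Adjusted Fisher-Pearson coefficient from the Notes above:
# G_1 = sqrt(n*(n-1)) / (n-2) * m3 / m2**1.5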
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False, then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals - 3 if fisher else vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, each column is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
Two-sided p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
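# Note: log(t + sqrt(t**2 + 1)) == arcsinh(t), so the next line is equivalent
# to Z = delta * np.arcsinh(y / alpha).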
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The two-sided p-value for the hypothesis test.
Notes
-----
Valid only for n>20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
>>> np.random.seed(28041990)
>>> s = np.random.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> pts = 1000
>>> np.random.seed(28041990)
>>> a = np.random.normal(0, 1, size=pts)
>>> b = np.random.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 3.27207e-11
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = x.size
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
@np.deprecate(message="`itemfreq` is deprecated and will be removed in a "
"future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides, and it is significantly faster.
    Users with numpy >= 1.9 are therefore recommended to use
    `numpy.percentile` instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
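        # Fractional index: interpolate linearly between the two bracketing
        # order statistics, weighting each by its proximity to `idx`.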
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
if np.isnan(score):
return np.nan
a = np.asarray(a)
n = len(a)
if n == 0:
return 100.0
if kind == 'rank':
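        # 'rank': the mean of the 1-based positions that values equal to
        # `score` occupy in the sorted data, as a percentage of n; when
        # `score` is absent this reduces to the 'strict' percentile.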
left = np.count_nonzero(a < score)
right = np.count_nonzero(a <= score)
pct = (right + left + (1 if right > left else 0)) * 50.0/n
return pct
elif kind == 'strict':
return np.count_nonzero(a < score) / n * 100
elif kind == 'weak':
return np.count_nonzero(a <= score) / n * 100
elif kind == 'mean':
pct = (np.count_nonzero(a < score) + np.count_nonzero(a <= score)) / n * 50
return pct
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
    default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
        Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
sLast = None
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
sLast = a.shape
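    # If the transformed arrays do not all share the same shape, return a
    # 1-D object array of ndarrays (as documented above); otherwise return a
    # regular 2-D array.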
if sLast:
for arr in arrays[:-1]:
if sLast != arr.shape:
return np.array(arrays, dtype=object)
return np.array(arrays)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different to the default (0) used by other
ddof containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
mns = np.nanmean(a=a, axis=axis, keepdims=True)
sstd = np.nanstd(a=a, axis=axis, ddof=ddof, keepdims=True)
else:
mns = a.mean(axis=axis, keepdims=True)
sstd = a.std(axis=axis, ddof=ddof, keepdims=True)
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis, keepdims=True)
sstd = compare.std(axis=axis, ddof=ddof, keepdims=True)
return (scores - mns) / sstd
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array a float is returned.
Notes
-----
As the calculation requires the use of logarithms the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
The default value for `ddof` is different to the default value (0) used
by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the distribution is one, on a
log scale this evaluates to approximately ``exp(1)``.
>>> from scipy.stats import gstd
>>> np.random.seed(123)
>>> sample = np.random.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.7217860664589946
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation further handles masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.')
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.')
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w)
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types')
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
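# The 'normal' scale equals 2*sqrt(2)*erfinv(1/2) ~= 1.3490, the IQR of a
# standard normal distribution; dividing by it makes the IQR a consistent
# estimator of the standard deviation for normally distributed data.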
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`.
The following options are available (default is 'linear'):
* 'linear': `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower': `i`.
* 'higher': `j`.
* 'nearest': `i` or `j` whichever is nearest.
* 'midpoint': `(i + j) / 2`.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = np.nanpercentile
else:
percentile_func = np.percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
nan_policy='propagate'):
"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.3.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
        Axis along which the MAD is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the function
signature ``func(arr, axis)``.
    scale : float, optional
The scaling factor applied to the MAD. The default scale (1.4826)
ensures consistency with the standard deviation for normally distributed
data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
References
----------
.. [1] "Median absolute deviation" https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_absolute_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_absolute_deviation(x)
1.2280762773108278
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_absolute_deviation(x)
1.2340335571164334
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_absolute_deviation(x)
array([5.1891, 3.7065, 2.2239])
>>> stats.median_absolute_deviation(x, axis=None)
2.9652
"""
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
nan_shape = [item for i, item in enumerate(x.shape) if i != axis]
nan_array = np.full(nan_shape, np.nan)
if not nan_array.size:
return np.nan
else:
return nan_array
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'propagate':
return np.nan
if contains_nan and nan_policy == 'omit':
# Way faster than carrying the masks around
arr = ma.masked_invalid(x).compressed()
else:
arr = x
if axis is None:
med = center(arr)
mad = np.median(np.abs(arr - med))
else:
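        # np.apply_over_axes keeps the reduced axes as size-one dimensions,
        # so `med` broadcasts correctly against `arr` below.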
med = np.apply_over_axes(center, arr, axis)
mad = np.median(np.abs(arr - med), axis=axis)
return scale * mad
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Perform iterative sigma-clipping of array elements.
Starting from the full sample, all elements outside the critical range are
removed, i.e. all elements of the input array `c` that satisfy either of
the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
        Proportion (in range 0-1) of total data set to trim off each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
(i.e. conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
        Fraction to cut off from the 'left' or 'right' of the distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
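    Examples
    --------
    Trimming the lowest 25% of 20 values leaves 15 values; the order of the
    returned values is not guaranteed (illustrative example).
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trim1(a, 0.25, tail='left')
    >>> b.shape
    (15,)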
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
class F_onewayConstantInputWarning(RuntimeWarning):
"""Warning generated by `f_oneway` when an input is constant, e.g.
each of the samples provided is a constant array"""
def __init__(self, msg=None):
if msg is None:
msg = ("Each of the input arrays is constant;"
"the F-value is not defined or infinite")
self.args = (msg,)
def f_oneway(*args):
"""
Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Warns
-----
F_onewayConstantInputWarning
        Raised if each of the input arrays is a constant array.
        In this case the F-value is either infinite or not defined, so
        ``np.inf`` or ``np.nan`` is returned for the F-value.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
    If each group consists of constant values, two special cases apply:
    if at least two groups differ from each other, the function returns
    (``np.inf``, 0); if all values in all groups are identical, it returns
    (``np.nan``, ``np.nan``).
    The algorithm is from Heiman [2], pp. 394-397.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
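    If every group is constant but at least two groups differ, the special
    case described in the Notes applies: the statistic is infinite and the
    p-value is zero (an illustrative sketch; an `F_onewayConstantInputWarning`
    is also emitted).
    >>> F, p = stats.f_oneway([3, 3, 3], [5, 5, 5])
    >>> F, p
    (inf, 0)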
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Check if the values within each group are constant
# And the common value in at least one group is different
# from that in another group - special cases
# Based on https://github.com/scipy/scipy/issues/11669
const_groups = True
for group in args:
if not all(x == group[0] for x in group):
const_groups = False
break
if const_groups:
warnings.warn(F_onewayConstantInputWarning())
if len(set(group[0] for group in args)) > 1:
return F_onewayResult(np.inf, 0)
else:
return F_onewayResult(np.nan, np.nan)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
normalized_ss = _square_of_sums(alldata) / bign
sstot = _sum_of_squares(alldata) - normalized_ss
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / len(a)
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= normalized_ss
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / dfbn
msw = sswn / dfwn
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficent "
"is not defined.")
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is nearly constant; the computed "
"correlation coefficent may be inaccurate.")
self.args = (msg,)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is
the mean of the vector :math:`y`.
Under the assumption that x and y are drawn from independent normal
distributions (so the population correlation coefficient is 0), the
probability density function of the sample correlation coefficient r
is ([1]_, [2]_)::
(1 - r**2)**(n/2 - 2)
f(r) = ---------------------
B(1/2, n/2 - 1)
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pearsonr(a, b)
(0.8660254037844386, 0.011724811003954649)
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, np.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
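    # As explained in the Notes, for n == 2 the correlation is exactly +1 or
    # -1 and the two-sided p-value is always 1.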
if n == 2:
return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""
Perform a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
             Atlantic   Indian
    whales       8         2
    sharks       1         5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
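As a sketch (not the library's internal two-sided algorithm), the returned
unconditional odds ratio and the one-sided ``'less'`` p-value can be
reproduced by hand from the table and the hypergeometric distribution:
>>> import numpy as np
>>> table = np.array([[8, 2], [1, 5]])
>>> np.isclose(oddsratio, table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1]))
True
>>> _, p_less = stats.fisher_exact(table, alternative='less')
>>> np.isclose(p_less, stats.hypergeom.cdf(8, 16, 10, 9))
True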
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
class SpearmanRConstantInputWarning(RuntimeWarning):
"""Warning generated by `spearmanr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficent "
"is not defined.")
self.args = (msg,)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). The correlation matrix is square,
with length equal to the total number of variables (columns or rows) in
``a`` and ``b`` combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated. It has the same dimensions as
``correlation``.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
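A sketch of the rank-then-correlate definition (the implementation itself
uses ``np.corrcoef`` on ranked data, so the two agree):
>>> import numpy as np
>>> rho, _ = stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7])
>>> r_ranks, _ = stats.pearsonr(stats.rankdata([1, 2, 3, 4, 5]),
...                             stats.rankdata([5, 6, 7, 8, 7]))
>>> np.isclose(rho, r_ranks)
True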
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, supplied axis argument {}, please use only values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).sum(axis=axisout)
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
olderr = np.seterr(divide='ignore')
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), dof)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value.
pvalue : float
Two-sided p-value.
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}
         \sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
where :math:`\overline{Y_{0}}` and :math:`\overline{Y_{1}}` are the means of
the metric observations coded 0 and 1 respectively; :math:`N_{0}` and
:math:`N_{1}` are the numbers of observations coded 0 and 1 respectively;
:math:`N` is the total number of observations and :math:`s_{y}` is the
standard deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
No. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
https://doi.org/10.1002/9781118445112.stat06227
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
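Because the statistic is a t-test in disguise (see the notes above), its
p-value matches an equal-variance two-sample t-test between the two groups;
a sketch using the arrays ``a`` and ``b`` defined above:
>>> import numpy as np
>>> rpb, p = stats.pointbiserialr(a, b)
>>> t, p_t = stats.ttest_ind(b[a == 1], b[a == 0])
>>> np.isclose(p, p_t)
True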
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto'):
"""
Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the 1945 "tau-b" version of Kendall's
tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
version [1]_ in absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off between
speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
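A brute-force sketch of the tau-b definition given in the notes (practical
only for tiny inputs), using the same ``x1``, ``x2`` and ``tau`` as above:
>>> import numpy as np
>>> from itertools import combinations
>>> P = Q = T = U = 0
>>> for (xi, yi), (xj, yj) in combinations(zip(x1, x2), 2):
...     if xi == xj and yi == yj:
...         continue      # tie in both rankings: counted in neither T nor U
...     elif xi == xj:
...         T += 1        # tie only in x
...     elif yi == yj:
...         U += 1        # tie only in y
...     elif (xi - xj) * (yi - yj) > 0:
...         P += 1        # concordant pair
...     else:
...         Q += 1        # discordant pair
>>> np.isclose(tau, (P - Q) / np.sqrt((P + Q + T) * (P + Q + U)))
True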
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y, method=method)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
# Exact p-value, see p. 68 of Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970.
c = min(dis, tot-dis)
if size <= 0:
raise ValueError
elif c < 0 or 2*c > size*(size-1):
raise ValueError
elif size == 1:
pvalue = 1.0
elif size == 2:
pvalue = 1.0
elif c == 0:
pvalue = 2.0/math.factorial(size) if size < 171 else 0.0
elif c == 1:
pvalue = 2.0/math.factorial(size-1) if (size-1) < 171 else 0.0
elif 2*c == tot:
pvalue = 1.0
else:
new = [0.0]*(c+1)
new[0] = 1.0
new[1] = 1.0
for j in range(3,size+1):
old = new[:]
for k in range(1,min(j,c+1)):
new[k] += new[k-1]
for k in range(j,c+1):
new[k] += new[k-1] - old[k-j]
pvalue = 2.0*sum(new)/math.factorial(size) if size < 171 else 0.0
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
size * (size - 1) * (size - 2))
pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
else:
raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.")
return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""
Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element, and a weigher function, which
assigns a weight to each element based on its rank. The weight of an
exchange is then the sum or the product of the weights of the ranks of
the exchanged elements. The default parameters compute
:math:`\tau_\mathrm h`: an exchange between elements with rank
:math:`r` and :math:`s` (starting from zero) has weight
:math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
external criterion of importance. If, as usually happens, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters.
Note that if you are computing the weighted :math:`\tau` on arrays of
ranks, rather than of scores (i.e., a larger value implies a lower
rank) you must negate the ranks, so that elements of higher rank are
associated with a larger value.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null statistics is unknown (even in the
additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
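The default hyperbolic weighting described above can be made explicit by
passing the equivalent weigher; a sketch using the ``x`` and ``y`` defined
just above:
>>> import numpy as np
>>> tau_default, _ = stats.weightedtau(x, y)
>>> tau_explicit, _ = stats.weightedtau(x, y, weigher=lambda r: 1/(r + 1))
>>> np.isclose(tau_default, tau_explicit)
True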
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
# Reduce unsupported types to ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size))
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP(object):
"""
Helper class used to calculate the permuted test statistics in parallel.
"""
def __init__(self, x, y, compute_distance, random_states):
self.x = x
self.y = y
self.compute_distance = compute_distance
self.random_states = random_states
def __call__(self, index):
permx = self.random_states[index].permutation(self.x)
permy = self.random_states[index].permutation(self.y)
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(permx, permy, self.compute_distance)[0]
return perm_stat
def _perm_test(x, y, stat, compute_distance, reps=1000, workers=-1,
random_state=None):
r"""
Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
compute_distance : callable
A function that computes the distance or similarity among the samples
within each data matrix. Set to `None` if `x` and `y` are already
distance matrices.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : int or np.random.RandomState instance, optional
If already a RandomState instance, use it.
If seed is an int, return a new RandomState instance seeded with seed.
If None, use np.random.RandomState. Default is None.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
mapwrapper = MapWrapper(workers)
parallelp = _ParallelP(x=x, y=y, compute_distance=compute_distance,
random_states=random_states)
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
pvalue = (null_dist >= stat).sum() / reps
# correct a p-value of exactly 0: with a finite number of permutations,
# a p-value of 0 is not meaningful, so report 1/reps instead
if pvalue == 0:
pvalue = 1 / reps
return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""
Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
called the "scale". A priori, however, it is not know which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
have been derived from and benchmarked in [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
random_state : int or np.random.RandomState instance, optional
If already a RandomState instance, use it.
If seed is an int, return a new RandomState instance seeded with seed.
If None, use np.random.RandomState. Default is None.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
Contains additional useful returns with the following
keys:
- mgc_map : ndarray
A 2D representation of the latent geometry of the relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
the :math:`i`-th row of :math:`B`
* Let :math:`\circ` denote the entry-wise matrix product; then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
:math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
(which essentially sets all isolated large correlations to 0 and keeps
connected large correlations the same as before, see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
The test statistic returns a value between :math:`(-1, 1)` since it is
normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n`
and :math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. ArXiv:1907.02088 [Cs, Stat].
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing. ArXiv:1806.05514
[Cs, Stat].
Examples
--------
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
>>> '%.1f, %.3f' % (stat, pvalue)
'1.0, 0.001'
Alternatively,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)
'1.0, 0.001'
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> mgc = multiscale_graphcorr(x, y, random_state=1)
>>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue)
'0.033, 0.02'
or, if shape of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)
>>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue)
'-0.008, 1.0'
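Precomputed distance matrices can also be supplied directly by setting
``compute_distance`` to ``None``; a sketch (the statistic again reflects the
perfect dependence of identical inputs):
>>> import numpy as np
>>> from scipy.spatial.distance import cdist
>>> x = np.arange(100).reshape(-1, 1).astype(np.float64)
>>> dx = cdist(x, x)
>>> mgc = multiscale_graphcorr(dx, dx, compute_distance=None, random_state=1)
>>> '%.1f' % mgc.stat
'1.0'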
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
# check that compute_distance is a callable (or None)
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
# check that the number of reps is a valid integer (fewer than 1000 raises
# a warning)
if not isinstance(reps, int) or reps < 0:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
x, y = _two_sample_transform(x, y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y, compute_distance)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, compute_distance, reps=reps,
workers=workers, random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(x, y, compute_distance):
r"""
Helper function that calculates the MGC stat. See above for use.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)` or `(n, n)` and `(n, n)`
if distance matrices.
compute_distance : callable
A function that computes the distance or similarity among the samples
within each data matrix. Set to `None` if `x` and `y` are already
distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
Contains additional useful returns with the following
keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
"""
# set distx and disty to x and y when compute_distance = None
distx = x
disty = y
if compute_distance is not None:
# compute distance matrices for x and y
distx = compute_distance(x)
disty = compute_distance(y)
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
# the global scale is the statistic calculated at the maximal nearest
# neighbors. There is not enough local scale to search over, so
# default to global scale
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# the global scale is the statistic calculated at the maximal nearest
# neighbors. The threshold is the maximum over the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""
Finds the smoothed maximum within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect: ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map: ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# the global scale is the statistic calculated at the maximal nearest
# neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""
Helper function that concatenates x and y for two sample MGC stat. See
above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`,
Returns
-------
x : ndarray
Concatenate `u` and `v` along ``axis = 0``. `x` thus has shape
`(n + m, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that come from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(n + m, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculate the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
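A sketch of the statistic itself, computed by hand for the first column of
``rvs`` (this mirrors the internal formula ``(mean - popmean) / sqrt(var/n)``):
>>> import numpy as np
>>> x = rvs[:, 0]
>>> t_manual = (x.mean() - 5.0) / np.sqrt(x.var(ddof=1) / len(x))
>>> t_scipy, _ = stats.ttest_1samp(x, 5.0)
>>> np.isclose(t_manual, t_scipy)
True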
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
                     Sample   Sample
               Size   Mean    Variance
    Sample 1    13    15.0      87.5
    Sample 2    11    12.0      39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
                    Number of   Sample   Sample
              Size    ones       Mean    Variance
    Sample 1   150     30        0.2      0.16
    Sample 2   200     45        0.225    0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
arrays of 0s and 1s and `scipy.stats.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
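Welch's variant (``equal_var=False``) works the same way from summary
statistics; a sketch checking it against `ttest_ind` on the raw arrays
``a`` and ``b`` from above:
>>> import numpy as np
>>> res_stats = ttest_ind_from_stats(mean1=a.mean(), std1=a.std(ddof=1), nobs1=len(a),
...                                  mean2=b.mean(), std2=b.std(ddof=1), nobs2=len(b),
...                                  equal_var=False)
>>> res_raw = ttest_ind(a, b, equal_var=False)
>>> np.allclose(res_stats, res_raw)
True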
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Notes
-----
Examples for use are scores of the same set of student in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution F(x) of an observed
random variable against a given distribution G(x). Under the null
hypothesis, the two distributions are identical, F(x)=G(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'approx'):
* 'approx': use approximation to exact distribution of test statistic
* 'asymp': use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function G(x) of the
hypothesis, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, str):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, str):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
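# Dplus is the largest amount by which the empirical CDF exceeds the
# hypothesized CDF; Dmin is the largest amount in the other direction.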
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used::
    String               Value   Description
    "pearson"              1     Pearson's chi-squared statistic.
                                 In this case, the function is
                                 equivalent to `stats.chisquare`.
    "log-likelihood"       0     Log-likelihood ratio. Also known as
                                 the G-test [3]_.
    "freeman-tukey"      -1/2    Freeman-Tukey statistic.
    "mod-log-likelihood"  -1     Modified log-likelihood ratio.
    "neyman"              -2     Neyman's statistic.
    "cressie-read"        2/3    The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs.astype(np.float64) - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
scipy.stats.power_divergence
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def _compute_prob_inside_method(m, n, g, h):
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n| >= h / lcm(m, n)
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end, but is not sufficient to avoid
# the large dynamic range which appears during the calculation.
# Instead we rescale based on the magnitude of the rightmost term in
# the column and keep track of an exponent separately and apply
# it at the end of the calculation. Similarly when multiplying by
# the binomial coefficient.
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
expnt = 0
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastlen = minj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Rescale if the right most value is over 2**900
val = A[maxj - minj - 1]
_, valexpt = math.frexp(val)
if valexpt > 900:
# Scaling to bring down to about 2**800 appears
# sufficient for sizes under 10000.
valexpt -= 800
A = np.ldexp(A, -valexpt)
expnt += valexpt
val = A[maxj - minj - 1]
# Now divide by the binomial (m+n)!/m!/n!
for i in range(1, n + 1):
val = (val * i) / (m + i)
_, valexpt = math.frexp(val)
if valexpt < -128:
val = np.ldexp(val, -valexpt)
expnt += valexpt
# Finally scale if needed.
return np.ldexp(val, expnt)
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... ) / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h):
"""
Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Exceptions
----------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# x_j (with 0 <= x_j <= m) is the smallest integer for which n*x_j - m*j >= g*h
xj = [int(np.ceil((h + mg * j)/ng)) for j in range(n+1)]
xj = [_ for _ in xj if _ <= m]
lxj = len(xj)
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom() may return a float.
# Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
dec = bin * B[i]
Bj -= dec
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Compute the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution. The alternative hypothesis
can be either 'two-sided' (default), 'less' or 'greater'.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
Two arrays of sample observations assumed to be drawn from a continuous
distribution; sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use approximation to exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
Two-tailed p-value.
See Also
--------
kstest
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample KS test, the distribution is
assumed to be continuous.
In the one-sided test, the alternative is that the empirical
cumulative distribution function F(x) of the data1 variable is "less"
or "greater" than the empirical cumulative distribution function G(x)
of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
If the KS statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
If the mode is 'auto', the computation is exact if both sample sizes are
no greater than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333334, 5.129279597781977e-05)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14691437867433876)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
LARGE_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= LARGE_N
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
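# minS and maxS are the largest amounts by which cdf2 exceeds cdf1 and vice
# versa; the 'two-sided' statistic is the larger of the two.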
minS = -np.min(cddiffs)
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
if max(n1, n2) <= LARGE_N:
mode = 'exact'
else:
mode = 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int).max / n2g:
mode = 'asymp'
warnings.warn(
"Exact ks_2samp calculation not possible with samples sizes "
"%d and %d. Switching to 'asymp' " % (n1, n2), RuntimeWarning)
saw_fp_error = False
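# In 'exact' mode the statistic is expressed as an integer number h of
# 1/lcm(n1, n2) steps so the lattice-path counting helpers above can be used.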
if mode == 'exact':
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
prob = 1.0
else:
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
prob = 1.0
for j in range(h):
prob = (n1 - j) * prob / (n1 + j + 1)
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if not np.isfinite(bin) or not np.isfinite(num_paths) or num_paths > bin:
raise FloatingPointError()
prob = num_paths / bin
except FloatingPointError:
# Switch mode
mode = 'asymp'
saw_fp_error = True
# Can't raise warning here, inside the try
finally:
if saw_fp_error:
if original_mode == 'exact':
warnings.warn(
"ks_2samp: Exact calculation overflowed. "
"Switching to mode=%s" % mode, RuntimeWarning)
else:
if prob > 1 or prob < 0:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(
"ks_2samp: Exact calculation incurred large"
" rounding error. Switching to mode=%s" % mode,
RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
if alternative == 'two-sided':
en = np.sqrt(n1 * n2 / (n1 + n2))
# Switch to using kstwo.sf() when it becomes available.
# prob = distributions.kstwo.sf(d, int(np.round(en)))
prob = distributions.kstwobign.sf(en * d)
else:
m, n = max(n1, n2), min(n1, n2)
z = np.sqrt(m*n/(m+n)) * d
# Use Hodges' suggested approximation Eqn 5.3
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = (0 if prob < 0 else (1 if prob > 1 else prob))
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
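# idx marks the boundaries between runs of tied ranks; cnt holds the size of
# each tie group.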
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Compute the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : {None, 'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is None):
* None: computes p-value half the size of the 'two-sided' p-value and
a different U statistic. The default behavior is not the same as
using 'less' or 'greater'; it only exists for backward compatibility
and is deprecated.
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
Use of the None option is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the obtained U is less than or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
References
----------
.. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test
.. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random
Variables is Stochastically Larger than the Other," The Annals of
Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947.
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
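# Normal approximation: mean of U under H0 (with an optional continuity
# correction) and its standard deviation, adjusted for ties via T.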
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The two-sided p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
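# Under H0 the rank sum of x has mean n1*(n1+n2+1)/2 and variance
# n1*n2*(n1+n2+1)/12; z is the standardized statistic.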
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
for a in args:
a = ma.masked_invalid(a)
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
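# Correct H for ties and compare it to a chi-squared distribution with
# k - 1 degrees of freedom.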
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Compute the Friedman test for repeated measurements.
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
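# Friedman statistic from the squared column rank sums, divided by the tie
# correction factor c.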
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""
Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
assumption of equal variances between the two groups. Note that this does
not assume the distributions are the same. This test works on two independent samples,
which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Brunner-Munzel W statistic.
pvalue : float
p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
Brunner and Munzel recommended estimating the p-value with the t-distribution
when the sample size is 50 or less. If the size is lower than 10, it would
be better to use the permuted Brunner-Munzel test (see [2]_).
References
----------
.. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
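# Sx and Sy are the rank-based variance estimates used by Brunner and Munzel;
# wbfn is the studentized W statistic.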
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests bearing upon the same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [6]_ [7]_.
Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's methods, the
sum of the logarithms is multiplied by -2 in the implementation. This
quantity has a chi-square distribution that determines the p-value. The
`mudholkar_george` method is the difference of Fisher's and Pearson's
test statistics, each of which includes the -2 factor [4]_. However, the
`mudholkar_george` method does not include these -2 factors. The test
statistic of `mudholkar_george` is the sum of logistic random variables and
equation 3.6 in [3]_ is used to approximate the p-value based on Student's
t-distribution.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [5] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.sf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
    "Invalid method %r. Options are 'fisher', 'pearson', "
    "'mudholkar_george', 'tippett' or 'stouffer'." % method)
return (statistic, pval)
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The first Wasserstein distance between the distributions :math:`u` and
:math:`v` is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively.
If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
:math:`v`, this distance also equals:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [2]_ for a proof of the equivalence of both definitions.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
"""
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
respective CDFs are :math:`U` and :math:`V`, equals:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramer-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramer-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
    p is a positive parameter; p = 1 gives the Wasserstein distance, while the
    energy distance above is computed as sqrt(2) times the p = 2 distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
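# Editorial sketch (not part of the original module): for equally sized,
# unweighted samples, the CDF-difference integral computed by
# _cdf_distance(1, ...) coincides with the classic "sorted samples" formula for
# the 1D Wasserstein distance, mean(|sort(u) - sort(v)|). The helper below is
# hypothetical and only intended as a sanity check of that construction.
def _wasserstein_sorted_samples_check(u_values, v_values):
    """Compare _cdf_distance(1, ...) with the sorted-samples formula.

    Assumes unweighted samples of equal size; e.g. ([0, 1, 3], [5, 6, 8])
    returns (5.0, 5.0).
    """
    u = np.sort(np.asarray(u_values, dtype=float))
    v = np.sort(np.asarray(v_values, dtype=float))
    if u.size != v.size:
        raise ValueError("This check assumes equally sized samples.")
    brute_force = np.mean(np.abs(u - v))
    cdf_based = _cdf_distance(1, u_values, v_values)
    return brute_force, cdf_based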
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
"""
Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""
Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
    _sum_of_squares : The sum of squares (the opposite of `_square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
def rankdata(a, method='average', *, axis=None):
"""
Assign ranks to data, dealing with ties appropriately.
By default (``axis=None``), the data array is first flattened, and a flat
array of ranks is returned. Separately reshape the rank array to the
shape of the data array if desired (see Examples).
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. If ``None``, the data array
is first flattened.
Returns
-------
ranks : ndarray
An array of size equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
>>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
array([[1. , 2.5],
[4. , 2.5]])
>>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
array([[1. , 2.5, 2.5],
[2. , 1. , 3. ]])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
if axis is not None:
a = np.asarray(a)
if a.size == 0:
# The return values of `normalize_axis_index` are ignored. The
# call validates `axis`, even though we won't use it.
# use scipy._lib._util._normalize_axis_index when available
np.core.multiarray.normalize_axis_index(axis, a.ndim)
dt = np.float64 if method == 'average' else np.int_
return np.empty(a.shape, dtype=dt)
return np.apply_along_axis(rankdata, axis, a, method)
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
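# Editorial note: worked trace of the tie handling above for a = [0, 2, 3, 2]
# (the tie order inside `sorter` may vary, the resulting ranks do not):
#   sorter    -> e.g. [0, 1, 3, 2], so arr[sorter] = [0, 2, 2, 3]
#   obs       -> [True, True, False, True]  (True marks the first occurrence)
#   dense     -> obs.cumsum()[inv] = [1, 2, 3, 2]
#   count     -> [0, 1, 3, 4]  (cumulative counts of the unique values)
#   'max'     -> count[dense]         = [1, 3, 4, 3]
#   'min'     -> count[dense - 1] + 1 = [1, 2, 4, 2]
#   'average' -> 0.5 * ('max' + 'min') = [1.0, 2.5, 4.0, 2.5]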
|
person142/scipy
|
scipy/stats/stats.py
|
Python
|
bsd-3-clause
| 257,101
|
[
"DIRAC"
] |
eb51209a3e4b70a325a1add66e16f974195b0a94fa080121019c79e16f4a4c02
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vit_moe."""
from absl.testing import absltest
import jax
from vmoe.nn import vit_moe
# Default configuration for the V-MoE.
DEFAULT_TEST_CONFIG = {
'num_classes': 4,
'patch_size': (2, 2),
'hidden_size': 8,
'encoder': {
'num_layers': 2,
'mlp_dim': 16,
'num_heads': 2,
'moe': {
'layers': (1,),
'num_experts': 4,
'group_size': 4,
'router': {
'num_selected_experts': 1,
'noise_std': 1e-3,
'importance_loss_weight': 0.02,
'load_loss_weight': 0.02,
'dispatcher': {
'name': 'einsum',
'capacity': 2,
'batch_priority': False,
'bfloat16': False,
}
},
},
'dropout_rate': 0.0,
'attention_dropout_rate': 0.0,
},
'classifier': 'gap',
'representation_size': None,
}
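# Editorial note: how the configuration above determines the expected parameter
# shapes below (the shapes themselves are from the original test):
# - hidden_size=8 with num_heads=2 gives a per-head dimension of 8 // 2 = 4,
#   hence attention kernels of shape (8, 2, 4) and an output kernel of (2, 4, 8).
# - patch_size=(2, 2) applied to the 4x4 test images yields 2 * 2 = 4 tokens,
#   hence pos_embedding (1, 4, 8) and an embedding kernel (2, 2, 3, 8) for RGB input.
# - mlp_dim=16 gives dense MLP kernels (8, 16) and (16, 8); the MoE layer
#   replicates them over num_experts=4, adding a leading expert dimension.
# - The router projects hidden_size=8 to num_experts=4 (kernel (8, 4)) and the
#   head projects hidden_size=8 to num_classes=4 (kernel (8, 4)).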
EXPECTED_DEFAULT_ATTENTION_SHAPES = {
'key': {'bias': (2, 4), 'kernel': (8, 2, 4)},
'out': {'bias': (8,), 'kernel': (2, 4, 8)},
'query': {'bias': (2, 4), 'kernel': (8, 2, 4)},
'value': {'bias': (2, 4), 'kernel': (8, 2, 4)},
}
EXPECTED_DEFAULT_MLP_SHAPES = {
'Dense_0': {'bias': (16,), 'kernel': (8, 16)},
'Dense_1': {'bias': (8,), 'kernel': (16, 8)},
}
EXPECTED_DEFAULT_MOE_SHAPES = {
'Dense_0': {'bias': (4, 16), 'kernel': (4, 8, 16)},
'Dense_1': {'bias': (4, 8), 'kernel': (4, 16, 8)},
}
EXPECTED_DEFAULT_LAYER_NORM_SHAPES = {'bias': (8,), 'scale': (8,)}
class VitMoeTest(absltest.TestCase):
def test_initialize_shapes(self):
"""Tests that the shapes of the parameters are the expected ones."""
def init(rngs, x):
model = vit_moe.VisionTransformerMoe(**DEFAULT_TEST_CONFIG)
return model.init(rngs, x)
rngs = dict(params=jax.random.PRNGKey(0), gating=jax.random.PRNGKey(1))
x = jax.random.normal(jax.random.PRNGKey(0), (16, 4, 4, 3))
shapes = jax.tree_map(lambda x: x.shape, jax.eval_shape(init, rngs, x))
shapes = shapes.unfreeze()
expected_shapes = {
'params': {
'Encoder': {
'encoder_norm': EXPECTED_DEFAULT_LAYER_NORM_SHAPES,
'posembed_input': {'pos_embedding': (1, 4, 8)},
'encoderblock_0': {
'LayerNorm_0': EXPECTED_DEFAULT_LAYER_NORM_SHAPES,
'LayerNorm_1': EXPECTED_DEFAULT_LAYER_NORM_SHAPES,
'SelfAttention': EXPECTED_DEFAULT_ATTENTION_SHAPES,
'Mlp': EXPECTED_DEFAULT_MLP_SHAPES,
},
'encoderblock_1': {
'LayerNorm_0': EXPECTED_DEFAULT_LAYER_NORM_SHAPES,
'LayerNorm_1': EXPECTED_DEFAULT_LAYER_NORM_SHAPES,
'SelfAttention': EXPECTED_DEFAULT_ATTENTION_SHAPES,
'Moe': {
'Mlp': EXPECTED_DEFAULT_MOE_SHAPES,
'Router': {'dense': {'kernel': (8, 4)}},
},
},
},
'embedding': {'bias': (8,), 'kernel': (2, 2, 3, 8)},
'head': {'bias': (4,), 'kernel': (8, 4)},
}
}
self.assertDictEqual(shapes, expected_shapes)
def test_forward(self):
"""Tests that the model runs in forward mode. Correctness is not tested."""
model = vit_moe.VisionTransformerMoe(**DEFAULT_TEST_CONFIG)
rngs = dict(params=jax.random.PRNGKey(0), gating=jax.random.PRNGKey(1))
x = jax.random.normal(jax.random.PRNGKey(0), (16, 4, 4, 3))
output, _ = model.init_with_output(rngs, x)
self.assertIsInstance(output, tuple)
output, metrics = output
self.assertIn('auxiliary_loss', metrics)
self.assertTupleEqual(output.shape, (16, 4))
if __name__ == '__main__':
absltest.main()
|
google-research/vmoe
|
vmoe/nn/vit_moe_test.py
|
Python
|
apache-2.0
| 4,412
|
[
"MOE"
] |
916abee7cfbe4a4a7927351f6627425ad9b12890e29431f23e37683e06c8ad92
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible import constants as C
from ansible.module_utils.common.text.converters import container_to_text, to_native
from ansible.module_utils.six import string_types, PY2
from ansible.module_utils.six.moves import builtins
from ansible.plugins.loader import filter_loader, test_loader
def safe_eval(expr, locals=None, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained).
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
locals = {} if locals is None else locals
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
OUR_GLOBALS = {
'__builtins__': {}, # avoid global builtins as per eval docs
'false': False,
'null': None,
'true': True,
# also add back some builtins we do need
'True': True,
'False': False,
'None': None
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
# ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
# And in Python 3.6 too, although not encountered until Python 3.8, see https://bugs.python.org/issue32892
if sys.version_info[:2] >= (3, 6):
SAFE_NODES.update(
set(
(ast.Constant,)
)
)
filter_list = []
for filter_ in filter_loader.all():
try:
filter_list.extend(filter_.filters().keys())
except Exception:
# This is handled and displayed in JinjaPluginIntercept._load_ansible_plugins
continue
test_list = []
for test in test_loader.all():
try:
test_list.extend(test.tests().keys())
except Exception:
# This is handled and displayed in JinjaPluginIntercept._load_ansible_plugins
continue
CALL_ENABLED = C.CALLABLE_ACCEPT_LIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
# Disallow calls to builtin functions that we have not vetted
# as safe. Other functions are excluded by setting locals in
# the call to eval() later on
if hasattr(builtins, node.id) and node.id not in CALL_ENABLED:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, '<expr %s>' % to_native(expr), 'eval')
# Note: passing our own globals and locals here constrains what
# callables (and other identifiers) are recognized. this is in
# addition to the filtering of builtins done in CleansingNodeVisitor
result = eval(compiled, OUR_GLOBALS, dict(locals))
if PY2:
# On Python 2 u"{'key': 'value'}" is evaluated to {'key': 'value'},
# ensure it is converted to {u'key': u'value'}.
result = container_to_text(result)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
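# Editorial note illustrating the behaviour of safe_eval as written above (no
# new API is introduced here):
# - JSON-style literals resolve through OUR_GLOBALS, e.g.
#       safe_eval("[1, 2.5, true, null]")  ->  [1, 2.5, True, None]
# - Expressions containing disallowed nodes (calls, attribute access, ...) make
#   CleansingNodeVisitor raise; the exception is caught and the original string
#   is returned unchanged, e.g.
#       safe_eval("__import__('os').system('ls')")
#   returns the string itself, and with include_exceptions=True it returns
#   (expr, exception) instead.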
|
pmarques/ansible
|
lib/ansible/template/safe_eval.py
|
Python
|
gpl-3.0
| 5,976
|
[
"VisIt"
] |
d666bb79328859b39a1b333d26d8e7464f4d62d0e1c84cf3c3619c20d99d7a0a
|
BOARDS = {
'arduino': {
'digital': tuple(x for x in range(14)),
'analog': tuple(x for x in range(6)),
'pwm': (3, 5, 6, 9, 10, 11),
'use_ports': True,
'disabled': (0, 1) # Rx, Tx, Crystal
},
'arduino_mega': {
'digital': tuple(x for x in range(54)),
'analog': tuple(x for x in range(16)),
'pwm': tuple(x for x in range(2, 14)),
'use_ports': True,
'disabled': (0, 1) # Rx, Tx, Crystal
},
'arduino_due': {
'digital': tuple(x for x in range(54)),
'analog': tuple(x for x in range(12)),
'pwm': tuple(x for x in range(2, 14)),
'use_ports': True,
'disabled': (0, 1) # Rx, Tx, Crystal
}
}
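# Editorial sketch (hypothetical helper, not part of pyFirmata's API): one way
# the board layouts above could be queried before configuring a pin.
def pin_supports_pwm(board_name, pin):
    """Return True if `pin` is a usable PWM pin on the given board layout."""
    layout = BOARDS[board_name]
    return pin in layout['pwm'] and pin not in layout['disabled']

# Example: pin_supports_pwm('arduino', 9) is True, while pin 2 (digital only)
# and pin 0 (disabled, Rx) both return False.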
|
jochasinga/pyFirmata
|
pyfirmata/boards.py
|
Python
|
mit
| 727
|
[
"CRYSTAL"
] |
b3b1cad217bacc211e06ee7b670aec88a8929348349813d29794e065a3e98cdb
|
import re
from tests import factories as f
from wye.base.constants import WorkshopLevel
outbox_len = 0
def create_user(password):
user = f.create_user()
user.set_password(password)
user.save()
return user
def login(browser, url, user, password):
browser.visit(url)
browser.fill('login', user.email)
browser.fill('password', password)
browser.find_by_css('[type=submit]')[0].click()
def login_and_confirm(browser, url, outbox, user, password):
global outbox_len
outbox_len = outbox_len + 1
browser.visit(url)
browser.fill('login', user.email)
browser.fill('password', password)
browser.find_by_css('[type=submit]')[0].click()
# assert len(outbox) == outbox_len
mail = outbox[len(outbox) - 1]
confirm_link = re.findall(r'http.*/accounts/.*/', mail.body)
assert confirm_link
browser.visit(confirm_link[0])
    assert browser.title == "Confirm E-mail Address"
browser.find_by_css('[type=submit]')[0].click()
def workshop_create(browser, url, org, section):
browser.visit(url)
browser.fill('no_of_participants', 10)
browser.fill('expected_date', '11/12/2018')
browser.fill('description', "test")
browser.select('requester', org.id)
browser.select('workshop_level', WorkshopLevel.BEGINNER)
browser.select('workshop_section', section.id)
browser.find_by_css('[type=submit]')[0].click()
|
shankisg/wye
|
tests/functional/workshop/base.py
|
Python
|
mit
| 1,398
|
[
"VisIt"
] |
83684199e484600e5647073cc48a0227c1d23c5a21cedcd70528b96d89d4d074
|
"""
This is the window manager part of pySSN
pySSN is available under the GNU licence, provided you cite the developers' names:
Ch. Morisset (Instituto de Astronomia, Universidad Nacional Autonoma de Mexico)
D. Pequignot (Meudon Observatory, France)
Inspired by a demo code by:
Eli Bendersky (eliben@gmail.com)
"""
import sys, os
from PyQt4 import QtCore, QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from pyssn import log_, __version__
from ..core.spectrum import spectrum
from ..utils.misc import get_parser
from collections import OrderedDict
from ..utils.physics import CST
log_.level = 4
#ToDo :
class NavigationToolbar( NavigationToolbar2QT ):
curs = QtCore.pyqtSignal(bool)
def __init__(self, canvas, parent ):
NavigationToolbar2QT.__init__(self,canvas,parent)
self.clearButtons=[]
# Search through existing buttons
# next use for placement of custom button
next=None
for c in self.findChildren(QtGui.QToolButton):
if next is None:
next=c
# Don't want to see subplots and customize
"""
if str(c.text()) in ('Subplots', 'Customize'):
c.defaultAction().setVisible(False)
continue
"""
# Need to keep track of pan and zoom buttons
# Also grab toggled event to clear checked status of picker button
if str(c.text()) in ('Pan','Zoom'):
c.toggled.connect(self.clearCurs)
self.clearButtons.append(c)
next=None
# create custom button
pm=QtGui.QPixmap(32,32)
pm.fill(QtGui.QApplication.palette().color(QtGui.QPalette.Normal,QtGui.QPalette.Button))
painter=QtGui.QPainter(pm)
painter.fillRect(6,6,20,20,QtCore.Qt.red)
painter.fillRect(15,3,3,26,QtCore.Qt.blue)
painter.fillRect(3,15,26,3,QtCore.Qt.blue)
painter.end()
icon=QtGui.QIcon(pm)
ac = self.addAction(icon, "Toggle Curs")
ac.setCheckable(True)
        # TODO: figure out how to initialize the toggle state (see line below)
#ac.setChecked(True)
ac.toggled.connect(self.curs_toggle)
self.ac = ac
#button=QtGui.QToolButton(self)
#button.setDefaultAction(self.ac)
# Add it to the toolbar, and connect up event
#self.insertWidget(next.defaultAction(),button)
# Grab the picked event from the canvas
canvas.mpl_connect('pick_event',self.canvasPicked)
def clearCurs(self, checked):
if checked:
self.ac.setChecked(False)
def curs_toggle(self, checked):
self.curs.emit(checked)
def canvasPicked(self, event):
if self.ac.isChecked():
self.curs.emit(event.ind)
class AppForm(QtGui.QMainWindow):
def __init__(self, parent=None, init_filename=None, post_proc_file=None, use_workspace=False):
self.calling = 'pySSN GUI'
self.use_workspace = use_workspace
QtGui.QMainWindow.__init__(self, parent)
self.setWindowTitle('pySSN')
self.sp = None
self.axes = None
self.axes2 = None
self.axes3 = None
self.fig = None
self.init_file_name = init_filename
self.init_line_num = None
self.init_ion = None
self.init_xmin = None
self.init_xmax = None
self.init_y1min = None
self.init_y1max = None
self.init_y3min = None
self.init_y3max = None
self.init_legend_fontsize = None
self.init_legend_loc = None
self.init_nearby_line_num = None
self.init_nearby_ion = None
self.init_nearby_xmin = None
self.init_nearby_xmax = None
self.init_nearby_y1min = None
self.init_nearby_y1max = None
self.init_nearby_y3min = None
self.init_nearby_y3max = None
self.init_nearby_legend_fontsize = None
self.init_nearby_legend_loc = None
self.init_cont_line_num = None
self.init_cont_ion = None
self.init_cont_xmin = None
self.init_cont_xmax = None
self.init_cont_y1min = None
self.init_cont_y1max = None
self.init_cont_y3min = None
self.init_cont_y3max = None
self.init_cont_legend_fontsize = None
self.init_cont_legend_loc = None
self.call_on_draw = True
self.cursor_on = False
self.line_info_ref = 0
self.x_plot_lims = None
self.y1_plot_lims = None
self.y2_plot_lims = None
self.y3_plot_lims = None
self.xscale = None
self.yscale = None
self.post_proc_file = post_proc_file
self.tick_file = None
self.save_parameters_file = None
self.do_save = True
self.cont_par_changed = False
self.axes_fixed = False
self.showErrorBox = True
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.exec_init()
self.cont_pars_dialog = None
self.cursor_w1 = None
self.cursor_w2 = None
self.nearbyLines = None
self.nearbyLines_sort_by = 'i_tot'
self.nearbyLines_sort_reverse = True
self.nearbyLines_dialog = None
self.nearbyLines_selected_ions = None
self.line_info_dialog = None
self.instr_prof_dialog = None
self.refine_wave_dialog = None
self.refine_wave_as_table = False
self.interpol_cont_dialog = None
self.interpol_cont_as_table = False
self.fig_prof = None
self.green_tick_shown = False
self.magenta_tick_shown = False
self.addGreenTickToLegend = True
self.show_true_ions = False
self.nearbyDialogFilterIsActive = False
self.get_user_cont_points = False
self.del_user_cont_points = False
self.user_cont_editBox = None
self.showHelpBrowser = False
def closeEvent(self, evnt):
if self.sp.get_conf('save_parameters_on_exit'):
self.save_pars_as()
if self.cont_pars_dialog is not None:
self.cont_pars_dialog.close()
if self.nearbyLines_dialog is not None:
self.nearbyLines_dialog.close()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
if self.instr_prof_dialog is not None:
self.instr_prof_dialog.close()
if self.refine_wave_dialog is not None:
self.refine_wave_dialog.close()
if self.interpol_cont_dialog is not None:
self.interpol_cont_dialog.close()
def image_extension_list(self):
filetypes = self.canvas.get_supported_filetypes()
file_extensions = filetypes.keys()
file_extensions.sort()
return file_extensions
def image_filter(self, fileExt=''):
filetypes = self.canvas.get_supported_filetypes_grouped()
imagetype_list = filetypes.keys()
imagetype_list.sort()
s = ''
k = 0
for imagetype in imagetype_list:
extension_list = filetypes[ imagetype ]
if fileExt in extension_list:
k = imagetype_list.index(imagetype)
s = s + str(imagetype)
s1 = ' (*.' + str(extension_list[0])
for extension in extension_list[1:]:
s1 = s1 + ' *.' + str(extension)
s1 = s1 + ')'
s = s + s1 + s1 + ';;'
filter_str = s[:-2]
selectedFilter = s.split(';;')[k]
return filter_str, selectedFilter
def save_plot(self):
path = self.sp.get_conf('plot_filename')
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
def save_plot_as(self):
path = self.sp.get_conf('plot_filename')
extension = os.path.splitext(path)[1][1:].lower()
file_choices, selectedFilter = self.image_filter(extension)
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save plot to file', path, file_choices, selectedFilter))
if path:
extension = os.path.splitext(path)[1][1:].lower()
if extension in self.image_extension_list():
self.sp.set_conf('plot_filename', path)
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
else:
title = 'Error saving plot'
msg = 'Format "{0}" not supported.'.format(extension)
msg = msg + '\nSupported formats: '
extension_list = self.image_extension_list()
n = len(extension_list)-1
s = ''
for i in range(0,n):
s = s + extension_list[i] + ', '
s = s + extension_list[n] + '.'
msg = msg + s
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def on_about(self):
msg = """ pySSN (Spectral Synthesis for Nebulae):
"""
QtGui.QMessageBox.about(self, "About the demo", msg.strip())
def set_cursor(self, checked):
self.cursor_on = checked
self.sp.firstClick = True
def on_click(self, event):
if self.get_user_cont_points and self.user_cont_editBox is not None:
wave = event.xdata
i_list = [i for i in range(len(self.sp.w)-1) if self.sp.w[i] <= wave <= self.sp.w[i+1] or self.sp.w[i+1] <= wave <= self.sp.w[i]]
if len(i_list) == 1:
i = i_list[0]
c = self.sp.cont[i] - self.sp.conts['user'][i]
self.user_cont_editBox.append('{:<7.1f} {:.2f}'.format(event.xdata, event.ydata-c))
self.update_user_cont()
elif ( self.del_user_cont_points and
self.user_cont_editBox is not None and
self.sp.get_conf('cont_user_table') is not None ):
wave = event.xdata
points = self.sp.get_conf('cont_user_table')[:]
if points is not None and len(points) > 0:
points.remove(min(points, key=lambda x:abs(x[0]-wave)))
self.user_cont_list2table(points)
self.update_user_cont()
elif self.cursor_on:
do_print = not self.sp.get_conf('qt_show_dialogs', True)
nearbyLines = self.sp.nearby_lines(event, do_print, sort='i_tot', reverse=True)
if nearbyLines is None:
return
self.nearbyLines = nearbyLines
if not do_print:
self.show_nearbyLines_dialog()
def sort_nearbyLines(self, sort, reverse=False):
if self.nearbyLines is None:
return
if sort == 'proc':
sorts = np.argsort([ self.sp.process[str(line_num)[-9]] for line_num in self.nearbyLines['num'] ])
else:
sorts = np.argsort(self.nearbyLines[sort])
if reverse:
sorts = sorts[::-1]
self.nearbyLines = np.array(self.nearbyLines)[sorts]
def create_main_frame(self):
if self.use_workspace:
self.main_frame = QtGui.QWorkspace()
else:
self.main_frame = QtGui.QWidget()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 100
#self.fig = plt.figure(figsize=(15,15))
self.fig = plt.figure(figsize=(15,15))
# self.fig = plt.figure(figsize=(20.0, 15.0), dpi=self.dpi)
log_.debug('creating figure {}'.format(id(self.fig)), calling=self.calling)
self.canvas = FigureCanvas(self.fig)
if self.use_workspace:
self.main_frame.addWindow(self.canvas)
self.fig2 = Figure((20.0, 15.0), dpi=self.dpi)
self.canvas2 = FigureCanvas(self.fig2)
#self.main_frame.addWindow(self.canvas2)
else:
self.canvas.setParent(self.main_frame)
self.canvas.mpl_connect('button_press_event', self.on_click)
self.canvas.mpl_connect('figure_leave_event', self.leave_fig)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.mpl_toolbar.curs.connect(self.set_cursor)
# Other GUI controls
#
self.fix_axes_cb = QtGui.QCheckBox("fix")
self.fix_axes_cb.setChecked(False)
self.connect(self.fix_axes_cb, QtCore.SIGNAL('stateChanged(int)'), self.fix_axes)
self.xlim_min_box = QtGui.QLineEdit()
self.xlim_min_box.setMinimumWidth(50)
#self.connect(self.xlim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_min)
self.connect(self.xlim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.xlim_max_box = QtGui.QLineEdit()
self.xlim_max_box.setMinimumWidth(50)
#self.connect(self.xlim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_max)
#self.xlim_max_box.editingFinished.connect(self.validate_xlim_max)
self.connect(self.xlim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_min_box = QtGui.QLineEdit()
self.y1lim_min_box.setMinimumWidth(50)
#self.connect(self.y1lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_min)
self.connect(self.y1lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_max_box = QtGui.QLineEdit()
self.y1lim_max_box.setMinimumWidth(50)
#self.connect(self.y1lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_max)
self.connect(self.y1lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_min_box = QtGui.QLineEdit()
self.y3lim_min_box.setMinimumWidth(50)
#self.connect(self.y3lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_min)
self.connect(self.y3lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_max_box = QtGui.QLineEdit()
self.y3lim_max_box.setMinimumWidth(50)
#self.connect(self.y3lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_max)
self.connect(self.y3lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.run_button = QtGui.QPushButton("Run")
self.connect(self.run_button, QtCore.SIGNAL('clicked()'), self.rerun)
self.draw_button = QtGui.QPushButton("Draw")
self.connect(self.draw_button, QtCore.SIGNAL('clicked()'), self.on_draw)
self.Command_GroupBox = QtGui.QGroupBox("Execute")
self.Command_GroupBox.setCheckable(False)
self.ObsSpec_GroupBox = QtGui.QGroupBox("Parameters of the synthetic spectrum")
self.ObsSpec_GroupBox.setCheckable(False)
self.SpecPlot_GroupBox = QtGui.QGroupBox("Plot of spectra")
self.SpecPlot_GroupBox.setCheckable(False)
self.lineIDs_GroupBox = QtGui.QGroupBox("Show lines")
self.lineIDs_GroupBox.setCheckable(True)
self.lineIDs_GroupBox.setChecked(True)
self.connect(self.lineIDs_GroupBox, QtCore.SIGNAL('clicked()'), self.show_lines_clicked)
self.lineIDs_GroupBox_ToolTip = 'Check to show ticks at the central positions of the spectral lines and plot the lines of selected ions'
self.residual_GroupBox = QtGui.QGroupBox("Plot of residuals")
self.residual_GroupBox.setCheckable(True)
self.residual_GroupBox.setChecked(True)
self.connect(self.residual_GroupBox, QtCore.SIGNAL('clicked()'), self.residual_box_clicked)
self.residual_GroupBox_ToolTip = 'Check to display the residual plot'
self.adjust_button = QtGui.QPushButton("Update")
self.adjust_button.setChecked(False)
self.connect(self.adjust_button, QtCore.SIGNAL('clicked()'), self.adjust)
self.post_proc_button = QtGui.QPushButton("Post proc")
self.post_proc_button.setChecked(False)
self.connect(self.post_proc_button, QtCore.SIGNAL('clicked()'), self.apply_post_proc)
self.update_profile_button = QtGui.QPushButton("Update profiles")
self.update_profile_button.setChecked(False)
self.connect(self.update_profile_button, QtCore.SIGNAL('clicked()'), self.update_profile)
self.sp_min_box = QtGui.QLineEdit()
self.sp_min_box.setMinimumWidth(50)
#self.connect(self.sp_min_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_min_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_max_box = QtGui.QLineEdit()
self.sp_max_box.setMinimumWidth(50)
#self.connect(self.sp_max_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_max_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_norm_box = QtGui.QLineEdit()
self.sp_norm_box.setMinimumWidth(50)
self.connect(self.sp_norm_box, QtCore.SIGNAL('returnPressed()'), self.sp_norm)
self.obj_velo_box = QtGui.QLineEdit()
self.obj_velo_box.setMinimumWidth(50)
self.connect(self.obj_velo_box, QtCore.SIGNAL('returnPressed()'), self.obj_velo)
self.ebv_box = QtGui.QLineEdit()
self.ebv_box.setMinimumWidth(50)
self.connect(self.ebv_box, QtCore.SIGNAL('returnPressed()'), self.ebv)
self.resol_box = QtGui.QLineEdit()
self.resol_box.setMinimumWidth(50)
self.connect(self.resol_box, QtCore.SIGNAL('returnPressed()'), self.resol)
self.cut2_box = QtGui.QLineEdit()
self.cut2_box.setMinimumWidth(50)
self.connect(self.cut2_box, QtCore.SIGNAL('returnPressed()'), self.cut2)
self.cut_cb = QtGui.QCheckBox('')
self.cut_cb.setChecked(False)
self.connect(self.cut_cb, QtCore.SIGNAL('clicked()'), self.cut_cb_changed)
self.ion_box = QtGui.QLineEdit()
self.ion_box.setMinimumWidth(70)
self.connect(self.ion_box, QtCore.SIGNAL('returnPressed()'), self.draw_ion)
self.ion_cb = QtGui.QCheckBox('')
self.ion_cb.setChecked(False)
self.connect(self.ion_cb, QtCore.SIGNAL('clicked()'), self.ion_cb_changed)
self.line_info_box = QtGui.QLineEdit()
self.line_info_box.setFixedWidth(130)
self.connect(self.line_info_box, QtCore.SIGNAL('returnPressed()'), self.line_info)
self.mpl_toolbar.addSeparator()
self.mpl_toolbar.addWidget(QtGui.QLabel(' line number '))
self.mpl_toolbar.addWidget(self.line_info_box)
self.magenta_box = QtGui.QLineEdit()
self.magenta_box.setMinimumWidth(50)
self.connect(self.magenta_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.magenta_label_box = QtGui.QLineEdit()
self.magenta_label_box.setMinimumWidth(50)
self.connect(self.magenta_label_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.cyan_box = QtGui.QLineEdit()
self.cyan_box.setMinimumWidth(50)
self.connect(self.cyan_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.cyan_label_box = QtGui.QLineEdit()
self.cyan_label_box.setMinimumWidth(50)
self.connect(self.cyan_label_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.setStyleSheet("""QToolTip {
background-color: black;
color: lightgray;
min-width: 20em;
font-size: 14px;
font-family: "sans-serif";
border: black solid 10px
}""")
s = 'Click to execute the synthesis from the beginning.'
self.run_button_ToolTip = s
s = 'Click to update synthesis with changes in line intensities, profiles, and continuum parameters.'
self.adjust_button_ToolTip = s
s = 'Enter line number to get information on\n' \
'the reference line and on its satellites.'
self.line_info_box_ToolTip = s
s = 'Color excess E(B-V)\n\n' \
'Set with: \n' \
' e_bv = <float>\n\n' \
'Comment: \n' \
u' E(B-V) \u2248 C(H\u03B2) / 1.5'
self.ebv_box_ToolTip = s
s = 'Radial velocity in km/s\n\n' \
'Set with: \n' \
' obj_velo = <float>'
self.obj_velo_box_ToolTip = s
s = 'Minimum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_min_box_ToolTip = s
s = 'Maximum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_max_box_ToolTip = s
s = 'Minimum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_min_box_ToolTip = s
s = 'Maximum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of spectra, in units of relative intensity \n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of spectra, in units of relative intensity\n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_max_box_ToolTip = s
s = 'Check to retain the current limits of the plots while zooming and panning.'
self.fix_axes_cb_ToolTip = s
s = 'Check to show only lines with intensities above cut. \n\n' \
'Set with: \n' \
' show_selected_intensities_only = <boolean>'
self.cut_cb_ToolTip = s
s = 'Check to show only lines of selected ions. \n\n' \
'Set with: \n' \
' show_selected_ions_only = <boolean>'
self.ion_cb_ToolTip = s
s = 'Normalization factor, ratio between the intensity and the \n' \
u'observed flux of the reference line, usually 10\u2074/F(H\u03B2)\n\n' \
'Set with: \n' \
' sp_norm = <float>'
self.sp_norm_box_ToolTip = s
s = 'Rebinning factor, the odd integer factor by which the number of points \n' \
'of the original spectrum is multiplied in the rebinning process\n\n' \
'Set with: \n' \
' resol = <integer>\n\n' \
'Usage: \n' \
' Set to \'1\' if the resolution of the observed spectrum is large enough'
self.resol_box_ToolTip = s
s = 'Minimum relative intensity of lines to be shown. \n\n' \
'Set with: \n' \
' cut_plot2 = <float>'
self.cut2_box_ToolTip = s
s = 'Comma-separated list of selected ions, elements, or line numbers to be shown. \n\n' \
'Set with: \n' \
' selected_ions = [<ion1>,<ion2>,...]\n\n' \
'Examples: \n' \
' \'O III\' (or \'O_III\') to show the lines of O III\n' \
' \'O III*\' (or \'O_III*\') to show the lines of O III, O IIIfl, O III5g, etc\n' \
' \'O III, O IV\' to show the lines of O III and O IV\n' \
' \'O\' to show the lines of all O ions\n' \
' \'Fe, N\' to show the lines of all Fe and N ions\n' \
' <line number> to show the lines of that same ion'
self.ion_box_ToolTip = s
#
# Layout with box sizers
#
CommandLayout = QtGui.QGridLayout()
wList = [self.run_button,self.adjust_button]
Nrow = 2
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
CommandLayout.addWidget(w,i,j)
CommandLayout.setAlignment(w,QtCore.Qt.AlignCenter)
self.Command_GroupBox.setLayout(CommandLayout)
ObsSpecLayout = QtGui.QGridLayout()
lList = ['xmin', 'xmax', u'10\u2074/F(H\u03B2)', 'radial vel.', 'E(B-V)', 'N']
wList = [self.sp_min_box, self.sp_max_box, self.sp_norm_box, self.obj_velo_box, self.ebv_box, self.resol_box ]
Nrow = 2
for l in lList:
w = QtGui.QLabel(l)
k = lList.index( l )
i = k%Nrow
j = 2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
self.ObsSpec_GroupBox.setLayout(ObsSpecLayout)
SpecPlotLayout = QtGui.QGridLayout()
SpecPlotLayout.addWidget(QtGui.QLabel('xmin'),0,0)
SpecPlotLayout.addWidget(QtGui.QLabel('xmax'),1,0)
SpecPlotLayout.addWidget(QtGui.QLabel('ymin'),0,2)
SpecPlotLayout.addWidget(QtGui.QLabel('ymax'),1,2)
SpecPlotLayout.addWidget(self.xlim_min_box,0,1)
SpecPlotLayout.addWidget(self.xlim_max_box,1,1)
SpecPlotLayout.addWidget(self.y1lim_min_box,0,3)
SpecPlotLayout.addWidget(self.y1lim_max_box,1,3)
SpecPlotLayout.addWidget(self.fix_axes_cb,0,4)
self.SpecPlot_GroupBox.setLayout(SpecPlotLayout)
LineIDLayout = QtGui.QGridLayout()
LineIDLayout.addWidget(QtGui.QLabel('cut'),0,0)
LineIDLayout.addWidget(self.cut2_box,0,1)
LineIDLayout.addWidget(self.cut_cb,0,2)
LineIDLayout.addWidget(QtGui.QLabel('ion'),1,0)
LineIDLayout.addWidget(self.ion_box,1,1)
LineIDLayout.addWidget(self.ion_cb,1,2)
self.lineIDs_GroupBox.setLayout(LineIDLayout)
ResidualLayout = QtGui.QGridLayout()
ResidualLayout.addWidget(QtGui.QLabel('ymin'),0,0)
ResidualLayout.addWidget(QtGui.QLabel('ymax'),1,0)
ResidualLayout.addWidget(self.y3lim_min_box,0,1)
ResidualLayout.addWidget(self.y3lim_max_box,1,1)
self.residual_GroupBox.setLayout(ResidualLayout)
grid = QtGui.QGridLayout()
grid.addWidget(self.Command_GroupBox, 0, 1 )
grid.addWidget(self.ObsSpec_GroupBox, 0, 2 )
grid.addWidget(self.SpecPlot_GroupBox, 0, 3 )
grid.addWidget(self.residual_GroupBox, 0, 4 )
grid.addWidget(self.lineIDs_GroupBox, 0, 5 )
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(grid)
#vbox.setAlignment(QtCore.Qt.AlignBottom)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QtGui.QLabel("pySSN, v{}".format(__version__))
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("File")
open_init_action = self.create_action("Open init file",
shortcut="",
slot=self.select_init,
tip="Open the initialization file and run the synthesis")
save_pars_action = self.create_action("Save parameters",
shortcut="Ctrl+S",
slot=self.save_pars_as,
tip="Save synthesis and plot parameters to file")
save_pars_as_action = self.create_action("Save parameters as",
shortcut="Ctrl+Shift+S",
slot=self.save_pars_as,
tip="Select file name and save parameters of the synthesis")
self.save_plot_action = self.create_action("Save plot",
shortcut="Ctrl+P",
slot=self.save_plot_as,
tip="Save plot to file")
save_plot_as_action = self.create_action("Save plot as",
shortcut="Ctrl+Shift+P",
slot=self.save_plot_as,
tip="Select file name and save plot")
save_lines_action = self.create_action("Save lines",
shortcut="Ctrl+L",
slot=self.save_lines_as,
tip="Save list of lines to file")
save_lines_as_action = self.create_action("Save lines as",
shortcut="Ctrl+Shift+L",
slot=self.save_lines_as,
tip="Select file name and save list of lines")
self.add_actions(self.file_menu,
(open_init_action, save_pars_action, None, self.save_plot_action, None, save_lines_action))
#(open_init_action, save_pars_action, save_pars_as_action, None, self.save_plot_action, save_plot_as_action, None, save_lines_action, save_lines_as_action))
self.line_sort_list = ['wavelength', 'decreasing wavelength', 'intensity', 'decreasing intensity', 'ion' , 'decreasing ion' ]
s = 'Sort lines by:\n'
for i in range(len(self.line_sort_list)):
s = s + ' ' + str(i) + ' - ' + self.line_sort_list[i] + '\n'
s = s + '\nSet with:\n' + ' save_lines_sort = <integer>'
self.line_sort_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_sort_menu = self.file_menu.addMenu("Sort lines by")
self.line_sort_menu_ToolTip = ''
for i in range(len(self.line_sort_list)):
a = self.line_sort_ag.addAction(QtGui.QAction(self.line_sort_list[i], self, checkable=True))
self.line_sort_menu.addAction(a)
self.line_sort_ag.triggered.connect(self.line_sort)
self.line_print_dic = OrderedDict( [
( 'num' , 'line number' ),
( 'id' , 'ion' ),
( 'lambda' , 'wavelength' ),
( 'l_shift' , 'wavelength shift' ),
( 'l_tot' , 'corrected wavelength' ),
( 'i_rel' , 'intensity' ),
( 'i_cor' , 'intensity correction factor' ),
( 'i_tot' , 'corrected intensity' ),
( 'ref' , 'reference line number' ),
( 'profile' , 'line profile code number' ),
( 'vitesse' , 'natural line width' ),
( 'comment' , 'comment' ) ])
items = list(self.line_print_dic.values())
s = 'Fields to be printed:\n'
for i in range(len(items)):
s = s + ' ' + str(i) + ' - ' + items[i] + '\n'
s = s + '\nSet with:\n' + ' save_lines_fields = <list>'
self.line_field_menu = self.file_menu.addMenu("Show fields")
self.line_field_menu_ToolTip = ''
for i in range(len(items)):
a = self.create_action(items[i],
shortcut='', slot=self.set_line_fields_to_print, checkable=True,
tip=None)
self.line_field_menu.addAction(a)
self.file_menu.addMenu(self.line_field_menu)
self.show_header_action = self.create_action("Show header",
slot=self.set_show_header,
shortcut="",
checkable=True,
tip="Show header in list of lines")
self.file_menu.addAction(self.show_header_action)
self.open_cosmetic_file_action = self.create_action("Open cosmetic file",
slot=self.set_cosmetic_file,
shortcut="",
tip="Open the cosmetic file")
self.clean_cosmetic_file_action = self.create_action("Clean cosmetic file",
slot=self.clean_cosmetic_file,
shortcut="",
tip="Remove the unchanged lines from the cosmetic file")
self.empty_cosmetic_file_action = self.create_action("Empty cosmetic file",
slot=self.empty_cosmetic_file,
shortcut="",
tip="Remove all lines from the cosmetic file")
self.order_cosmetic_file_action = self.create_action("Order cosmetic file",
slot=self.order_cosmetic_file,
shortcut="",
tip="Order the cosmetic file by line number and remove duplicate lines")
quit_action = self.create_action("&Quit",
slot=self.fileQuit,
shortcut="Ctrl+Q",
tip="Close the application")
self.add_actions(self.file_menu, (None, self.open_cosmetic_file_action, self.clean_cosmetic_file_action,
self.order_cosmetic_file_action, self.empty_cosmetic_file_action, None, quit_action))
self.run_menu = self.menuBar().addMenu("Execute")
run_action = self.create_action("Run",
shortcut="Ctrl+F9",
slot=self.rerun,
tip="Execute synthesis from the beginning")
update_action = self.create_action("Update",
shortcut="F9",
slot=self.adjust,
tip="Update synthesis with changes in line intensities, profiles, and continuum parameters")
draw_action = self.create_action("Draw",
shortcut="F8",
slot=self.set_plot_limits_and_draw,
tip="Redraw plots")
post_proc_action = self.create_action("Post-process",
shortcut="Ctrl+F8",
slot=self.apply_post_proc,
tip="Edit the plots with python commands defined in an external file")
open_profile_action = self.create_action("Instrumental profile",
shortcut="F7",
slot=self.apply_instr_prof,
tip="Open the instrumental profile file and run the synthesis")
refine_wavelengths_action = self.create_action("Wavelength-refining",
slot=self.refine_wavelengths,
shortcut="F6",
tip="Refine the wavelength calibration")
self.add_actions(self.run_menu, (update_action, run_action, draw_action, None,
post_proc_action, open_profile_action, refine_wavelengths_action))
self.line_menu = self.menuBar().addMenu('Lines')
self.show_line_ticks_action = self.create_action('Plot line ticks',
shortcut='Alt+L', slot=self.show_line_ticks_action_clicked, checkable=True,
tip='Check to show line ticks')
self.plot_lines_action = self.create_action('Plot spectra of selected ions',
shortcut='Alt+P', slot=self.show_line_ticks_action_clicked, checkable=True,
tip='Check to plot spectra of selected ions')
self.selected_intensities_action = self.create_action('Only above the cut',
shortcut='Alt+K', slot=self.selected_lines_clicked, checkable=True,
tip='Check to show the ticks for lines with intensities above cut only')
self.selected_ions_action = self.create_action('Only for selected ions',
shortcut='Alt+I', slot=self.selected_lines_clicked, checkable=True,
tip='Check to show the line ticks for selected ions only')
self.add_actions(self.line_menu,
(self.plot_lines_action, None, self.show_line_ticks_action, self.selected_intensities_action, self.selected_ions_action))
self.diff_lines_list = ['ion and reference line', 'ion and process', 'ion', 'element' ]
s = 'Differentiate lines by:\n'
for i in range(len(self.diff_lines_list)):
s = s + ' ' + str(i) + ' - ' + self.diff_lines_list[i] + '\n'
s = s + '\nSet with:\n' + ' diff_lines_by = <integer>'
self.diff_lines_ag = QtGui.QActionGroup(self, exclusive=True)
self.diff_lines_menu = self.line_menu.addMenu("Differentiate lines by")
self.diff_lines_menu_ToolTip = ''
for i in range(len(self.diff_lines_list)):
a = self.diff_lines_ag.addAction(QtGui.QAction(self.diff_lines_list[i], self, checkable=True))
a.setShortcut('Alt+' + str(i+1))
self.diff_lines_menu.addAction(a)
self.diff_lines_ag.triggered.connect(self.diff_lines)
self.cycle_forwards_ions_action = self.create_action('Cycle forwards selected ions',
shortcut='Alt+0', slot=self.cycle_forwards_ions, checkable=False,
tip='Click to cycle forwards the selected ions')
self.cycle_backwards_ions = self.create_action('Cycle backwards selected ions',
shortcut='Alt+9', slot=self.cycle_backwards_ions, checkable=False,
tip='Click to cycle backwards the selected ions')
self.add_actions(self.line_menu,
(None, self.cycle_forwards_ions_action, self.cycle_backwards_ions, None))
self.line_tick_ax_menu = self.line_menu.addMenu('Window of line ticks')
self.line_tick_ax_list = ['Plot of spectra', 'Plot of residuals', 'Separate plot' ]
s = 'Show line ticks on:\n'
for i in range(len(self.line_tick_ax_list)):
s = s + ' ' + str(i) + ' - ' + self.line_tick_ax_list[i] + '\n'
s = s + '\nSet with:\n' + ' line_tick_ax = <integer>'
self.line_tick_ax_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_tick_ax_menu_ToolTip = ''
for i in range(len(self.line_tick_ax_list)):
a = self.line_tick_ax_ag.addAction(QtGui.QAction(self.line_tick_ax_list[i], self, checkable=True))
self.line_tick_ax_menu.addAction(a)
self.line_tick_ax_ag.triggered.connect(self.set_plot_ax2)
self.line_tick_pos_menu = self.line_menu.addMenu('Position of line ticks')
self.line_tick_pos_list = ['Top', 'Middle', 'Bottom' ]
s = 'Position line ticks:\n'
for i in range(len(self.line_tick_pos_list)):
s = s + ' ' + str(i) + ' - ' + self.line_tick_pos_list[i] + '\n'
s = s + '\nSet with:\n' + ' line_tick_pos = <integer>'
self.line_tick_pos_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_tick_pos_menu_ToolTip = ''
for i in range(len(self.line_tick_pos_list)):
a = self.line_tick_pos_ag.addAction(QtGui.QAction(self.line_tick_pos_list[i], self, checkable=True))
self.line_tick_pos_menu.addAction(a)
self.line_tick_pos_ag.triggered.connect(self.set_plot_ax2)
self.line_tick_color_action = self.create_action('Color of line ticks',
shortcut=None, slot=self.line_tick_color_clicked, checkable=False,
tip='Set color of line ticks')
self.toggle_legend_action = self.create_action('Toggle legend position and zoom',
shortcut='Alt+Shift+L', slot=self.toggle_legend_clicked, checkable=False,
tip='Toggle the legend position and zoom')
self.line_menu.addAction(self.toggle_legend_action)
self.editing_lines_action = self.create_action('Allow editing line parameters',
slot=self.editing_lines_clicked, checkable=True,
tip='Check to allow editing line parameters in line info dialog')
self.update_lines_action = self.create_action('Update after editing line parameters',
shortcut='Alt+U', slot=self.update_lines_clicked, checkable=True,
tip='Check to update synthesis after editing line parameters in line info dialog')
self.show_line_ticks_from_file_action = self.create_action('Plot line ticks from file',
shortcut='F4', slot=self.show_line_ticks_from_file,
tip='Check to show line ticks defined in an external file')
self.ask_tickfile_action = self.create_action("Ask for file name",
checkable=True, tip="Check to be always asked for the text file containing a list of wavelengths to be ticked")
self.add_actions(self.line_menu, (None, self.show_line_ticks_from_file_action))
self.cont_menu = self.menuBar().addMenu('Continuum')
self.plot_cont_action = self.create_action('Plot continuum',
shortcut="Alt+C",
slot=self.plot_cont_action_clicked,
checkable=True,
tip='Check to plot the different components of the continuum spectrum')
self.cont_action = self.create_action('Parameters',
shortcut="Shift+Alt+C",
slot=self.cont_dialog,
tip='Parameters of the continuum spectrum')
self.interpol_cont_action = self.create_action('User-defined continuum',
shortcut="F5",
slot=self.user_continuum,
tip='Open dialog to set the user-defined continuum spectrum')
self.add_actions(self.cont_menu,
(self.plot_cont_action, self.cont_action, self.interpol_cont_action,))
self.settings_menu = self.menuBar().addMenu('Settings')
self.verbosity_list = ['None', 'Errors', 'Errors and warnings', 'Errors, warnings, and comments', 'Debug messages' ]
s = 'Verbosity level:\n'
for i in range(len(self.verbosity_list)):
s = s + ' ' + str(i) + ' - ' + self.verbosity_list[i] + '\n'
s = s + '\nSet with:\n' + ' log_level = <integer>'
self.verbosity_ag = QtGui.QActionGroup(self, exclusive=True)
#self.verbosity_menu = self.menuBar().addMenu("Verbosity")
self.verbosity_menu = self.settings_menu.addMenu("Verbosity")
self.verbosity_menu_ToolTip = ''
for i in range(len(self.verbosity_list)):
a = self.verbosity_ag.addAction(QtGui.QAction(self.verbosity_list[i], self, checkable=True))
self.verbosity_menu.addAction(a)
self.verbosity_ag.triggered.connect(self.verbosity)
self.style_list = list(QtGui.QStyleFactory.keys())
s = 'Widget styles:\n'
for i in range(len(self.style_list)):
s = s + ' ' + str(i) + ' - ' + self.style_list[i] + '\n'
s = s + '\nSet with:\n' + ' qt_style = <integer>'
self.style_ag = QtGui.QActionGroup(self, exclusive=True)
self.style_menu = self.settings_menu.addMenu('Widget style')
self.style_menu_ToolTip = ''
for i in range(len(self.style_list)):
a = self.style_ag.addAction(QtGui.QAction(self.style_list[i], self, checkable=True))
self.style_menu.addAction(a)
self.style_ag.triggered.connect(self.style)
self.enable_tooltips_action = self.create_action('Enable tooltips',
slot=self.enable_tooltips_action_clicked, checkable=True,
tip='Check to enable tooltips')
self.adjust_fig_action = self.create_action('Adjust figure',
slot=self.adjust_fig_action_clicked, checkable=True,
tip='Automatically adjust figure to avoid overlaps and to minimize the empty borders.')
self.show_uncor_obs_action = self.create_action('Show uncorrected spectrum',
slot=self.show_uncor_obs_action_clicked, checkable=True,
tip='Show observational spectrum without the wavelength refining.')
self.add_actions(self.settings_menu,
(None, self.enable_tooltips_action, self.adjust_fig_action, None, self.editing_lines_action, self.update_lines_action, self.show_uncor_obs_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def fileQuit(self):
self.close()
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QtGui.QAction(text, self)
if icon is not None:
action.setIcon(QtGui.QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, QtCore.SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def isInteger(self, str_):
try:
int(str_)
return True
except ValueError:
return False
def isPositiveInteger(self, str_):
if self.isInteger(str_):
n = int(str_)
if n > 0:
return True
else:
return False
else:
return False
def isPositiveOdd(self, str_):
if self.isInteger(str_):
n = int(str_)
if n%2 == 1 and n > 0:
return True
else:
return False
else:
return False
def isFloat(self, str_):
try:
np.float(str_)
return True
except ValueError:
return False
def floatFixFormat(self, r, fix_fmt, align='>'):
"""
floatFixFormat(1.23456789, '{:7.3f}') = ' 1.234'
floatFixFormat(-1.23456789, '{:7.3f}') = ' -1.234'
floatFixFormat(123.456789, '{:7.3f}') = ' 1.23e2'
floatFixFormat(-123.456789, '{:7.3f}') = '-1.23e2'
floatFixFormat(1.23456789e+04, '{:7.3f}') = ' 1.23e4'
floatFixFormat(1.23456789e-04, '{:7.3f}') = ' 1.2e-4'
floatFixFormat(1.23456789e+34, '{:7.3f}') = ' 1.2e34'
floatFixFormat(99.999999, '{:7.3f}') = ' 1.2e34'
"""
if not ( 'f' in fix_fmt and self.isFloat(r) ):
return None
s = fix_fmt.strip('{')
s = s.strip('}')
s = s.strip(':')
s = s.strip('f')
k = s.index('.')
w = int(s[:k])
p = int(s[k+1:])
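# w is the total field width and p the precision parsed from fix_fmt; the
# fixed-point rendering s0 is preferred when it fits within w characters,
# otherwise a compact exponent form is built below.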
s0 = '{:{align}{w}.{p}f}'.format(float(abs(r)), w=w-1, p=p, align=align)
s = '{:0.{w}e}'.format(float(abs(r)), w=w)
if r < 0:
sgn = '-'
else:
sgn = ''
k = s.index('e')
mantissa = s[:k]
mantissa = mantissa[:p+2]
e = int(s[k+1:])
if p+e+2>w-3-len(str(e)) and len(s0) < w:
s = s0.strip()
else:
s = '{:0.{p}e}'.format(float(abs(r)), p=min(p,w-4-len(str(e))))
k = s.index('e')
mantissa = s[:k]
exponent = str(int(s[k+1:]))
s = mantissa + 'e' + exponent
s = '{:{align}{w}}'.format(sgn+s, w=w, align=align)
return s
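# Validate and format a table-cell string for the given field: 'comment' is
# passed through unchanged, 'profile' is read as an integer and the other
# fields as floats, then rendered with the field's fixed-width format.
# None is returned when the value does not fit the field (or for a
# non-positive 'vitesse').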
def rightFormat(self, s, field):
if field == 'comment':
output = s.strip()
return output
try:
if field == 'profile':
r = int(s)
else:
r = np.float(s)
fmt = self.sp.field_format[field]
if 'f' in fmt:
s = self.floatFixFormat(r, fmt)
else:
s = fmt.format(r)
if len(s) == self.sp.field_width[field] and not np.isinf(r):
if field == 'vitesse' and (r < 0 or s.strip() == '0.00'):
output = None
else:
output = s
else:
output = None
except:
output = None
return output
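# Convert a string typed in a parameter table into a Python value: int, float,
# boolean, a list of floats ('1, 2, 3') or a list of (float, float) tuples
# ('(1, 2), (3, 4)'); an empty or unparsable numeric string gives None and
# anything else is returned unchanged as a string.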
def ConvStrToValidTypes(self, str_):
str_ = str_.replace('Error in ','')
str_ = str_.replace(' ','')
if str_ == '':
result = None
elif str_.isdigit():
result = int(str_)
elif self.isFloat(str_):
result = np.float(str_)
elif str_.capitalize() == 'True':
result = True
elif str_.capitalize() == 'False':
result = False
elif str_.find('(') >= 0:
try:
str_ = str_.replace('[','')
str_ = str_.replace(']','')
str_ = str_.strip('[]()')
result = [(float(s.split(',')[0]),float(s.split(',')[1])) for s in str_.split('),(')]
except:
result = None
elif str_.find(',') >= 0:
try:
str_ = str_.replace('[','')
str_ = str_.replace(']','')
result = [float(i) for i in str_.split(',')]
except:
result = None
else:
result = str_
return result
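# Update a single 'field = value' entry in a parameter file: the last
# occurrence of the field is commented out and the new assignment written in
# its place; if the field is absent, the entry (preceded by an optional help
# comment) is appended at the end of the file.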
def save_par_in_file(self, field, value, path, help_=None):
if self.isValidFilename(path):
if os.path.isfile(path):
f = open(path, 'r')
lines = f.readlines()[::-1]
f.close()
else:
lines = []
j = 0
found = False
while ( j < len(lines) ) and ( not found ):
line = str(lines[j])
if line.find(field) == 0:
if type(value) is str:
s0 = ' = \''
s1 = '\'\n'
else:
s0 = ' = '
s1 = '\n'
line = '# ' + line + field + s0 + value + s1
lines[j] = line
found = True
break
j += 1
if not found:
if help_ is not None:
lines.insert(0, '\n# ' + help_ + '\n')
lines.insert(0, field + ' = ' + value + '\n')
lines = lines[::-1]
f = open(path, 'w')
f.writelines(lines)
f.close()
def save_cont_pars(self):
file_choices = "Python files (*.py) (*.py);;Text files (*.txt *.dat) (*.txt *.dat);;All Files (*) (*)"
filename = self.sp.config_file.split('/')[-1]
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save to file', filename, file_choices))
if path:
if os.path.isfile(path):
f = open(path, 'r')
lines = f.readlines()[::-1]
f.close()
else:
lines = []
for i in range(0, self.table.rowCount()):
field = str(self.table.item(i,0).text())
value = str(self.table.item(i,1).text())
help_ = str(self.table.item(i,2).text().toUtf8())
help_ = help_.replace('\xce\xb2', 'beta')
help_ = help_.replace('\xe2\x81\xbb\xc2\xb3', '-3')
help_ = help_.replace('\xce\xb1', 'alpha')
help_ = help_.replace('\xce\xbb/5000 \xe2\x84\xab', 'lambda/5000 A')
j = 0
found = False
while ( j < len(lines) ) and ( not found ):
line = str(lines[j])
if line.find(field) == 0:
k = line.find('#')
if k > 0:
comment = ' ' + line[k:]
else:
comment = '\n'
line = field + ' = ' + value + comment
lines[j] = line
found = True
break
j += 1
if not found:
lines.insert(0, '\n# ' + help_ + '\n')
lines.insert(0, field + ' = ' + value + '\n')
lines = lines[::-1]
f = open(path, 'w')
f.writelines(lines)
f.close()
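# Return the sorted velocity shifts of the Gaussian components with relative
# intensity > 0.2 for the requested emission-line profile, together with the
# global velocity of that profile; profile '1' is used as a fallback when the
# requested key is not defined.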
def get_shifts_from_profile(self, profile_key):
if profile_key not in self.sp.emis_profiles:
profile_key = '1'
vel = self.sp.emis_profiles[profile_key]['vel']
par_list = self.sp.emis_profiles[profile_key]['params']
shift_list = []
for item in par_list:
shift = np.float(item[2])
intensity = np.float(item[1])
if item[0]=='G' and ( intensity > 0.2 ):
shift_list.append(shift)
shift_list.sort()
return shift_list, vel
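# Draw a green tick at the wavelength of the selected line (on the main or
# residual axes, depending on 'line_tick_ax'); dashed ticks mark the
# individual profile components when their shifts are large enough to be
# resolved in the current x-range, and the line is optionally added to the
# legend.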
def plot_tick_at(self, wavelength, ion, line_num):
if self.green_tick_shown:
self.on_draw()
color = 'green'
ion = ion.replace('_',' ').strip()
to_select = (self.sp.liste_raies['num'] == np.int(line_num))
vitesse = self.sp.liste_raies[to_select]['vitesse']
profile_key = str(self.sp.liste_raies[to_select]['profile'][0])
shift_list, vel = self.get_shifts_from_profile(profile_key)
line_num = line_num.strip().strip('0')
# label = ion + ' (' + line_num.strip() + ')'
label = ion + ' {:.2f}'.format(wavelength)
posTick = self.getTickPosOfSelectedLine()
y1, y2 = self.get_line_tick_lim(posTick)
k = self.sp.get_conf('line_tick_ax')
if not (k == 1 and self.residual_GroupBox.isChecked()):
k = 0
if len(shift_list) > 0:
if posTick == 0:
ys1 = 2*y1-y2
ys2 = y1
ym = y1
else:
ys1 = y2
ys2 = 2*y2-y1
ym = y2
if k == 0:
yy1 = self.y1_plot_lims[0] + ym*(self.y1_plot_lims[1] - self.y1_plot_lims[0])
else:
yy1 = self.y3_plot_lims[0] + ym*(self.y3_plot_lims[1] - self.y3_plot_lims[0])
current_legend_loc = self.sp.legend_loc
f = 0.15
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
if wavelength - self.x_plot_lims[0] < 2*r*f:
current_legend_loc = 1
if self.x_plot_lims[1] - wavelength < 2*r*f:
current_legend_loc = 2
self.fig.axes[k].axvline( wavelength, y1, y2, color = color, linestyle = 'solid', linewidth = 2.5 )
wave_shifts = -vitesse*wavelength*shift_list / CST.CLIGHT * 1e5 + wavelength*vel / CST.CLIGHT * 1e5
if len(wave_shifts) > 0:
max_wave_shift = max(abs(wave_shifts))
else:
max_wave_shift = 0
# Ticks for the individual profile components are only drawn if the maximum component shift exceeds a fraction f (here 0.1%) of the x-axis width.
f = 0.001
if max_wave_shift > f*(self.x_plot_lims[1] - self.x_plot_lims[0]):
x1 = (wavelength - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
for shift in wave_shifts:
self.fig.axes[k].axvline( wavelength+shift, ys1, ys2, color = color, linestyle = '--', linewidth = 2.5 )
x2 = (wavelength + shift - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
self.fig.axes[k].axhline( yy1, x1, x2, color = color, linestyle = '-', linewidth = 1.0 )
if self.addGreenTickToLegend:
self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
self.fig.axes[k].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
self.green_tick_shown = True
self.magenta_tick_shown = False
def show_line_info_dialog(self):
def get_window_size_and_position():
if self.line_info_dialog is None:
font = QtGui.QFont()
width = QtGui.QFontMetrics(font).width('='*120)
self.line_info_dialog_width = width
self.line_info_dialog_height = 470
sG = QtGui.QApplication.desktop().screenGeometry()
self.line_info_dialog_x = sG.width()-self.line_info_dialog_width
self.line_info_dialog_y = 0
else:
self.line_info_dialog_width = self.line_info_dialog.width()
self.line_info_dialog_height = self.line_info_dialog.height()
self.line_info_dialog_x = self.line_info_dialog.pos().x()
self.line_info_dialog_y = self.line_info_dialog.pos().y()
def save_initial_plot_pars():
self.init_line_num = self.line_info_box.text()
self.init_ion = self.ion_box.text()
self.init_xmin = self.xlim_min_box.text()
self.init_xmax = self.xlim_max_box.text()
self.init_y1min = self.y1lim_min_box.text()
self.init_y1max = self.y1lim_max_box.text()
self.init_y3min = self.y3lim_min_box.text()
self.init_y3max = self.y3lim_max_box.text()
self.init_legend_fontsize = self.sp.legend_fontsize
self.init_legend_loc = self.sp.legend_loc
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def redo_initial_plot():
self.line_info_box.setText(self.init_line_num)
self.ion_box.setText(self.init_ion)
self.xlim_min_box.setText(self.init_xmin)
self.xlim_max_box.setText(self.init_xmax)
self.y1lim_min_box.setText(self.init_y1min)
self.y1lim_max_box.setText(self.init_y1max)
self.y3lim_min_box.setText(self.init_y3min)
self.y3lim_max_box.setText(self.init_y3max)
self.sp.legend_fontsize = self.init_legend_fontsize
self.sp.legend_loc = self.init_legend_loc
self.set_plot_limits_and_draw()
#self.save_from_lim_boxes()
#self.draw_ion()
def do_reset():
self.curr_line_num = self.init_line_num
get_info(self.curr_line_num)
fill_line_info_table()
redo_initial_plot()
def toggle_show_satellites():
self.show_satellites = (self.show_satellites + 1)%3
fill_line_info_table()
def on_click():
item = self.line_info_table.currentItem()
row = item.row()
col = item.column()
s = item.text()
if col == col_ion:
ion = self.line_info_table.item(row, col).text()
self.ion_box.setText(ion)
self.draw_ion()
if not self.isFloat(s):
return
if col in [col_num, col_ref] and int(s) != 0:
self.curr_line_num = s
get_info(self.curr_line_num)
self.line_info_box.setText(self.curr_line_num)
fill_line_info_table()
def on_doubleClick():
item = self.line_info_table.currentItem()
row = item.row()
col = item.column()
s = item.text()
if col == col_ion:
ion = self.line_info_table.item(row, col).text()
self.ion_box.setText(ion)
self.draw_ion()
if not self.isFloat(s):
return
if col in [col_num, col_ref] and int(s) != 0:
self.curr_line_num = s
get_info(self.curr_line_num)
self.line_info_box.setText(self.curr_line_num)
fill_line_info_table()
def on_itemClicked():
# to avoid blinking with itemSelectionChanged
item = self.line_info_table.currentItem()
if item == self.selected_item:
on_itemSelectionChanged()
def on_itemSelectionChanged():
if self.green_tick_shown:
self.on_draw()
self.green_tick_shown = False
item = self.line_info_table.currentItem()
if item == None:
self.draw_ion()
return
self.selected_item = item
row = item.row()
col = item.column()
s = item.text()
l_shift_refline = np.float(self.sp.fieldStrFromLine(self.refline,'l_shift'))
if col == col_wave:
wavelength = np.float(s)
ion = str(self.line_info_table.item(row, col_ion).text())
line_num = str(self.line_info_table.item(row, col_num).text())
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
if wavelength > min_wave and wavelength < max_wave:
l_shift = np.float(self.line_info_table.item(row, col_lshift).text())
wavelength = wavelength + l_shift + l_shift_refline
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
f = 0.05
if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
if wavelength-r < min_wave:
self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
elif wavelength+r > max_wave:
self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
else:
self.x_plot_lims = (wavelength-r,wavelength+r)
if not self.axes_fixed:
self.update_lim_boxes()
self.restore_axes()
self.plot_tick_at(wavelength, ion, line_num)
elif wavelength == 1:
if str(self.line_info_table.item(row, col_ref).text()) == '0000000000000':
satellites = self.satellites
else:
satellites = self.sp.read_satellites(self.sp.phyat_file, int(line_num))
satellites = add_satellites_of_subreferences(satellites)
SelectedSatellites = []
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
for i in range(0, len(satellites)):
wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
if (wavelength > min_wave) and (wavelength < max_wave):
SelectedSatellites.append(satellites[i])
satellites = SelectedSatellites
self.plot_line_ticks_for(satellites, ion, line_num, self.refline)
def isRefLine(line):
s = self.sp.fieldStrFromLine(line,'ref').strip()
if s == '0000000000000':
return True
else:
return False
def isSubRefLine(line):
wavelength = np.float(self.sp.fieldStrFromLine(line,'lambda'))
if not isRefLine(line) and (wavelength < 2.0):
return True
else:
return False
def fill_data(i, line, cat=''):
if line == None:
return
editableCols = []
if self.sp.get_conf('qt_allow_editing_lines', False):
if cat == 'sat':
if do_cosmetics:
editableCols = ['l_shift', 'i_cor', 'profile', 'vitesse', 'comment']
else:
editableCols = []
elif cat == 'subref':
if do_cosmetics:
editableCols = ['i_cor', 'comment']
else:
editableCols = []
elif cat == 'ref':
editableCols = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse', 'comment']
for j in range(0,len(fieldItems)):
s = self.sp.fieldStrFromLine(line, fieldItems[j])
s = s.strip()
if j == col_ion:
if self.show_true_ions:
s = self.sp.true_ion(s).replace('_',' ').strip()
isPseudoIon = self.sp.isPseudoIon(s)
if j == fieldItems.index('proc'):
if isRefLine(line):
s = ''
elif isPseudoIon:
s = ''
else:
s = self.sp.process[s]
item = QtGui.QTableWidgetItem(s)
if fieldItems[j] in editableCols:
item.setBackgroundColor(self.editableCells_bg_color)
else:
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.line_info_table.setItem(i,j,item)
def fill_text(i, text):
item = QtGui.QTableWidgetItem(text)
item.setFlags(item.flags() ^ (QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled))
item.setBackgroundColor(self.readOnlyCells_bg_color)
item.setTextAlignment(QtCore.Qt.AlignBottom)
item.setTextColor(QtCore.Qt.blue)
self.line_info_table.setItem(i,0,item)
self.line_info_table.setSpan(i,0,2,len(fieldItems))
def add_satellites_of_subreferences(satellites):
subref_list = []
all_satellites = satellites
for sat_line in satellites:
if isSubRefLine(sat_line):
subref_list.append(sat_line)
i = 0
while i < len(subref_list):
sat_line_num = self.sp.fieldStrFromLine(subref_list[i],'num')
new_satellites = self.sp.read_satellites(self.sp.phyat_file, int(sat_line_num))
for line in new_satellites:
if isSubRefLine(line):
subref_list.append(line)
i += 1
for line in new_satellites:
if not line in all_satellites:
all_satellites.append(line)
return all_satellites
def get_info(line_num):
line = None
refline = None
subrefline = None
LineList = []
if int(line_num) == 0:
return
while refline == None:
refline = self.sp.read_line(self.sp.fic_model, int(line_num))
if refline is None:
if do_cosmetics:
curr_line = self.sp.read_line(self.sp.fic_cosmetik, int(line_num))
else:
curr_line = None
if self.sp.cosmetic_line_ok(curr_line) is not True:
curr_line = None
if curr_line == None:
curr_line = self.sp.read_line(self.sp.phyat_file, int(line_num))
LineList.append(curr_line)
line_num = self.sp.fieldStrFromLine(curr_line,'ref')
if len(LineList) > 0:
if isSubRefLine(LineList[0]):
subrefline = LineList[:1]
else:
line = LineList[0]
if len(LineList) > 1:
subrefline = LineList[1:]
if subrefline is not None:
n_subref = len(subrefline)
else:
n_subref = 0
subsatellites = []
for k in range(0, n_subref):
subsat = []
subrefline_num = self.sp.fieldStrFromLine(subrefline[k], 'num')
subsat = self.sp.read_satellites(self.sp.phyat_file, int(subrefline_num))
n_subsat = len(subsat)
if do_cosmetics:
for i in range(0,n_subsat):
sat_line = subsat[i]
sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
if cosmetic_line is not None:
subsat[i] = cosmetic_line
subsatellites = subsatellites + subsat
subsatellites = add_satellites_of_subreferences(subsatellites)
n_subsat = len(subsatellites)
if refline is not None:
refline_num = self.sp.fieldStrFromLine(refline,'num')
satellites = self.sp.read_satellites(self.sp.phyat_file, int(refline_num))
satellites = add_satellites_of_subreferences(satellites)
n_sat = len(satellites)
if do_cosmetics:
for i in range(0,n_sat):
sat_line = satellites[i]
sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
if cosmetic_line is not None:
satellites[i] = cosmetic_line
else:
n_sat = 0
if line is None and refline is None:
title = 'Error in line info dialog'
msg = 'Line number not found.'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
self.line = line
self.subrefline = subrefline
self.refline = refline
self.subsatellites = subsatellites
self.satellites = satellites
self.n_sat = n_sat
self.n_subsat = n_subsat
self.n_subref = n_subref
def do_sort(lines):
waves = []
for i in range(0,len(lines)):
waves.append(self.sp.fieldStrFromLine(lines[i], 'lambda'))
lines = [x for _,x in sorted(zip(waves,lines))]
return lines
def fill_line_info_table():
self.line_info_table.blockSignals(True)
line = self.line
subrefline = self.subrefline
refline = self.refline
subsatellites = self.subsatellites
satellites = self.satellites
n_sat = self.n_sat
n_subsat = self.n_subsat
n_subref = self.n_subref
SelectedSatellites = []
SelectedSubSatellites = []
if self.show_satellites == 0:
n_sat = 0
n_subsat = 0
else:
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
for i in range(0, len(satellites)):
wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
if self.show_satellites == 2 or \
(self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
SelectedSatellites.append(satellites[i])
for i in range(0, len(subsatellites)):
wavelength = np.float(self.sp.fieldStrFromLine(subsatellites[i],'lambda'))
if self.show_satellites == 2 or \
(self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
SelectedSubSatellites.append(subsatellites[i])
n_sat = len(SelectedSatellites)
n_subsat = len(SelectedSubSatellites)
self.line_info_table.clearContents()
self.line_info_table.setRowCount(n_sat+n_subsat+20)
self.line_info_table.clearSpans()
k = 0
sat_list = []
if line is not None:
fill_text(k,'Line:')
k += 2
fill_data(k, line, 'sat')
k += 1
if subrefline is not None:
fill_text(k,'Subreference line:')
k += 2
for i in range(0,n_subref):
fill_data(k, subrefline[i], 'subref')
k += 1
if n_subsat > 0:
SelectedSubSatellites = do_sort(SelectedSubSatellites)
fill_text(k, str(n_subsat) + ' satellites:')
sat_list.append([k,n_subsat])
k += 2
for i in range(0,n_subsat):
if isSubRefLine(SelectedSubSatellites[i]):
fill_data(k+i, SelectedSubSatellites[i], 'subref')
else:
fill_data(k+i, SelectedSubSatellites[i], 'sat')
k += n_subsat
fill_text(k,'Reference line:')
k += 2
fill_data(k, refline, 'ref')
k += 1
if n_sat > 0:
SelectedSatellites = do_sort(SelectedSatellites)
fill_text(k, str(n_sat) + ' satellites:')
sat_list.append([k,n_sat])
k += 2
for i in range(0,n_sat):
if isSubRefLine(SelectedSatellites[i]):
fill_data(k+i, SelectedSatellites[i], 'subref')
else:
fill_data(k+i, SelectedSatellites[i], 'sat')
k += n_sat
self.line_info_table.setRowCount(k)
self.line_info_table.resizeColumnsToContents()
self.line_info_table.resizeRowsToContents()
self.line_info_table.blockSignals(False)
self.line_info_table.blockSignals(True)
if self.show_satellites == 1:
s0 = ' (in the synthesis range)'
elif self.show_satellites == 2:
s0 = ' (in the entire database and including subreferences)'
else:
s0 = ''
for i in sat_list:
k = i[0]
n = i[1]
fill_text(k, str(n) + ' satellites:' + s0)
self.line_info_table.blockSignals(False)
def on_itemChanged():
self.line_info_table.blockSignals(True)
item = self.line_info_table.currentItem()
if not (item.flags() & QtCore.Qt.ItemIsEditable):
self.line_info_table.blockSignals(False)
return
row = item.row()
col = item.column()
s = str(item.text())
value = self.rightFormat(s, fieldItems[col])
if value != None:
self.line_info_table.setItem(row, col, QtGui.QTableWidgetItem(value.strip()))
self.line_info_table.item(row, col).setBackgroundColor(self.editableCells_bg_color)
save_change(row,col)
else:
self.line_info_table.item(row, col).setBackgroundColor(QtGui.QColor('red'))
title = 'Invalid format for the ' + self.sp.field_tip[fieldItems[col]]
s0 = self.sp.field_format[fieldItems[col]]
s0 = s0[2:-1]
msg = "'" + s + "' can not be converted into the proper field format: " + s0
if col == self.sp.fields.index('vitesse'):
msg = msg + '\nor it is not a positive number.'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
get_info(self.curr_line_num)
fill_line_info_table()
self.line_info_table.blockSignals(False)
def get_line_from_table(row):
line = ' '*85
jList = range(0,len(fieldItems))
jList.remove(col_proc)
for j in jList:
s = self.line_info_table.item(row,j).text()
width = self.sp.field_width[fieldItems[j]]
align = self.sp.field_align[fieldItems[j]]
pos = self.sp.field_pos[fieldItems[j]]
s = '{:{a}{w}s}'.format(s, a=align, w=width)
line = line[:pos] + s + line[pos:]
line = line.rstrip()
return line
def save_change(row, col):
line = get_line_from_table(row)
if isRefLine(line):
filename = self.sp.fic_model
else:
filename = self.sp.fic_cosmetik
self.sp.replace_line(filename, line)
if col != self.sp.fields.index('comment') and \
self.sp.get_conf('qt_update_after_editing_lines', False):
self.adjust()
self.nearbyLines = self.sp.get_nearby_lines(self.cursor_w1, self.cursor_w2, do_print=False)
if self.nearbyLines is not None and self.nearbyLines_dialog.isVisible():
self.fill_nearbyLines_table()
def init_lines():
self.line = None
self.subrefline = None
self.refline = None
self.subsatellites = []
self.satellites = []
self.n_sat = 0
self.n_subsat = 0
self.n_subref = 0
statusBar = QtGui.QStatusBar()
s = 'Click on \"Satellites\" to cycle the tri-state display of satellite lines:\n' \
' 1 - The satellite lines in the spectral range of the synthesis are shown; \n' \
' 2 - All satellite lines (including subreference lines and lines outside the spectral range of the synthesis) are shown; \n' \
' 3 - No satellite line is shown. \n' \
'Double-click on a line number to show the data for that line. \n' \
'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
'Select or click on a wavelength to draw a tick at that position and recenter the spectrum if necessary. \n' \
'Click on \"Reset\" to return to the original line and plot settings. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.show_satellites = 1
get_window_size_and_position()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
self.line_info_dialog = QtGui.QDialog()
self.line_info_dialog.resize(self.line_info_dialog_width,self.line_info_dialog_height)
self.line_info_dialog.move(self.line_info_dialog_x,self.line_info_dialog_y)
self.line_info_table = QtGui.QTableWidget()
fieldItems = self.sp.fields
fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
col_num = fieldItems.index('num')
col_ion = fieldItems.index('id')
col_wave = fieldItems.index('lambda')
col_proc = fieldItems.index('proc')
col_lshift = fieldItems.index('l_shift')
col_irel = fieldItems.index('i_rel')
col_icor = fieldItems.index('i_cor')
col_ref = fieldItems.index('ref')
col_prof = fieldItems.index('profile')
col_vel = fieldItems.index('vitesse')
col_comm = fieldItems.index('comment')
self.line_info_table.setColumnCount(len(fieldItems))
self.line_info_table.setHorizontalHeaderLabels(fieldNames)
if self.enable_tooltips_action.isChecked():
for j in range(0,len(fieldItems)):
self.line_info_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
self.line_info_table.horizontalHeaderItem(col_vel).setText(u'\u0394v (factor)')
if self.enable_tooltips_action.isChecked():
s = 'For a reference line, it is the thermal broadening parameter, in km/s. \n' \
'For a satellite line, it is the dimensionless correction factor for the thermal broadening parameter with respect to the reference line.'
self.line_info_table.horizontalHeaderItem(col_vel).setToolTip(s)
self.line_info_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
self.line_info_table.horizontalHeaderItem(col_comm).setText(' comment')
init_lines()
do_cosmetics = self.sp.get_conf('do_cosmetik')
save_initial_plot_pars()
self.curr_line_num = self.line_info_box.text()
get_info(self.curr_line_num)
fill_line_info_table()
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Reset|
QtGui.QDialogButtonBox.Apply)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Satellites")
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip("Click to toggle the satellite lines")
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(toggle_show_satellites)
s = "Click to return to the initial states of the line info dialog and figures"
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Reset).setToolTip(s)
self.buttonBox.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
self.buttonBox.rejected.connect(self.line_info_dialog.close)
self.line_info_table.doubleClicked.connect(on_doubleClick)
self.line_info_table.itemChanged.connect(on_itemChanged)
self.selected_item = None
self.line_info_table.itemSelectionChanged.connect(on_itemSelectionChanged)
self.line_info_table.itemClicked.connect(on_itemClicked)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.line_info_table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.line_info_dialog.setLayout(vbox)
self.line_info_dialog.setWindowTitle('line info dialog')
self.line_info_dialog.setWindowModality(QtCore.Qt.NonModal)
self.line_info_dialog.show()
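# Populate the nearby-lines table from self.nearbyLines, restricted to the
# currently selected ions when the ion filter is active; the process column
# is decoded from the line number and the table is resized to its contents.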
def fill_nearbyLines_table(self):
if self.nearbyLines is None or self.nearbyLines_table is None:
return
k = self.sp.get_conf('diff_lines_by')
fieldItems = self.sp.fields
jList = range(0,len(fieldItems))
jProc = fieldItems.index('proc')
jList.remove(jProc)
if self.nearbyDialogFilterIsActive:
#selected_ions = self.sp.get_conf('selected_ions')
selected_ions = self.nearbyLines_selected_ions
selected_true_ions = [self.sp.true_ion(ion) for ion in selected_ions]
nearbyLines = []
for line in self.nearbyLines:
ion = str(line[fieldItems.index('id')]).strip()
true_ion = self.sp.true_ion(ion)
selectThisIon = (( ion in selected_ions or true_ion in selected_ions ) and k == 1) or (true_ion in selected_true_ions and k != 1)
if selectThisIon:
nearbyLines.append(line)
else:
nearbyLines = self.nearbyLines
self.nearbyLines_table.setRowCount(len(nearbyLines))
for i in range(0,len(nearbyLines)):
ion = self.sp.true_ion(nearbyLines[i][fieldItems.index('id')])
for j in jList:
if j > jProc:
k = j - 1
else:
k = j
fmt = self.sp.field_format[fieldItems[j]]
s = fmt.format(nearbyLines[i][k])
s = str(s).strip()
if j == fieldItems.index('num'):
if self.sp.isPseudoIon(ion):
proc_str = ''
else:
proc_str = self.sp.process[s[-9]]
if j == fieldItems.index('id'):
if self.show_true_ions:
s = self.sp.true_ion(s).replace('_',' ').strip()
item = QtGui.QTableWidgetItem(s)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.nearbyLines_table.setItem(i,j,item)
item = QtGui.QTableWidgetItem(proc_str)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.nearbyLines_table.setItem(i,jProc,item)
self.nearbyLines_table.resizeColumnsToContents()
self.nearbyLines_table.resizeRowsToContents()
self.nearbyLines_table.clearSelection()
def show_nearbyLines_dialog(self):
def get_window_size_and_position():
if self.nearbyLines_dialog is None:
font = QtGui.QFont()
width = QtGui.QFontMetrics(font).width('='*120)
self.nearbyLines_dialog_width = width
self.nearbyLines_dialog_height = 470
sG = QtGui.QApplication.desktop().screenGeometry()
self.nearbyLines_dialog_x = sG.width()-self.nearbyLines_dialog_width
self.nearbyLines_dialog_y = sG.height()-self.nearbyLines_dialog_height
else:
self.nearbyLines_dialog_width = self.nearbyLines_dialog.width()
self.nearbyLines_dialog_height = self.nearbyLines_dialog.height()
self.nearbyLines_dialog_x = self.nearbyLines_dialog.pos().x()
self.nearbyLines_dialog_y = self.nearbyLines_dialog.pos().y()
def do_reset():
self.curr_line_num = self.init_nearby_line_num
#get_info(self.curr_line_num)
#fill_line_info_table()
self.nearbyDialogFilterIsActive = True
#self.nearbyLines_selected_ions = []
toggle_filter()
redo_initial_plot()
def toggle_filter():
self.nearbyLines_selected_ions = []
if not self.nearbyDialogFilterIsActive:
get_selected_ions()
if len(self.nearbyLines_selected_ions) > 0:
self.nearbyDialogFilterIsActive = True
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Deactivate ion filter')
else:
QtGui.QMessageBox.critical(self, 'nearby lines dialog: ion filter', 'No ion selected.', QtGui.QMessageBox.Ok )
else:
self.nearbyDialogFilterIsActive = False
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
self.fill_nearbyLines_table()
def save_initial_plot_pars():
self.init_nearby_line_num = self.line_info_box.text()
self.init_nearby_ion = self.ion_box.text()
self.init_nearby_xmin = self.xlim_min_box.text()
self.init_nearby_xmax = self.xlim_max_box.text()
self.init_nearby_y1min = self.y1lim_min_box.text()
self.init_nearby_y1max = self.y1lim_max_box.text()
self.init_nearby_y3min = self.y3lim_min_box.text()
self.init_nearby_y3max = self.y3lim_max_box.text()
self.init_nearby_legend_fontsize = self.sp.legend_fontsize
self.init_nearby_legend_loc = self.sp.legend_loc
def redo_initial_plot():
#self.line_info_box.setText(self.init_line_num)
self.ion_box.setText(self.init_nearby_ion)
self.xlim_min_box.setText(self.init_nearby_xmin)
self.xlim_max_box.setText(self.init_nearby_xmax)
self.y1lim_min_box.setText(self.init_nearby_y1min)
self.y1lim_max_box.setText(self.init_nearby_y1max)
self.y3lim_min_box.setText(self.init_nearby_y3min)
self.y3lim_max_box.setText(self.init_nearby_y3max)
self.sp.legend_fontsize = self.init_nearby_legend_fontsize
self.sp.legend_loc = self.init_nearby_legend_loc
self.set_plot_limits_and_draw()
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def on_doubleClick():
item = self.nearbyLines_table.currentItem()
row = item.row()
col = item.column()
if col in [col_num, col_ref]:
self.line_info_box.setText(item.text())
self.show_line_info_dialog()
elif col == col_ion:
self.ion_box.setText(item.text())
self.draw_ion()
def on_itemClicked():
# to avoid blinking with itemSelectionChanged
item = self.nearbyLines_table.currentItem()
if item == self.selected_item:
on_itemSelectionChanged()
def on_itemSelectionChanged():
item = self.nearbyLines_table.currentItem()
self.selected_item = item
row = item.row()
col = item.column()
if col == col_wave:
wavelength = np.float(item.text())
l_shift = np.float(self.nearbyLines_table.item(row,col_lshift).text())
wavelength = wavelength + l_shift
line_num = str(self.nearbyLines_table.item(row,col_num).text())
ion = str(self.nearbyLines_table.item(row,col_ion).text())
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
f = 0.05
if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
if wavelength-r < min_wave:
self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
elif wavelength+r > max_wave:
self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
else:
self.x_plot_lims = (wavelength-r,wavelength+r)
if not self.axes_fixed:
self.update_lim_boxes()
self.restore_axes()
self.plot_tick_at(wavelength, ion, line_num)
else:
if self.green_tick_shown:
self.on_draw()
self.green_tick_shown = False
def do_header_clicked(col):
if col == col_ion:
self.toggle_show_true_ions()
self.fill_nearbyLines_table()
def do_header_doubleClicked(col):
sort = fieldItems[col]
if sort == self.nearbyLines_sort_by:
self.nearbyLines_sort_reverse = not self.nearbyLines_sort_reverse
else:
self.nearbyLines_sort_reverse = False
self.nearbyLines_sort_by = sort
self.sort_nearbyLines(sort, self.nearbyLines_sort_reverse)
self.fill_nearbyLines_table()
def get_selected_ions():
selectedItems = self.nearbyLines_table.selectedItems()
selected_ions = []
for item in selectedItems:
col = item.column()
if col == col_ion:
ion = str(item.text())
if not ion in selected_ions:
selected_ions.append(ion)
if len(selected_ions) > 0:
self.nearbyLines_selected_ions = selected_ions
else:
#self.nearbyLines_selected_ions = self.sp.get_conf('selected_ions')
self.nearbyLines_selected_ions = []
def do_selection():
selectedItems = self.nearbyLines_table.selectedItems()
selected_ions = []
selected_lines = []
for item in selectedItems:
col = item.column()
if col == col_ion:
ion = str(item.text())
if not ion in selected_ions:
selected_ions.append(ion)
if col in [col_num, col_ref]:
line = item.text()
selected_lines.append(line)
if len(selected_ions) > 0:
s = ''
for ion in selected_ions:
s = s + ion + ', '
s = s[:-2]
self.ion_box.setText(s)
self.draw_ion()
if len(selected_lines) > 0:
s = selected_lines[0]
self.line_info_box.setText(s)
self.line_info()
get_window_size_and_position()
self.nearbyLines_dialog = QtGui.QDialog()
self.nearbyLines_dialog.resize(self.nearbyLines_dialog_width, self.nearbyLines_dialog_height)
self.nearbyLines_dialog.move(self.nearbyLines_dialog_x,self.nearbyLines_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Double-click on a line number (or select the line number and press \"Apply\") to show line info dialog. \n' \
'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
'Click or select a wavelength to draw a tick at that position. \n' \
'Select multiple ions (using click, Shift+click, and Ctrl+click) and press \"Plot selected ions\" to plot line ticks and spectra for a list of ions. \n' \
'Click on the ion header to select all ions. \n' \
'Double-click on a column header to sort the table; Double-click again to toggle between ascending and descending order. \n' \
'Click on \"Reset\" to return to the original selected ions and plot settings. \n' \
'Click on \"Filter selected ions\" to activate/deactivate ion selection.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.nearbyLines_table = QtGui.QTableWidget()
self.nearbyLines_table.setRowCount(len(self.nearbyLines))
fieldItems = self.sp.fields
fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
col_num = fieldItems.index('num')
col_ion = fieldItems.index('id')
col_wave = fieldItems.index('lambda')
col_proc = fieldItems.index('proc')
col_lshift = fieldItems.index('l_shift')
col_irel = fieldItems.index('i_rel')
col_icor = fieldItems.index('i_cor')
col_ref = fieldItems.index('ref')
col_prof = fieldItems.index('profile')
col_vel = fieldItems.index('vitesse')
col_comm = fieldItems.index('comment')
self.nearbyLines_table.setColumnCount(len(fieldNames))
self.nearbyLines_table.setHorizontalHeaderLabels(fieldNames)
if self.enable_tooltips_action.isChecked():
for j in range(0,len(fieldItems)):
self.nearbyLines_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
self.nearbyLines_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
self.nearbyLines_table.horizontalHeaderItem(col_vel).setText(u'\u0394v')
if self.enable_tooltips_action.isChecked():
s = u'\u0394v is the thermal broadening parameter of the line, in km/s. \n' \
'For a single Gaussian profile, it is the half-width of the line at the level of 1/e of the peak, \n' \
'related to the full-width at half maximum and the Gaussian standard deviation by:\n\n' \
u' \u0394v = FWHM/(2(ln2)^\u00BD) = FWHM/1.665\n' \
u' \u0394v = \u221A2 \u03C3\n'
self.nearbyLines_table.horizontalHeaderItem(col_vel).setToolTip(s)
self.nearbyLines_table.horizontalHeaderItem(col_comm).setText(' comment')
#self.nearbyDialogFilterIsActive = False
self.fill_nearbyLines_table()
save_initial_plot_pars()
self.buttonBox_nearbyLines = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Reset|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).setText('Plot selected ions')
self.buttonBox_nearbyLines.rejected.connect(self.nearbyLines_dialog.close)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_selection)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_filter)
self.nearbyLines_table.doubleClicked.connect(on_doubleClick)
self.nearbyLines_table.itemSelectionChanged.connect(on_itemSelectionChanged)
self.nearbyLines_table.itemClicked.connect(on_itemClicked)
self.nearbyLines_table.verticalHeader().sectionDoubleClicked.connect(do_selection)
#self.nearbyLines_table.horizontalHeader().sectionClicked.connect(do_header_clicked)
self.nearbyLines_table.horizontalHeader().sectionDoubleClicked.connect(do_header_doubleClicked)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.nearbyLines_table)
vbox.addWidget(self.buttonBox_nearbyLines)
vbox.addWidget(statusBar)
self.nearbyLines_dialog.setLayout(vbox)
s = 'nearby line dialog: list of lines between {0:.2f} and {1:.2f} angstroms'.format(self.sp.cursor_w1, self.sp.cursor_w2)
self.nearbyLines_dialog.setWindowTitle(s)
self.nearbyLines_dialog.setWindowModality(QtCore.Qt.NonModal)
self.cursor_w1 = self.sp.cursor_w1
self.cursor_w2 = self.sp.cursor_w2
if self.nearbyDialogFilterIsActive:
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
else:
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
self.nearbyLines_dialog.show()
def cont_dialog(self):
Pars = [ ( 'cont_unred' , 'Set to True if reddening is to be applied to the continuum' ),
( 'cont_edens' , u'Electron density, in cm\u207B\u00B3' ),
( 'cont_hi_t' , 'Temperature for the H I continuum, in K' ),
( 'cont_hi_i' , u'Intensity of the H I continuum (in theory, intensity of H\u03B2)' ),
( 'cont_hei_t' , 'Temperature for the He I continuum, in K' ),
( 'cont_hei_i' , 'Intensity of the He I continuum (in theory, intensity of He I 4471)' ),
( 'cont_heii_t' , 'Temperature for the He II continuum, in K' ),
( 'cont_heii_i' , 'Intensity of the He II continuum (in theory, intensity of He II 4686)' ),
( 'cont_bb_t' , 'Temperature of the blackbody continuum, in K' ),
( 'cont_bb_i' , 'Intensity of the blackbody continuum' ),
( 'cont_pl_alpha' , u'Index \u03B1 of the power-law continuum F = I*(\u03BB/5000 \u212B)**\u03B1' ),
( 'cont_pl_i' , 'Intensity I of the power-law continuum' ),
( 'cont_user_table' , 'Interpolation table for the user-defined continuum' ),
( 'cont_user_func' , 'Interpolation function for the user-defined continuum' ) ]
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def get_window_size_and_position():
if self.cont_pars_dialog is None:
self.cont_pars_dialog_width = 800
self.cont_pars_dialog_height = 460
sG = QtGui.QApplication.desktop().screenGeometry()
self.cont_pars_dialog_x = sG.width()-self.cont_pars_dialog_width
self.cont_pars_dialog_y = sG.height()-self.cont_pars_dialog_height
self.cont_pars_dialog_x = 0
self.cont_pars_dialog_y = 0
else:
self.cont_pars_dialog_width = self.cont_pars_dialog.width()
self.cont_pars_dialog_height = self.cont_pars_dialog.height()
self.cont_pars_dialog_x = self.cont_pars_dialog.pos().x()
self.cont_pars_dialog_y = self.cont_pars_dialog.pos().y()
def set_conf_from_table(row):
s = str(self.table.item(row,1).text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
def on_itemChanged():
self.table.blockSignals(True)
item = self.table.currentItem()
row = item.row()
s = str(item.text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
self.table.item(row, 1).setBackgroundColor(self.editableCells_bg_color)
self.cont_par_changed = True
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
self.table.item(row, 1).setBackgroundColor(QtGui.QColor('red'))
self.table.blockSignals(False)
get_window_size_and_position()
self.cont_pars_dialog = QtGui.QDialog()
self.cont_pars_dialog.resize(self.cont_pars_dialog_width, self.cont_pars_dialog_height)
self.cont_pars_dialog.move(self.cont_pars_dialog_x, self.cont_pars_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Click on \"Save\" to write the continuum parameters to a file. \n' \
'Click on \"Update\" to adjust the synthesis to the changes in the continuum parameters. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.table = QtGui.QTableWidget()
self.table.setRowCount(len(Pars))
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels([ 'parameter', 'value', 'help' ])
for j in range(0,len(Pars)):
item = QtGui.QTableWidgetItem(Pars[j][0])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,0,item)
value = self.sp.get_conf(Pars[j][0])
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
item = QtGui.QTableWidgetItem(str(value))
#item = QtGui.QTableWidgetItem(str(self.sp.get_conf(Pars[j][0])))
item.setBackgroundColor(self.editableCells_bg_color)
self.table.setItem(j,1,item)
item = QtGui.QTableWidgetItem(Pars[j][1])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,2,item)
self.table.resizeColumnsToContents()
self.table.resizeRowsToContents()
if self.table.columnWidth(1) > 300:
self.table.setColumnWidth(1,300)
self.table.itemChanged.connect(on_itemChanged)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Save|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).setDefault(True)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText('Update')
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip('Click to update synthesis with changes in the continuum parameters.')
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(self.adjust)
self.buttonBox.rejected.connect(self.cont_pars_dialog.close)
self.buttonBox.button(QtGui.QDialogButtonBox.Save).clicked.connect(self.save_cont_pars)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.cont_pars_dialog.setLayout(vbox)
self.cont_pars_dialog.setWindowTitle('Continuum parameters')
self.cont_pars_dialog.show()
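# Convert the line-tick position setting into fractional y-limits within the
# axes: 1 -> middle (0.43-0.57), 2 -> bottom (0.05-0.19), anything else ->
# top (0.81-0.95).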
def get_line_tick_lim(self, line_tick_pos):
if line_tick_pos == 1:
y1 = 0.43
y2 = 0.57
else:
if line_tick_pos == 2:
y1 = 0.05
y2 = 0.19
else:
y1 = 0.81
y2 = 0.95
return y1, y2
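# Tick position for the currently selected line: values 0-2 are used as
# given, 4 means the side opposite to the other line ticks, and any other
# value (including the default 3) falls back to the general 'line_tick_pos'
# setting.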
def getTickPosOfSelectedLine(self):
posTick = self.sp.get_conf('line_tick_pos_selectedLine',3)
if posTick not in [0,1,2]:
posOtherTicks = self.sp.get_conf('line_tick_pos')
if posTick == 4:
if posOtherTicks == 2:
posTick = 0
else:
posTick = 2
else:
posTick = posOtherTicks
return posTick
def plot_line_ticks_for(self, satellites, ion, line_num, refline):
k = self.sp.get_conf('line_tick_ax')
if not (k == 1 and self.residual_GroupBox.isChecked()):
k = 0
posTick = self.getTickPosOfSelectedLine()
y1, y2 = self.get_line_tick_lim(posTick)
if len(satellites) > 0:
if ( k == 0 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], self.addGreenTickToLegend)
elif ( k == 1 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes3, y1, y2, self.addGreenTickToLegend)
elif ( k == 2 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes2, 0.2, 0.8, self.addGreenTickToLegend)
self.green_tick_shown = True
self.canvas.draw()
def on_draw(self, show_legend=True):
log_.debug('Entering on_draw', calling=self.calling)
if self.sp is None:
log_.debug('No sp in on_draw', calling=self.calling)
return
if self.axes is None:
log_.debug('Calling make_axes from on_draw (self.axes is None)', calling=self.calling)
self.call_on_draw=False
self.make_axes()
self.init_axes()
log_.debug('back from make_axes from on_draw', calling=self.calling)
self.call_on_draw=True
if self.do_save:
self.save_axes()
self.axes.cla()
self.sp.plot_ax1(self.axes, show_legend)
k = self.sp.get_conf('line_tick_ax')
if self.show_line_ticks_action.isChecked() and ( k == 0 ):
y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
self.sp.plot_line_ticks(self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], show_legend=show_legend)
if self.sp.get_conf('cont_plot', False):
self.sp.plot_conts(self.axes)
if self.residual_GroupBox.isChecked():
self.axes3.cla()
self.sp.plot_ax3(self.axes3, show_legend)
if self.show_line_ticks_action.isChecked() and ( k == 1 ):
y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
self.sp.plot_line_ticks(self.axes3, y1, y2)
if self.show_line_ticks_action.isChecked() and ( k == 2 ):
self.axes2.cla()
# self.sp.plot_ax2(self.axes2)
self.sp.plot_line_ticks(self.axes2, 0.2, 0.8)
if self.residual_GroupBox.isChecked():
self.axes3.set_xlabel(r'Wavelength ($\AA$)')
self.axes3.set_ylabel(r'Residual')
#elif self.show_line_ticks_action.isChecked() and self.sp.get_conf(') and self.axes2 is not None:
elif self.show_line_ticks_action.isChecked() and ( k == 2 ):
self.axes2.set_xlabel(r'Wavelength ($\AA$)')
else:
self.axes.set_xlabel(r'Wavelength ($\AA$)')
self.axes.set_ylabel(r'F$_\lambda$')
self.restore_axes()
# self.update_lim_boxes()
if self.adjust_fig_action.isChecked():
plt.tight_layout(0.1)
self.canvas.draw()
self.statusBar().showMessage('Redraw is finished.', 4000)
log_.debug('Exit on_draw', calling=self.calling)
self.magenta_tick_shown = False
def show_lines_clicked(self):
if self.lineIDs_GroupBox.isChecked():
self.show_line_ticks_action.setChecked(True)
self.plot_lines_action.setChecked(True)
self.sp.set_conf('plot_lines_of_selected_ions', True)
self.set_ion()
else:
self.show_line_ticks_action.setChecked(False)
self.plot_lines_action.setChecked(False)
self.sp.set_conf('plot_lines_of_selected_ions', False)
self.make_axes()
def line_tick_color_clicked(self):
color = QtGui.QColorDialog.getColor()
self.sp.set_conf('line_tick_color', str(color.name()))
if self.show_line_ticks_action.isChecked():
self.make_axes()
def toggle_show_true_ions(self):
self.show_true_ions = not self.show_true_ions
def toggle_legend_clicked(self):
fontsize_list = ['small', 'medium', 'large']
i = fontsize_list.index(self.sp.legend_fontsize) + 1
if i == len(fontsize_list):
self.sp.legend_fontsize = fontsize_list[0]
self.sp.legend_loc = (self.sp.legend_loc)%2+1
else:
self.sp.legend_fontsize = fontsize_list[i]
self.make_axes()
def enable_tooltips_action_clicked(self):
if self.enable_tooltips_action.isChecked():
self.enableToolTips()
self.sp.set_conf('qt_enable_tooltips', True)
log_.debug('Tooltips enabled', calling=self.calling)
else:
self.disableToolTips()
self.sp.set_conf('qt_enable_tooltips', False)
log_.debug('Tooltips disabled', calling=self.calling)
def adjust_fig_action_clicked(self):
if self.adjust_fig_action.isChecked():
self.sp.set_conf('fig_adjust', True)
log_.debug('Adjust figure enabled', calling=self.calling)
else:
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
log_.debug('Adjust figure disabled', calling=self.calling)
self.draw_ion()
def show_uncor_obs_action_clicked(self):
if self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = True
else:
self.sp.show_uncor_spec = False
self.set_plot_limits_and_draw()
def disableToolTips(self):
self.lineIDs_GroupBox.setToolTip('')
self.residual_GroupBox.setToolTip('')
self.run_button.setToolTip('')
self.adjust_button.setToolTip('')
self.line_info_box.setToolTip('')
self.ebv_box.setToolTip('')
self.obj_velo_box.setToolTip('')
self.sp_min_box.setToolTip('')
self.sp_max_box.setToolTip('')
self.xlim_min_box.setToolTip('')
self.xlim_max_box.setToolTip('')
self.y1lim_min_box.setToolTip('')
self.y1lim_max_box.setToolTip('')
self.y3lim_min_box.setToolTip('')
self.y3lim_max_box.setToolTip('')
self.fix_axes_cb.setToolTip('')
self.cut_cb.setToolTip('')
self.ion_cb.setToolTip('')
self.sp_norm_box.setToolTip('')
self.resol_box.setToolTip('')
self.cut2_box.setToolTip('')
self.ion_box.setToolTip('')
self.line_sort_menu.setToolTip('')
self.line_field_menu.setToolTip('')
self.line_tick_ax_menu.setToolTip('')
self.line_tick_pos_menu.setToolTip('')
self.diff_lines_menu.setToolTip('')
self.verbosity_menu.setToolTip('')
self.style_menu.setToolTip('')
def enableToolTips(self):
self.lineIDs_GroupBox.setToolTip(self.lineIDs_GroupBox_ToolTip)
self.residual_GroupBox.setToolTip(self.residual_GroupBox_ToolTip)
self.run_button.setToolTip(self.run_button_ToolTip)
self.adjust_button.setToolTip(self.adjust_button_ToolTip)
self.line_info_box.setToolTip(self.line_info_box_ToolTip)
self.ebv_box.setToolTip(self.ebv_box_ToolTip)
self.obj_velo_box.setToolTip(self.obj_velo_box_ToolTip)
self.sp_min_box.setToolTip(self.sp_min_box_ToolTip)
self.sp_max_box.setToolTip(self.sp_max_box_ToolTip)
self.xlim_min_box.setToolTip(self.xlim_min_box_ToolTip)
self.xlim_max_box.setToolTip(self.xlim_max_box_ToolTip)
self.y1lim_min_box.setToolTip(self.y1lim_min_box_ToolTip)
self.y1lim_max_box.setToolTip(self.y1lim_max_box_ToolTip)
self.y3lim_min_box.setToolTip(self.y3lim_min_box_ToolTip)
self.y3lim_max_box.setToolTip(self.y3lim_max_box_ToolTip)
self.fix_axes_cb.setToolTip(self.fix_axes_cb_ToolTip)
self.cut_cb.setToolTip(self.cut_cb_ToolTip)
self.ion_cb.setToolTip(self.ion_cb_ToolTip)
self.sp_norm_box.setToolTip(self.sp_norm_box_ToolTip)
self.resol_box.setToolTip(self.resol_box_ToolTip)
self.cut2_box.setToolTip(self.cut2_box_ToolTip)
self.ion_box.setToolTip(self.ion_box_ToolTip)
self.line_sort_menu.setToolTip(self.line_sort_menu_ToolTip)
self.line_field_menu.setToolTip(self.line_field_menu_ToolTip)
self.line_tick_ax_menu.setToolTip(self.line_tick_ax_menu_ToolTip)
self.line_tick_pos_menu.setToolTip(self.line_tick_pos_menu_ToolTip)
self.diff_lines_menu.setToolTip(self.diff_lines_menu_ToolTip)
self.verbosity_menu.setToolTip(self.verbosity_menu_ToolTip)
self.style_menu.setToolTip(self.style_menu_ToolTip)
def show_line_ticks_action_clicked(self):
self.set_ion()
if self.plot_lines_action.isChecked():
self.sp.set_conf('plot_lines_of_selected_ions', True)
else:
self.sp.set_conf('plot_lines_of_selected_ions', False)
if self.show_line_ticks_action.isChecked() or self.plot_lines_action.isChecked():
self.lineIDs_GroupBox.setChecked(True)
else:
self.lineIDs_GroupBox.setChecked(False)
self.make_axes()
def plot_cont_action_clicked(self):
if self.plot_cont_action.isChecked():
self.sp.set_conf('cont_plot', True)
else:
self.sp.set_conf('cont_plot', False)
self.on_draw()
def ion_cb_changed(self):
if self.ion_cb.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.selected_ions_action.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.selected_ions_action.setChecked(False)
self.make_axes()
def cut_cb_changed(self):
if self.cut_cb.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.selected_intensities_action.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.selected_intensities_action.setChecked(False)
self.make_axes()
def selected_lines_clicked(self):
if self.selected_ions_action.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.ion_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.ion_cb.setChecked(False)
if self.selected_intensities_action.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.cut_cb.setChecked(False)
self.make_axes()
def diff_lines_by_process_clicked(self):
if self.diff_lines_by_process_action.isChecked():
self.sp.set_conf('diff_lines_by_process', True)
else:
self.sp.set_conf('diff_lines_by_process', False)
self.make_axes()
def editing_lines_clicked(self):
if self.editing_lines_action.isChecked():
self.sp.set_conf('qt_allow_editing_lines', True)
else:
self.sp.set_conf('qt_allow_editing_lines', False)
def update_lines_clicked(self):
if self.update_lines_action.isChecked():
self.sp.set_conf('qt_update_after_editing_lines', True)
else:
self.sp.set_conf('qt_update_after_editing_lines', False)
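# Step 'index_of_current_ion' forward through the selected ions; the
# wrap-around value -1 appears to correspond to no single ion being
# highlighted.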
def cycle_forwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(-1, len(self.sp.selected_ions_data)-1):
j += 1
else:
j = -1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
def cycle_backwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(0, len(self.sp.selected_ions_data)):
j -= 1
else:
j = len(self.sp.selected_ions_data)-1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
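# Ask for a text file and draw dark-magenta ticks at the wavelengths listed
# in it (first whitespace-separated token of each line), restricted to the
# current x-range; an entry with the file name is added to the legend.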
def show_line_ticks_from_file(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
if self.tick_file is None:
path = ''
else:
path = self.tick_file
path = unicode(QtGui.QFileDialog.getOpenFileName(self, 'Open file', path, file_choices))
if path:
self.tick_file = path
else:
return
f = open(self.tick_file, 'r')
lines = f.readlines()
f.close()
color = 'darkmagenta'
posTick = self.sp.get_conf('line_tick_pos')
y1, y2 = self.get_line_tick_lim(posTick)
k = self.sp.get_conf('line_tick_ax')
if k == 2:
k = 1
y1 = 0.2
y2 = 0.8
elif k == 1 and self.residual_GroupBox.isChecked():
k = 1
else:
k = 0
dy = (y2-y1)*0.30
if self.magenta_tick_shown == True:
self.draw_ion()
for line in lines:
line = line.strip()
line = line.split(' ')[0]
if self.isFloat(line):
wavelength = np.float(line)
if wavelength > self.x_plot_lims[0] and wavelength < self.x_plot_lims[1]:
self.fig.axes[k].axvline( wavelength, y1+dy, y2-dy, color = color, linestyle = 'solid', linewidth = 1.5 )
self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', linewidth = 1.5, label = self.tick_file.split('/')[-1] )
self.fig.axes[k].legend(loc=self.sp.legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
self.magenta_tick_shown = True
def residual_box_clicked(self):
if self.residual_GroupBox.isChecked():
self.sp.set_conf('qt_plot_residuals', True)
else:
self.sp.set_conf('qt_plot_residuals', False)
self.make_axes()
def make_axes(self):
log_.debug('Entering make_axes', calling=self.calling)
if self.call_on_draw:
self.save_axes()
self.fig.clf()
i_ax1 = 0
i_ax2 = 1
i_ax3 = 2
rspan_ax1 = 4
rspan_ax2 = 1
rspan_ax3 = 4
n_subplots = rspan_ax1
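# The figure is a one-column grid: the main spectrum always spans rspan_ax1
# rows; a one-row strip for line ticks (axes2) and/or a rspan_ax3-row
# residual panel (axes3) are appended below when the corresponding options
# are enabled.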
k = self.sp.get_conf('line_tick_ax')
ShowAx2 = self.show_line_ticks_action.isChecked() and ( k == 2 )
if ShowAx2:
i_ax2 = n_subplots
n_subplots += rspan_ax2
if self.residual_GroupBox.isChecked():
i_ax3 = n_subplots
n_subplots += rspan_ax3
if self.axes is not None:
del(self.axes)
self.axes = plt.subplot2grid((n_subplots,1), (i_ax1,0), rowspan=rspan_ax1)
self.sp.ax1 = self.axes
if ShowAx2:
if self.axes2 is not None:
del(self.axes2)
self.axes2 = plt.subplot2grid((n_subplots,1), (i_ax2,0), rowspan=rspan_ax2, sharex=self.axes )
self.axes2.tick_params( left='off',labelleft='off' )
self.sp.ax2 = self.axes2
self.axes.get_xaxis().set_visible(False)
else:
self.axes2 = None
self.sp.ax2 = None
if self.residual_GroupBox.isChecked():
if self.axes3 is not None:
del(self.axes3)
self.axes3 = plt.subplot2grid((n_subplots,1), (i_ax3,0), rowspan=rspan_ax3, sharex=self.axes )
self.sp.ax3 = self.axes3
if ShowAx2:
self.axes2.get_xaxis().set_visible(False)
self.axes.get_xaxis().set_visible(False)
else:
self.axes3 = None
self.sp.ax3 = self.axes3
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
if self.call_on_draw:
log_.debug('Calling on_draw from make_axes', calling=self.calling)
self.do_save = False
self.on_draw()
self.do_save = True
log_.debug('Exit make_axes', calling=self.calling)
def init_axes(self):
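"""Set the initial plot limits from the configuration, deriving them from the observed or synthetic spectrum when they are not defined."""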
self.x_plot_lims = self.sp.get_conf('x_plot_lims')
if self.x_plot_lims is None:
self.x_plot_lims = (np.min(self.sp.w), np.max(self.sp.w))
self.y1_plot_lims = self.sp.get_conf('y1_plot_lims')
if self.y1_plot_lims is None:
mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
r = 1.2
if self.sp.sp_synth_lr is None:
a = np.min(self.sp.f[mask])
b = np.max(self.sp.f[mask])
else:
a = np.min(self.sp.sp_synth_lr[mask])
b = np.max(self.sp.sp_synth_lr[mask])
self.y1_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
self.y2_plot_lims = self.sp.get_conf('y2_plot_lims')
if self.y2_plot_lims is None:
self.y2_plot_lims = (-0.5, 1.5)
self.y3_plot_lims = self.sp.get_conf('y3_plot_lims')
if self.y3_plot_lims is None:
mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
r = 1.2
if self.sp.sp_synth_lr is None:
self.y3_plot_lims = (-1,1)
else:
a = np.min((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
b = np.max((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
self.y3_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
log_.debug('Axes initialized. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def save_axes(self):
if self.axes is not None:
self.x_plot_lims = self.axes.get_xlim()
self.y1_plot_lims = self.axes.get_ylim()
self.xscale = self.axes.get_xscale()
self.yscale = self.axes.get_yscale()
if self.axes2 is not None:
self.y2_plot_lims = self.axes2.get_ylim()
if self.axes3 is not None:
self.y3_plot_lims = self.axes3.get_ylim()
self.sp.save_axes()
log_.debug('Axes saved. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def restore_axes(self):
if self.x_plot_lims is not None:
if self.axes is not None:
self.axes.set_xlim(self.x_plot_lims)
log_.debug('X-axes restored to {}'.format(self.axes.get_xlim()), calling=self.calling)
else:
log_.debug('axes is None', calling=self.calling)
else:
log_.debug('x_plot_lims is None', calling=self.calling)
if self.y1_plot_lims is not None:
if self.axes is not None:
self.axes.set_ylim(self.y1_plot_lims)
if self.y2_plot_lims is not None:
if self.axes2 is not None:
self.axes2.set_ylim(self.y2_plot_lims)
if self.y3_plot_lims is not None:
if self.axes3 is not None:
self.axes3.set_ylim(self.y3_plot_lims)
if self.xscale is not None:
self.axes.set_xscale(self.xscale)
log_.debug('X scale set to {}'.format(self.xscale))
if self.yscale is not None:
self.axes.set_yscale(self.yscale)
log_.debug('Y scale set to {}'.format(self.yscale))
log_.debug('Axes restored. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def print_axes(self):
log_.debug('lims: {} {} {} {}'.format(self.x_plot_lims, self.y1_plot_lims, self.y2_plot_lims, self.y3_plot_lims), calling=self.calling)
log_.debug('Axes IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
log_.debug(' IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
def exec_init(self):
if self.init_file_name is None:
self.get_init_filename()
if self.init_file_name:
self.statusBar().showMessage('Running synthesis ...')
QtGui.QApplication.processEvents()
self.start_spectrum()
self.do_save = False
self.on_draw()
self.do_save = True
self.restore_axes()
self.update_lim_boxes()
self.save_parameters_file = None
else:
log_.warn('A filename must be given', calling=self.calling)
sys.exit('An initialization filename must be given')
def get_init_filename(self):
file_choices = "Python initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open pySSN initialization file'
init_file = str(QtGui.QFileDialog.getOpenFileName(self, title, self.init_file_name, file_choices))
if init_file and os.path.isfile(init_file):
self.init_file_name = init_file
else:
self.init_file_name = ''
def select_init(self):
old_name = self.init_file_name
self.get_init_filename()
if self.init_file_name:
self.exec_init()
else:
self.init_file_name = old_name
def save_pars(self):
path = self.sp.get_conf('save_parameters_filename')
keys = self.sp.default_keys
if '__builtins__' in keys:
keys.remove('__builtins__')
keys.sort()
with open(path, 'w') as f:
for key in keys:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def save_pars_as(self):
if self.save_parameters_file is None:
path = self.init_file_name
else:
path = self.save_parameters_file
keys = self.sp.default_keys
keys_to_be_removed = ['__builtins__', 'plot_magenta', 'label_magenta', 'plot_cyan', 'label_cyan']
for key in keys_to_be_removed:
if key in keys:
keys.remove(key)
keys.sort()
file_choices = "pySSN initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Save synthesis and plot parameters'
selectedFilter = 'pySSN initialization files (*init.py) (*init.py)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, title, path, file_choices, selectedFilter))
if path:
with open(path, 'w') as f:
for key in keys:
if key == 'instr_prof':
value = self.sp.format_instr_prof()
else:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.save_parameters_file = path
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def teste_instr_prof(self, prof):
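"""Check the instrumental-profile dictionary for missing or invalid entries; return '' if it is acceptable, otherwise a short error message."""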
if prof is None:
return 'not defined'
keys = prof.keys()
keys.remove('comment')
if not 'width' in keys:
return 'The parameter \'width\' was not found.'
if prof['width'] == 0.0:
return 'The value of \'width\' can not be zero'
if not (self.sp.get_key_indexes('Bb', prof)==self.sp.get_key_indexes('Br', prof)==
self.sp.get_key_indexes('beta', prof)==self.sp.get_key_indexes('alpha', prof)):
return 'Invalid indexes for the parameters \'Bb\', \'Br\', \'alpha\', or \'beta\''
if not all((type(prof[key])==float or type(prof[key])==int) for key in keys):
return 'The values of parameters must be numbers.'
return ''
def apply_instr_prof(self):
def do_update():
path = str(prof_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
prof = user_module['instr_prof']
self.sp.set_conf('instr_prof', prof)
log_.message('new instrumental profile is ok', calling = self.calling)
except:
title = 'Error reading instrument profile'
msg = 'Unable to read instrumental profile'
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
msg = self.teste_instr_prof(prof)
if not msg:
self.update_profile()
else:
title = 'Error in the instrument profile'
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
def toggle_statusbar():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, 2.1*self.instr_prof_dialog_height)
else:
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
def get_window_size_and_position():
if self.instr_prof_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.instr_prof_dialog_width = width
self.instr_prof_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.instr_prof_dialog_x = sG.width()-self.instr_prof_dialog_width
self.instr_prof_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.instr_prof_dialog_width = self.instr_prof_dialog.width()
self.instr_prof_dialog_height = self.instr_prof_dialog.height()
self.instr_prof_dialog_x = self.instr_prof_dialog.pos().x()
self.instr_prof_dialog_y = self.instr_prof_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.instr_prof_dialog = QtGui.QDialog()
self.instr_prof_dialog.setWindowFlags(self.instr_prof_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
self.instr_prof_dialog.move(self.instr_prof_dialog_x,self.instr_prof_dialog_y)
self.instr_prof_dialog.setWindowTitle('instrument profile dialog')
prof_box = QtGui.QTextEdit()
prof_box.setFontFamily("Courier")
prof_box.setText('instr_prof = ' + self.sp.format_instr_prof())
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('instr_prof.html').read()
# This text should go to a file open with text=open('instr_prof.html').read()
text = """<title> Instrumental profile help</title>
<p>The instrumental profile is defined by the <a href="https://en.wikibooks.org/wiki/Python_Programming/Dictionaries">python dictionary</a> <b>instr_prof</b>.
<p>The main component of the instrumental profile is set by the parameter <b>width</b>, which is the only indispensable parameter.</p>
<p>If <b>width</b> > 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>, P ∝ exp(-(λ/<b>width</b>)<sup>2</sup>).
In this case, <b>width</b> is related to the normal full-width at half maximum by <b>width</b> = FWHM/(2(ln2)<sup>1/2</sup>) = FWHM/1.665.</p>
<p>If <b>width</b> < 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/rectangular_distribution">rectangular distribution</a>, P = 1 for -|<b>width</b>|/2 < λ < |<b>width</b>|/2, and P = 0 for all other values of λ.</p>
<p>A variable number of optional components can be included, each defined by four parameters, <b>Bb</b>, <b>Br</b>, <b>alpha</b>, and <b>beta</b>, and following P ∝ <b>B</b>exp(-(λ/<b>beta</b>)<sup><b>alpha</b></sup>).
<b>Bb</b> and <b>Br</b> are the intensity scale parameters for the bluish and reddish sides of the profile, respectively.</p>
<p>If more than one optional component is in use, the parameters must be indexed as <b>alpha_1</b>, <b>alpha_2</b>, etc.</p>
Special cases for the optional components:
<ul>
<li><b>alpha</b> = 2 produces a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>.
<li><b>alpha</b> = 2, <b>Bb</b> = 0 (or <b>Br</b> = 0) produces a <a href="https://en.wikipedia.org/wiki/Half_normal_distribution">half-Gaussian distribution</a>.
<li><b>alpha</b> = 1 produces an <a href="https://en.wikipedia.org/wiki/Exponential_distribution">exponential distribution</a>.
</ul>
<p>A comment may be included in <b>instr_prof</b>.</p>
<p>Examples:</p>
<ol>
<li>instr_prof = {'width': 0.5}<br>
<li>instr_prof = {'width': 0.5, 'comment': 'Gaussian profile'}<br>
<li>instr_prof = {'width': 0.5, 'Bb':0.00016, 'Br':9e-05, 'beta': 2.2, 'alpha': 0.45}<br>
<li>instr_prof = {'width': 0.5, 'Bb_1':0.00016, 'Br_1':9e-05, 'beta_1': 2.2, 'alpha_1': 0.45, 'Bb_2': 0.0014, 'Br_2':0.001, 'beta_2': 1.4, 'alpha_2': 0.75}<br>
</ol>"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
vbox.addWidget(prof_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.instr_prof_dialog.close)
self.instr_prof_dialog.setLayout(vbox)
self.instr_prof_dialog.setWindowModality(QtCore.Qt.NonModal)
self.instr_prof_dialog.show()
def refine_wavelengths(self):
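"""Open the non-modal dialog used to edit 'lambda_shift_table', either as a python list or as a two-column table; 'Update' re-reads the observations and reruns the synthesis."""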
def table2list(text):
text = str(text)
text = text.splitlines()
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespace.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
return 'lambda_shift_table = None'
else:
return 'lambda_shift_table = [{}]'.format(s)
def toggle_table():
self.refine_wave_as_table = not self.refine_wave_as_table
if self.refine_wave_as_table:
text = str(edit_box.toPlainText()).strip()
edit_box.clear()
text = text.replace('lambda_shift_table','')
text = text.strip(' =[]')
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
edit_box.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
text = table2list(edit_box.toPlainText())
if text == '':
self.refine_wave_as_table = True
return
edit_box.clear()
edit_box.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def do_update():
old_value = self.sp.get_conf('lambda_shift_table')
if self.refine_wave_as_table:
path = table2list(edit_box.toPlainText())
if path == '':  # table2list() returns '' when a line could not be parsed
return
else:
path = str(edit_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
value = user_module['lambda_shift_table']
self.sp.set_conf('lambda_shift_table', value)
log_.message('new \'lambda_shift_table\' is ok', calling = self.calling)
except:
title = 'Error'
msg = 'Unable to read \'lambda_shift_table\''
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
self.sp.show_uncor_spec = True
self.sp.init_obs()
if self.sp.read_obs_error:
self.sp.set_conf('lambda_shift_table', old_value)
if self.showErrorBox:
title = 'Error'
msg = self.sp.read_obs_error
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
else:
self.rerun()
if not self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = False
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, 2.5*self.refine_wave_dialog_height)
else:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
def get_window_size_and_position():
if self.refine_wave_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.refine_wave_dialog_width = width
self.refine_wave_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.refine_wave_dialog_x = sG.width()-self.refine_wave_dialog_width
self.refine_wave_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.refine_wave_dialog_width = self.refine_wave_dialog.width()
self.refine_wave_dialog_height = self.refine_wave_dialog.height()
self.refine_wave_dialog_x = self.refine_wave_dialog.pos().x()
self.refine_wave_dialog_y = self.refine_wave_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.refine_wave_dialog = QtGui.QDialog()
self.refine_wave_dialog.setWindowFlags(self.refine_wave_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
self.refine_wave_dialog.move(self.refine_wave_dialog_x,self.refine_wave_dialog_y)
self.refine_wave_dialog.setWindowTitle('wavelength-refining dialog')
edit_box = QtGui.QTextEdit()
edit_box.setFontFamily("Courier")
self.refine_wave_as_table = False
edit_box.setText('lambda_shift_table = ' + str(self.sp.get_conf('lambda_shift_table')))
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('wave_refining.html').read()
# This text should go to a file open with text=open('wave-refining').read()
text = """<title> Wavelength-refining help</title>
<p>The wavelength calibration of the observational spectrum can be refined with the use of
the <a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>lambda_shift_table</b>.
Each element of this list is an ordered pair of numbers (λ, Δλ), where Δλ is the wavelength shift at the wavelength λ needed to improve the calibration, after the Doppler correction.</p>
<p>The data in <b>lambda_shift_table</b> will be linearly interpolated to provide the corrected wavelengths.
Outside the range of wavelengths given in <b>lambda_shift_table</b>, the correction will be extrapolated to zero.</p>
<p>To set aside the wavelength-refining, set <b>lambda_shift_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>lambda_shift_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]</p></li>
<li><p>lambda_shift_table = None (to set aside the wavelength-refining)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>lambda_shift_table</b> as a two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>lambda_shift_table</b> list from the two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to refine the wavelength calibration and redo the synthesis.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
vbox.addWidget(edit_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.refine_wave_dialog.close)
self.refine_wave_dialog.setLayout(vbox)
self.refine_wave_dialog.setWindowModality(QtCore.Qt.NonModal)
self.refine_wave_dialog.show()
def plot_user_cont(self):
# Note: 'color', 'label' and 'current_legend_loc' were undefined in the original; plausible defaults are assumed below.
color = 'green'
label = 'user-defined continuum'
current_legend_loc = self.sp.legend_loc
self.fig.axes[0].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
self.fig.axes[0].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
def user_cont_table2list(self, text):
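"""Convert the two-column table from the user-continuum dialog back into the text of the 'cont_user_func' and 'cont_user_table' assignments; return '' if a line cannot be parsed."""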
text = str(text)
text = text.splitlines()
text = sorted(text)
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespace.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
s = 'None'
else:
s = '[{}]'.format(s)
return 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(self.sp.get_conf('cont_user_func'), s)
def update_user_cont(self):
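"""Read the user-continuum dialog, validate the interpolation kind and the control-point list, store them in 'cont_user_func'/'cont_user_table' and redraw."""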
msg = ''
old_value = self.sp.get_conf('cont_user_table')
old_kind = self.sp.get_conf('cont_user_func')
if self.interpol_cont_as_table:
path = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
if path == '':  # user_cont_table2list() returns '' when a line could not be parsed
return
else:
path = str(self.user_cont_editBox.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
kind = user_module['cont_user_func']
log_.message('new \'cont_user_func\' is ok', calling = self.calling)
value = user_module['cont_user_table']
log_.message('new \'cont_user_table\' is ok', calling = self.calling)
except:
msg = 'Unable to read \'cont_user_func\' or \'cont_user_table\''
path = None
kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
if msg == '':
if kind not in kinds:
msg = 'Invalid function'
if msg != '':
title = 'Error'
msg = 'Problem in user-defined continuum interpolation.\n{}'.format(msg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
if old_value != value or old_kind != kind:
self.cont_par_changed = True
if value is not None and len(value) == 0:
value = None
self.sp.set_conf('cont_user_table', value)
self.sp.set_conf('cont_user_func', kind)
self.sp.update_user_cont()
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
else:
self.set_plot_limits_and_draw()
def user_cont_list2table(self, points):
self.user_cont_editBox.clear()
for point in points:
line = '{:<7} {}'.format(str(point[0]).strip(),str(point[1]).strip())
self.user_cont_editBox.append(line)
def user_continuum(self):
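"""Open the non-modal dialog used to edit the user-defined continuum: as text, as a two-column table, or by adding/deleting control points with the mouse."""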
def save_initial_plot_pars():
self.init_cont_line_num = self.line_info_box.text()
self.init_cont_ion = self.ion_box.text()
self.init_cont_xmin = self.xlim_min_box.text()
self.init_cont_xmax = self.xlim_max_box.text()
self.init_cont_y1min = self.y1lim_min_box.text()
self.init_cont_y1max = self.y1lim_max_box.text()
self.init_cont_y3min = self.y3lim_min_box.text()
self.init_cont_y3max = self.y3lim_max_box.text()
self.init_cont_legend_fontsize = self.sp.legend_fontsize
self.init_cont_legend_loc = self.sp.legend_loc
self.init_cont_sel_ions_only = self.selected_ions_action.isChecked()
def redo_initial_plot():
self.line_info_box.setText(self.init_cont_line_num)
self.ion_box.setText(self.init_cont_ion)
self.xlim_min_box.setText(self.init_cont_xmin)
self.xlim_max_box.setText(self.init_cont_xmax)
self.y1lim_min_box.setText(self.init_cont_y1min)
self.y1lim_max_box.setText(self.init_cont_y1max)
self.y3lim_min_box.setText(self.init_cont_y3min)
self.y3lim_max_box.setText(self.init_cont_y3max)
self.sp.legend_fontsize = self.init_cont_legend_fontsize
self.sp.legend_loc = self.init_cont_legend_loc
self.selected_ions_action.setChecked(self.init_cont_sel_ions_only)
self.selected_lines_clicked()
self.set_plot_limits_and_draw()
def toggle_table():
self.interpol_cont_as_table = not self.interpol_cont_as_table
if self.interpol_cont_as_table:
text = str(self.user_cont_editBox.toPlainText()).strip()
text = text[text.find('[')+1:text.find(']')]
text = text.replace('\n','')
self.user_cont_editBox.clear()
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
self.user_cont_editBox.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
self.get_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
self.on_draw()
text = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
if text == '':
self.interpol_cont_as_table = True
return
self.user_cont_editBox.clear()
self.user_cont_editBox.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, 2.5*self.interpol_cont_dialog_height)
else:
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
def get_window_size_and_position():
if self.interpol_cont_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.interpol_cont_dialog_width = width
self.interpol_cont_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.interpol_cont_dialog_x = sG.width()-self.interpol_cont_dialog_width
self.interpol_cont_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.interpol_cont_dialog_width = self.interpol_cont_dialog.width()
self.interpol_cont_dialog_height = self.interpol_cont_dialog.height()
self.interpol_cont_dialog_x = self.interpol_cont_dialog.pos().x()
self.interpol_cont_dialog_y = self.interpol_cont_dialog.pos().y()
def get_points():
self.get_user_cont_points = not self.get_user_cont_points
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
if self.get_user_cont_points:
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('background-color:red;')
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
if self.interpol_cont_as_table == False:
toggle_table()
else:
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
def del_points():
self.del_user_cont_points = not self.del_user_cont_points
self.get_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
if self.del_user_cont_points:
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('background-color:red;')
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
if self.interpol_cont_as_table == False:
toggle_table()
else:
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
def on_close():
redo_initial_plot()
self.interpol_cont_dialog.close()
def do_update():
self.get_user_cont_points = False
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
self.update_user_cont()
self.showHelpBrowser = False
get_window_size_and_position()
save_initial_plot_pars()
self.ion_box.setText('')
self.selected_ions_action.setChecked(True)
self.selected_lines_clicked()
self.set_plot_limits_and_draw()
self.interpol_cont_dialog = QtGui.QDialog()
self.interpol_cont_dialog.setWindowFlags(self.interpol_cont_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
#self.interpol_cont_dialog.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowStaysOnTopHint)
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
self.interpol_cont_dialog.move(self.interpol_cont_dialog_x,self.interpol_cont_dialog_y)
self.interpol_cont_dialog.setWindowTitle('user-defined continuum dialog')
self.user_cont_editBox = QtGui.QTextEdit()
self.user_cont_editBox.setFontFamily("Courier")
self.interpol_cont_as_table = False
self.get_user_cont_points = False
self.del_user_cont_points = False
text = 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(str(self.sp.get_conf('cont_user_func')), self.sp.get_conf('cont_user_table'))
self.user_cont_editBox.setText(text)
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('user_continuum.html').read()
# This text should go to a file open with text=open('user_continuum').read()
text = """<title> User-defined continuum help</title>
<p>A user-defined continuum can be added to the continuum calculated from other sources (electron recombination, free-free transition, two-photon, black-body and
power-law emission). It is obtained by the interpolation of the data contained in the
<a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>cont_user_table</b>. Each element of this list is an ordered pair of numbers
(λ, <i>f</i>), where <i>f</i> is the additional continuum flux at the wavelength λ.</p>
<p>The parameter <b>cont_user_func</b> defines the kind of the interpolation. Possible values are 'linear', 'quadratic', 'cubic', corresponding to linear
interpolation, second and third order spline interpolation, respectively. Outside the range of wavelengths given in <b>cont_user_table</b>, the user continuum
component will be extrapolated to zero.</p>
<p>There are three modes of editing the interpolation control points: editing the list <b>cont_user_table</b> directly or as a two-column table, or clicking
with the mouse on the figure at the intended level of total continuum (see Button functions below). To set aside the user-defined continuum, set
<b>cont_user_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>cont_user_func = 'linear'<br>
cont_user_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]
</p></li>
<li><p>cont_user_table = None (to set aside the user-defined continuum)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>cont_user_table</b> as a two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>cont_user_table</b> list from the two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Add points</span></b> to activate/deactivate the mode that allows new control points to be added by mouse-clicking on the
figure. Each time a new control point is included, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Del points</span></b> to activate/deactivate the mode that allows clicking on the figure to delete the nearest
(in wavelength) control point. Each time a control point is deleted, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to incorporate the changes in the user-defined continuum.</p></li>
<li><p>Click on <b><span style="color:red">Close</span></b> to close the dialog and return to the preceding plot setting.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Retry|
QtGui.QDialogButtonBox.Ignore|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
buttonBox.button(QtGui.QDialogButtonBox.Retry).setText("Add points")
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setText("Del points")
vbox.addWidget(self.user_cont_editBox,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.button(QtGui.QDialogButtonBox.Retry).clicked.connect(get_points)
buttonBox.button(QtGui.QDialogButtonBox.Ignore).clicked.connect(del_points)
buttonBox.rejected.connect(on_close)
#self.interpol_cont_dialog.onCloseEvet(on_close)
self.interpol_cont_dialog.setLayout(vbox)
self.interpol_cont_dialog.setWindowModality(QtCore.Qt.NonModal)
self.interpol_cont_dialog.show()
def isValidFilename(self, filename):
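"""Return True if the file can be opened for reading or created for writing, otherwise False."""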
if filename is None:
return False
try:
with open(filename, 'r'):
return True
except IOError:
try:
with open(filename, 'w'):
return True
except IOError:
return False
def set_cosmetic_file(self):
file_choices = "Line cosmetic files (*cosm*.dat) (*cosm*.dat);;Data files (*.dat) (*.dat);;All files (*) (*)"
title = 'Set the line cosmetic file'
cosmetic_file = str(QtGui.QFileDialog.getSaveFileName(self, title, '', file_choices, options=QtGui.QFileDialog.DontConfirmOverwrite))
msg = "Line cosmetic file '{}' not valid!".format(cosmetic_file)
if cosmetic_file and not self.isValidFilename(cosmetic_file):
QtGui.QMessageBox.critical(self, 'pySSN', msg, QtGui.QMessageBox.Ok )
cosmetic_file = None
if cosmetic_file:
self.sp.set_conf('do_cosmetik', True)
dir_ = os.path.dirname(cosmetic_file)
if dir_ == os.getcwd():
cosmetic_file = cosmetic_file.split('/')[-1]
self.sp.set_conf('fic_cosmetik', cosmetic_file)
self.sp.fic_cosmetik = cosmetic_file
if self.sp is not None:
self.set_status_text()
if self.axes is not None:
self.adjust()
def empty_cosmetic_file(self):
if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
return
title = 'pySSN: cosmetic file'
msg = 'All lines in the cosmetic file will be removed.\nConfirm?'
ret = QtGui.QMessageBox.question(self, title, msg, QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel )
if ret == QtGui.QMessageBox.Ok:
f = open(self.sp.fic_cosmetik, 'w')
f.close()
def order_lines(self, lines):
if lines is None:
return None
numbers = []
for line in lines:
line_num = int(self.sp.fieldStrFromLine(line,'num'))
numbers.append(line_num)
lines = [x for _,x in sorted(zip(numbers, lines))]
return lines
def remove_duplicate_lines(self, lines):
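"""Return the given cosmetic lines with duplicated line numbers removed, keeping the first occurrence of each."""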
if lines is None:
return None
numbers = []
output = []
for line in lines:
line_num = int(self.sp.fieldStrFromLine(line,'num'))
if line_num not in numbers:
numbers.append(line_num)
output.append(line)
return output
def order_cosmetic_file(self):
if self.sp.fic_cosmetik is None or not os.path.isfile(self.sp.fic_cosmetik):
return
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
cosmetic_lines = self.order_lines(cosmetic_lines)
n0 = len(cosmetic_lines)
cosmetic_lines = self.remove_duplicate_lines(cosmetic_lines)
n1 = len(cosmetic_lines)
f = open(self.sp.fic_cosmetik, 'w')
f.writelines(cosmetic_lines)
f.close()
if n0 > n1:
s = ' and the duplicate lines removed'
else:
s = ''
msg = 'The cosmetic file \'{0:}\' was ordered{1:}.'.format(self.sp.fic_cosmetik, s)
self.statusBar().showMessage(msg, 4000)
def clean_cosmetic_file(self):
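"""Look for cosmetic entries that do not actually change anything and, after user confirmation, remove them from the cosmetic file."""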
def ShowCleanMessage(UnchangedLineList):
nUL = len(UnchangedLineList)
if nUL == 1:
s1 = ''
s2 = 'was'
s3 = 'this line'
elif nUL > 1:
s1 = 's'
s2 = 'were'
s3 = 'these lines'
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Question)
msgBox.setWindowTitle('pySSN: cosmetic file')
msg = '{0:} unchanged line{1:} in the cosmetic file {2:} found.'.format(nUL, s1, s2)
msgBox.setText(msg)
msgBox.setInformativeText('Do you want to delete {:}?\n'.format(s3))
detailedText = 'Unchanged line{:}:\n\n'.format(s1)
for i in UnchangedLineList:
detailedText = detailedText + str(i) + '\n'
msgBox.setDetailedText(detailedText)
DelButton = msgBox.addButton(self.tr("Delete"), QtGui.QMessageBox.ActionRole)
s = 'Delete from the cosmetic file all unchanged lines'
if self.enable_tooltips_action.isChecked():
DelButton.setToolTip(s)
msgBox.addButton(QtGui.QMessageBox.Cancel)
answer = msgBox.exec_()
if msgBox.clickedButton() == DelButton:
answer = True
else:
answer = False
return answer
if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
return
#if not self.sp.get_conf('clean_cosmetic_file'):
# return
if not os.path.isfile(self.sp.fic_cosmetik):
return
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
UnchangedLineList = []
ChangedLines = []
for i in range(len(cosmetic_lines)):
line_c = cosmetic_lines[i].rstrip()
line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
if self.sp.cosmetic_line_unchanged(line_c):
UnchangedLineList.append(line_num)
else:
ChangedLines.append(line_c + '\n')
if len(UnchangedLineList) > 0:
ret = ShowCleanMessage(UnchangedLineList)
if ret == True:
f = open(self.sp.fic_cosmetik, 'w')
f.writelines(ChangedLines)
f.close()
else:
msg = 'No unchanged line in the cosmetic file {:}'.format(self.sp.fic_cosmetik)
self.statusBar().showMessage(msg, 4000)
def match_cosmetic_phyat_files(self):
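"""Compare each cosmetic entry with the atomic database and offer to correct, or delete, entries whose wavelength or intensity no longer match."""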
def ShowErrorMessage():
msg = 'The wavelength or intensity in the cosmetic file does not match that in the atomic database.\n\n' \
'Do you want to try to automatically correct the cosmetic file?'
msgBox = QtGui.QMessageBox()
msgBox.setText("Error in cosmetic file for line: " + str(line_num))
msgBox.setInformativeText(msg)
msgBox.addButton(QtGui.QMessageBox.Yes)
msgBox.addButton(QtGui.QMessageBox.YesToAll)
msgBox.addButton(QtGui.QMessageBox.No)
msgBox.addButton(QtGui.QMessageBox.NoToAll)
msgBox.setDefaultButton(QtGui.QMessageBox.Yes)
answer = msgBox.exec_()
return answer
def ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound):
msgBox = QtGui.QMessageBox()
msgBox.setText('pySSN: error in cosmetic file')
if nCor > 0:
s0 = 'Rerun the synthesis to take into account the changes.\n\n'
else:
s0 = ''
if nUnCor > 0:
s1 = 'The cosmetic data for lines that still have problems will be ignored. ' \
'Do you want to delete them from the cosmetic file?'
else:
s1 = ''
msg = 'Number of lines with problems: {0:}\n' \
'Number of corrected lines: {1:}\n' \
'Number of uncorrected lines: {2:}\n' \
'Number of lines not found in the atomic database: {3:}\n\n' \
'{4:}{5:}'.format(nErr, nCor, nUnCor, nNfd, s0, s1)
msgBox.setInformativeText(msg)
if nNfd > 0:
detailedText = 'Lines not found:\n\n'
for i in NotFound:
detailedText = detailedText + i + '\n'
detailedText = detailedText + '\n'
else:
detailedText = ''
if nUnCor > 0:
detailedText = detailedText + 'Lines not corrected:\n\n'
for i in UnCorList:
detailedText = detailedText + i + '\n'
msgBox.setDetailedText(detailedText)
DelAllButton = msgBox.addButton(self.tr("Delete all"), QtGui.QMessageBox.ActionRole)
DelNotFndButton = msgBox.addButton(self.tr("Delete not found"), QtGui.QMessageBox.ActionRole)
DelUncorButton = msgBox.addButton(self.tr("Delete uncorrected"), QtGui.QMessageBox.ActionRole)
if self.enable_tooltips_action.isChecked():
s = 'Delete from the cosmetic file all lines that still have problems'
DelAllButton.setToolTip(s)
s = 'Delete from the cosmetic file the lines not found in the atomic database'
DelNotFndButton.setToolTip(s)
s = 'Delete from the cosmetic file the uncorrected lines'
DelUncorButton.setToolTip(s)
msgBox.addButton(QtGui.QMessageBox.Cancel)
msgBox.setMaximumHeight(16777215)
msgBox.setMinimumHeight(800)
# It does not expand! Why?
msgBox.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
msgBox.setSizeGripEnabled(True)
if nUnCor == 0:
DelUncorButton.setEnabled(False)
DelAllButton.setEnabled(False)
if nNfd == 0:
DelNotFndButton.setEnabled(False)
DelAllButton.setEnabled(False)
answer = msgBox.exec_()
if msgBox.clickedButton() == DelAllButton:
answer = ['DelNotFnd', 'DelUncor']
elif msgBox.clickedButton() == DelNotFndButton:
answer = ['DelNotFnd']
elif msgBox.clickedButton() == DelUncorButton:
answer = ['DelUncor']
else:
answer = []
return answer
if self.sp.fic_cosmetik is None:
return
if os.path.isfile(self.sp.fic_cosmetik):
cosmetik_arr, errorMsg = self.sp.read_cosmetik()
if len(errorMsg) > 0:
self.sp.do_cosmetik = False
self.sp.set_conf('do_cosmetik', False)
title = 'Error in cosmetic file: '
msg = 'Unable to read cosmetic data from file \'{}\':{}\n\nLine cosmetics will be disabled!'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return
ret = None
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
ErrorList = []
CorrectedList = []
UnCorList = []
NotFound =[]
k = self.sp.field_pos['id']
keys = [ 'lambda', 'l_shift', 'i_rel', 'i_cor' ]
for i in range(len(cosmetic_lines)):
line_c = cosmetic_lines[i].rstrip()
line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
cosmeticLineOk = self.sp.cosmetic_line_ok(line_c)
if cosmeticLineOk == None:
NotFound.append(line_c[:k])
ErrorList.append(line_c[:k])
elif cosmeticLineOk == False:
ErrorList.append(line_c[:k])
if ret != QtGui.QMessageBox.YesToAll and ret != QtGui.QMessageBox.NoToAll:
ret = ShowErrorMessage()
if ret == QtGui.QMessageBox.Yes or ret == QtGui.QMessageBox.YesToAll:
CorrectedList.append(line_c[:k])
line = self.sp.read_line(self.sp.phyat_file, line_num)
line = line.rstrip()
v0 = {i: np.float(self.sp.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.sp.fieldStrFromLine(line_c, i)) for i in keys}
l_shift = v1['lambda'] + v1['l_shift'] - v0['lambda']
i_cor = v1['i_cor'] * v1['i_rel'] / v0['i_rel']
l_shift_str = self.rightFormat(str(l_shift), 'l_shift')
i_cor_str = self.rightFormat(str(i_cor), 'i_cor')
line = self.sp.replace_field(line, 'l_shift', l_shift_str)
line = self.sp.replace_field(line, 'i_cor', i_cor_str)
log_.warn('(corrected) ' + line + '\n', calling=self.calling)
self.sp.replace_line(self.sp.fic_cosmetik, line)
else:
UnCorList.append(line_c[:k])
log_.warn('Not corrected.\n', calling=self.calling)
nErr = len(ErrorList)
nCor = len(CorrectedList)
nUnCor = len(UnCorList)
nNfd = len(NotFound)
if nErr > 0:
answer = ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound)
if 'DelNotFnd' in answer:
for i in NotFound:
self.sp.remove_line(self.sp.fic_cosmetik, int(i))
if 'DelUncor' in answer:
for i in UnCorList:
self.sp.remove_line(self.sp.fic_cosmetik, int(i))
def set_status_text(self):
if self.sp is None:
return
if self.sp.phyat_file == 'NO_phyat.dat':
self.status_text.setText('pySSN, v {}. init file: {}, No synthesis'.format(__version__,
self.sp.config_file.split('/')[-1]))
elif self.sp.get_conf('do_cosmetik'):
self.status_text.setText('pySSN, v {}. init file: {}, at. data: {}, model: {}, cosmetic: {}'.format(__version__,
self.sp.config_file.split('/')[-1],
self.sp.phyat_file.split('/')[-1],
self.sp.get_conf('fic_modele').split('/')[-1],
self.sp.get_conf('fic_cosmetik').split('/')[-1]))
else:
self.status_text.setText('pySSN, v {}. init file: {}, at. data: {}, model: {}, No cosmetic'.format(__version__,
self.sp.config_file.split('/')[-1],
self.sp.phyat_file.split('/')[-1],
self.sp.get_conf('fic_modele').split('/')[-1]))
def test_init_file(self):
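"""Execute the initialization file command by command and collect any syntax, indentation or name errors; report them and return False if a command is invalid."""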
if self.sp == None:
self.showErrorBox = False
self.showErrorBox = True
invalidCommands = []
if os.path.isfile(self.init_file_name):
f = open(self.init_file_name, 'r')
lines = f.readlines()
f.close()
else:
invalidCommands.append('\nFile not found')
lines = []
triple_quoted_string_found = False
newlines = []
rows = []
for i in range(len(lines)):
line = lines[i].split('#')[0].rstrip()
k = line.find('=')
if not (line.strip().startswith('#') or len(line.strip()) == 0):
if '"""' in line:
triple_quoted_string_found = not triple_quoted_string_found
if triple_quoted_string_found:
newlines.append(line.split('#')[0].rstrip())
rows.append(i+1)
else:
s = line.split('#')[0].rstrip()
if len(s.strip()) > 0:
newlines[-1] += '\n' + s
else:
if len(line) == len(line.lstrip()) and not triple_quoted_string_found:
newlines.append(line.split('#')[0].rstrip())
rows.append(i+1)
else:
s = line.split('#')[0].rstrip()
if len(s.strip()) > 0:
newlines[-1] += '\n' + s
for i in range(len(newlines)):
line = newlines[i]
line_list = line.split('\n')
if len(line_list) > 3:
line_str = line_list[0] + '\n' + line_list[1] + '\n' + line_list[2] + '\n...'
else:
line_str = line
try:
exec(line)
except IndentationError:
invalidCommands.append('\nIndentation error, line {}:\n{}'.format(rows[i],line_str))
except SyntaxError:
if '"""' in line and triple_quoted_string_found:
invalidCommands.append('\nUnclosed triple-quotation mark, line {}:\n{}'.format(rows[i],line_str))
else:
invalidCommands.append('\nInvalid syntax, line {}:\n{}'.format(rows[i],line_str))
except(AttributeError, NameError):
invalidCommands.append('\nUndefined variable name or attribute, line {}:\n{}'.format(rows[i],line_str))
except:
invalidCommands.append('\nUndefined error, line {}:\n{}'.format(rows[i],line_str))
if len(invalidCommands) > 0:
title = 'Fatal error'
msg = 'Error in the initialization file \'{0}\': '.format(self.init_file_name)
for line in invalidCommands:
msg = msg + '\n' + line
if self.showErrorBox:
if self.sp == None:
button = QtGui.QMessageBox.Abort
else:
button = QtGui.QMessageBox.Cancel
QtGui.QMessageBox.critical(self, title, msg, button)
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
return True
def start_spectrum(self):
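"""Create the spectrum object from the initialization file, check the cosmetic file, and populate all widgets, menu actions and plot limits from the configuration."""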
init_file = self.init_file_name.split('/')[-1]
dir_ = self.init_file_name.split(init_file)[0]
if dir_ == '':
dir_ = './'
self.directory = dir_
if not self.test_init_file():
if self.sp == None:
sys.exit()
else:
return
self.sp = spectrum(config_file=self.init_file_name)
if self.sp.errorMsg:
if self.showErrorBox:
msg = 'Synthesis not possible. \n\n{}'.format(self.sp.errorMsg)
ret = QtGui.QMessageBox.critical(self, 'Critical Error', msg, QtGui.QMessageBox.Abort, QtGui.QMessageBox.Ignore)
if ret == QtGui.QMessageBox.Abort:
sys.exit()
self.sp.errorMsg = ''
if len(self.sp.read_obs_error) > 0:
title = 'Error reading observations'
msg = self.sp.read_obs_error
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ( self.sp.get_conf('fic_cosmetik') is None or
self.sp.get_conf('fic_cosmetik') == '' ):
self.sp.set_conf('do_cosmetik', False)
if self.sp.get_conf('do_synth') and self.sp.get_conf('do_cosmetik'):
self.match_cosmetic_phyat_files()
if self.sp.get_conf('clean_cosmetic_file'):
self.clean_cosmetic_file()
if self.sp.get_conf('order_cosmetic_file'):
self.order_cosmetic_file()
self.set_status_text()
self.axes = None
self.sp.ax2_fontsize = 6
self.sp_norm_box.setText('{}'.format(self.sp.get_conf('sp_norm')))
self.obj_velo_box.setText('{}'.format(self.sp.get_conf('obj_velo')))
self.ebv_box.setText('{}'.format(self.sp.get_conf('e_bv', 0)))
self.resol_box.setText('{}'.format(self.sp.get_conf('resol')))
self.cut2_box.setText('{}'.format(self.sp.get_conf('cut_plot2')))
self.magenta_box.setText('{}'.format(self.sp.plot_magenta))
self.magenta_label_box.setText('{}'.format(self.sp.label_magenta))
self.cyan_box.setText('{}'.format(self.sp.plot_cyan))
self.cyan_label_box.setText('{}'.format(self.sp.label_cyan))
self.sp_min_box.setText('{}'.format(self.sp.get_conf('limit_sp')[0]))
self.sp_max_box.setText('{}'.format(self.sp.get_conf('limit_sp')[1]))
self.init_axes()
self.xlim_min_box.setText('{}'.format(self.x_plot_lims[0]))
self.xlim_max_box.setText('{}'.format(self.x_plot_lims[1]))
self.y1lim_min_box.setText('{}'.format(self.y1_plot_lims[0]))
self.y1lim_max_box.setText('{}'.format(self.y1_plot_lims[1]))
self.y3lim_min_box.setText('{}'.format(self.y3_plot_lims[0]))
self.y3lim_max_box.setText('{}'.format(self.y3_plot_lims[1]))
self.verbosity_ag.actions()[self.sp.get_conf('log_level', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.line_tick_pos_ag.actions()[self.sp.get_conf('line_tick_pos', 0)].setChecked(True)
self.residual_GroupBox.setChecked(self.sp.get_conf('qt_plot_residuals', True))
self.selected_ions_action.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.ion_cb.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.selected_intensities_action.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.cut_cb.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.diff_lines_ag.actions()[self.sp.get_conf('diff_lines_by', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.editing_lines_action.setChecked(self.sp.get_conf('qt_allow_editing_lines', False))
self.update_lines_action.setChecked(self.sp.get_conf('qt_update_after_editing_lines', False))
self.plot_cont_action.setChecked(self.sp.get_conf('cont_plot', False))
self.show_line_ticks_action.setChecked(self.sp.get_conf('show_line_ticks', False))
self.plot_lines_action.setChecked(self.sp.get_conf('plot_lines_of_selected_ions', False))
self.lineIDs_GroupBox.setChecked(self.sp.get_conf('show_line_ticks', False) or self.sp.get_conf('plot_lines_of_selected_ions', False))
try:
selected_ions = self.sp.get_conf('selected_ions')
s = ''
for ion in selected_ions:
s = s + ion + ', '
if not s == '':
s = s[:-2]
self.ion_box.setText(s)
self.set_ion()
except:
self.ion_box.setText('')
self.line_sort_ag.actions()[self.sp.get_conf('save_lines_sort', 0)].setChecked(True)
self.show_header_action.setChecked(self.sp.get_conf('save_lines_header', False))
self.get_line_fields_to_print()
self.readOnlyCells_bg_color = QtGui.QColor('white')
self.editableCells_bg_color = QtGui.QColor('lightgreen')
if 'linux' in sys.platform and 'Plastique' in self.style_list:
default_style = 'Plastique'
elif 'darwin' in sys.platform and 'Macintosh (aqua)' in self.style_list:
default_style = 'Macintosh (aqua)'
else:
default_style = self.style_list[0]
if self.sp.get_conf('qt_style') not in self.style_list:
if 'QT_STYLE' in os.environ:
if os.environ['QT_STYLE'] in self.style_list:
self.sp.set_conf('qt_style', os.environ['QT_STYLE'])
else:
log_.warn('Unknown Qt style {}, using {}'.format(os.environ['QT_STYLE'], default_style))
self.sp.set_conf('qt_style', default_style)
else:
self.sp.set_conf('qt_style', default_style)
index_style = self.style_list.index(self.sp.get_conf('qt_style'))
self.style_ag.actions()[index_style].setChecked(True)
QtGui.qApp.setStyle(self.sp.get_conf('qt_style'))
self.enable_tooltips_action.setChecked(self.sp.get_conf('qt_enable_tooltips', True))
self.enable_tooltips_action_clicked()
self.adjust_fig_action.setChecked(self.sp.get_conf('fig_adjust', True))
def sp_norm(self):
if self.sp is None:
return
if not self.validate_sp_norm():
return
old_sp_norm = self.sp.get_conf('sp_norm')
new_sp_norm = np.float(self.sp_norm_box.text())
if old_sp_norm == new_sp_norm:
return
log_.message('Changing sp_norm. Old: {}, New: {}'.format(old_sp_norm, new_sp_norm), calling=self.calling)
self.statusBar().showMessage('Changing intensity scale of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.renorm(new_sp_norm)
self.on_draw()
def obj_velo(self):
if self.sp is None:
return
if not self.validate_obj_velo():
return
old_obj_velo = self.sp.get_conf('obj_velo')
new_obj_velo = np.float(self.obj_velo_box.text())
if old_obj_velo == new_obj_velo:
return
self.sp.iterpolate_velocity = False
self.sp.set_conf('obj_velo', new_obj_velo)
log_.message('Changing obj_velo. Old: {}, New: {}'.format(old_obj_velo, new_obj_velo), calling=self.calling)
self.statusBar().showMessage('Executing doppler correction of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.init_obs(obj_velo=new_obj_velo)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = True, do_profiles=False)
self.on_draw()
def ebv(self):
if self.sp is None:
return
if not self.validate_ebv():
return
old_ebv = self.sp.get_conf('e_bv')
new_ebv = np.float(self.ebv_box.text())
if old_ebv == new_ebv and not self.cont_par_changed:
return
log_.message('Changing E B-V. Old: {}, New: {}'.format(old_ebv, new_ebv), calling=self.calling)
self.statusBar().showMessage('Changing color excess E(B-V) ...', 4000)
self.statusBar().showMessage('Executing reddening correction of the synthetic spectrum ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('e_bv', new_ebv)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = False, do_profiles=False)
self.on_draw()
self.cont_par_changed = False
def rerun(self):
if not self.validate_synthesis_parameters():
return
if ( self.x_plot_lims[0] < np.float(self.sp_min_box.text()) or
self.x_plot_lims[1] > np.float(self.sp_max_box.text()) ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.statusBar().showMessage('Rerunning synthesis ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('limit_sp', (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text())))
self.sp.set_conf('resol', np.int(self.resol_box.text()))
self.sp.set_conf('obj_velo', np.float(self.obj_velo_box.text()))
self.sp.set_conf('sp_norm', np.float(self.sp_norm_box.text()))
self.sp.set_conf('e_bv', np.float(self.ebv_box.text()))
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run()
self.set_plot_limits_and_draw()
def adjust(self):
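"""Apply pending changes to sp_norm, obj_velo and E(B-V), update only the affected lines, and return the number of changed lines (0 if the update failed, -1 if the cosmetic file could not be read)."""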
if self.sp is None:
return
self.sp.errorMsg = ''
self.statusBar().showMessage('Running update ...')
QtGui.QApplication.processEvents()
self.sp_norm()
self.obj_velo()
self.ebv()
if self.sp.errorMsg:
if self.showErrorBox:
msg = self.sp.errorMsg
QtGui.QMessageBox.warning(self, 'Update error', msg, QtGui.QMessageBox.Ok)
return 0
ndiff, errorMsg = self.sp.adjust()
if ndiff == -1:
self.sp.do_cosmetik = False
self.sp.set_conf('do_cosmetik', False)
self.set_status_text()
title = 'Error in cosmetic file'
msg = 'Unable to read from file \'{}\'\nChanging to \'no cosmetic\':\n{}'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ndiff > 0:
self.on_draw()
self.statusBar().showMessage('Update finished.', 4000)
return ndiff
def apply_post_proc(self):
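"""Ask for a python file defining a post_proc(fig) function, load it, and apply it to the current figure."""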
path = str(self.post_proc_file or '')
file_choices = "Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open post-process file'
path = unicode(QtGui.QFileDialog.getOpenFileName(self, title, path, file_choices))
path = path.split('/')[-1]
if not path:
return
try:
user_module = {}
execfile(path, user_module)
self.post_proc = user_module['post_proc']
self.post_proc_file = path
log_.message('function post_proc read from {}'.format(self.post_proc_file))
except:
self.post_proc = None
title = 'Error reading post-process file'
msg = 'Unable to read post-process file \'{}\''.format(path)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
try:
self.post_proc(self.fig)
self.canvas.draw()
except:
title = 'Error executing post-process'
msg = 'Error in post-process file \'{}\''.format(self.post_proc_file)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
def update_profile(self):
if self.sp is None:
return
self.sp.run(do_synth = True, do_read_liste = False, do_profiles=True)
self.on_draw()
def cut2(self):
if self.sp is None:
return
if not self.validate_cut():
return
self.selected_intensities_action.setChecked(True)
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
self.draw_ion()
def get_ion_str(self,s):
s = s.strip()
s = s.replace(' ', '_')
if s.isdigit():
line = self.sp.get_line_from_reduce_code(s)
if line is None:
s = ''
else:
s = self.sp.fieldStrFromLine(line,'id').strip()
return s
def set_ion(self):
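"""Parse the comma-separated content of the ion box into the 'selected_ions' list; a trailing '*' expands an ion to all its related ions, and an element name expands to all its ions."""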
if self.sp is None:
return
sList = []
s = self.ion_box.text()
k = s.indexOf(',')
while k >= 0:
s0 = self.get_ion_str(str(s[:k]))
if s0 != '' and s0 != '*':
sList.append(s0)
s = s[k+1:]
k = s.indexOf(',')
s0 = self.get_ion_str(str(s))
if s0 != '' and s0 != '*':
sList.append(s0)
s = ''
for s0 in sList:
s = s + s0 + ', '
s = s[:-2]
for item in sList[:]:
sList.remove(item)
if item[-1] == '*':
item = item[:-1]
this_ion_only = False
else:
this_ion_only = True
self.sp.set_ion_list()
if item.ljust(9) in self.sp.liste_raies['id']:
if self.sp.true_ion(item) == item or this_ion_only:
sList = sList + [item]
if not this_ion_only:
sList = sList + self.sp.get_all_ions_from_ion(item)
elif item.ljust(9) in self.sp.sp_theo['raie_ref']['id']:
if self.sp.true_ion(item) == item or this_ion_only:
sList = sList + [item]
if not this_ion_only:
sList = sList + self.sp.get_all_ions_from_ion(item)
else:
ion_list = self.sp.get_ions_from_element(item)
sList = sList + ion_list
self.sp.set_conf('selected_ions', sList)
self.ion_box.setText(s)
def set_refline_to_info_box(self,j):
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
if j == -1:
j = 0
s = str(self.sp.selected_ions_data[j][2][0])
self.line_info_box.setText(s)
def draw_ion(self):
if self.cut_cb.isChecked():
if self.validate_cut():
self.sp.set_conf('cut_plot2', np.float(self.cut2_box.text()))
else:
return
self.set_ion()
self.sp.set_conf('index_of_current_ion', -1)
self.sp.set_selected_ions_data()
self.set_refline_to_info_box(-1)
self.on_draw()
def line_info(self):
if self.sp is None:
return
msg = ''
s = str(self.line_info_box.text())
if s == '':
return
w = self.sp.field_width['num'] - 1
s = s[-w:]
if s[0] == '0':
s = s[1:]
self.line_info_box.setText(s)
try:
new_ref = int(s)
except ValueError:
msg = 'Invalid input.\n It is not an integer'
if msg == '':
line = self.sp.get_line_from_reduce_code(s)
if line is None:
msg = 'No line unambiguously associated with this number.'
if msg == '':
s = self.sp.fieldStrFromLine(line,'num').strip()
self.line_info_box.setText(s)
self.line_info_ref = int(s)
if self.sp.get_conf('qt_show_dialogs', True):
self.show_line_info_dialog()
else:
self.sp.line_info(new_ref, sort='i_rel')
else:
title = 'Error in line number'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def magenta_line(self):
if self.sp is None:
return
ref_str = self.magenta_box.text()
ref_txt = self.magenta_label_box.text()
if ref_str == '':
self.sp.plot_magenta = None
self.sp.label_magenta = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_magenta = new_ref
self.sp.label_magenta = ref_txt
self.on_draw()
def cyan_line(self):
if self.sp is None:
return
ref_str = self.cyan_box.text()
ref_txt = self.cyan_label_box.text()
if ref_str == '':
self.sp.plot_cyan = None
self.sp.label_cyan = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_cyan = new_ref
self.sp.label_cyan = ref_txt
self.on_draw()
def diff_lines(self):
self.sp.set_conf('index_of_current_ion', -1)
self.set_plot_ax2()
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
s = str(self.sp.selected_ions_data[0][2][0])
self.line_info_box.setText(s)
def set_plot_ax2(self):
self.sp.set_selected_ions_data()
k = self.line_tick_ax_list.index(self.line_tick_ax_ag.checkedAction().text())
self.sp.set_conf('line_tick_ax',k)
k = self.line_tick_pos_list.index(self.line_tick_pos_ag.checkedAction().text())
self.sp.set_conf('line_tick_pos',k)
k = self.diff_lines_list.index(self.diff_lines_ag.checkedAction().text())
self.sp.set_conf('diff_lines_by',k)
if self.show_line_ticks_action.isChecked():
self.make_axes()
def verbosity(self):
verbosity = self.verbosity_list.index(self.verbosity_ag.checkedAction().text())
if verbosity == log_.level:
return
log_.debug('Verbosity changed from {} to {}'.format(log_.level, verbosity), calling=self.calling)
log_.level = verbosity
self.sp.set_conf('log_level', verbosity)
def style(self):
new_style_str = str(self.style_ag.checkedAction().text())
old_style_str = self.sp.get_conf('qt_style')
if new_style_str == old_style_str:
return
self.sp.set_conf('qt_style', new_style_str)
QtGui.qApp.setStyle(new_style_str)
log_.debug('Widget style changed from {} to {}'.format(old_style_str, new_style_str), calling=self.calling)
def update_lim_boxes(self):
xformat = '{:.1f}'
yformat = '{1:.{0}f}'
min_diff = 2
if abs(self.x_plot_lims[1] - self.x_plot_lims[0]) < min_diff:
m = (self.x_plot_lims[0] + self.x_plot_lims[1])/2
x_lims = (m - min_diff/2,m + min_diff/2)
else:
x_lims = self.x_plot_lims
min_diff = 0.2
if abs(self.y1_plot_lims[1] - self.y1_plot_lims[0]) < min_diff:
m = (self.y1_plot_lims[0] + self.y1_plot_lims[1])/2
y1_lims = (m - min_diff/2,m + min_diff/2)
else:
y1_lims = self.y1_plot_lims
min_diff = 0.2
if abs(self.y3_plot_lims[1] - self.y3_plot_lims[0]) < min_diff:
m = (self.y3_plot_lims[0] + self.y3_plot_lims[1])/2
y3_lims = (m - min_diff/2,m + min_diff/2)
else:
y3_lims = self.y3_plot_lims
if self.x_plot_lims[0] != np.float(self.xlim_min_box.text()):
self.xlim_min_box.setText(xformat.format(x_lims[0]))
if self.x_plot_lims[1] != np.float(self.xlim_max_box.text()):
self.xlim_max_box.setText(xformat.format(x_lims[1]))
delta = abs(y1_lims[1]-y1_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y1_plot_lims[0] != np.float(self.y1lim_min_box.text()):
self.y1lim_min_box.setText(yformat.format(precision, y1_lims[0]))
if self.y1_plot_lims[1] != np.float(self.y1lim_max_box.text()):
self.y1lim_max_box.setText(yformat.format(precision, y1_lims[1]))
delta = abs(y3_lims[1]-y3_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y3_plot_lims[0] != np.float(self.y3lim_min_box.text()):
self.y3lim_min_box.setText(yformat.format(precision, y3_lims[0]))
if self.y3_plot_lims[1] != np.float(self.y3lim_max_box.text()):
self.y3lim_max_box.setText(yformat.format(precision, y3_lims[1]))
self.set_plot_limits_and_draw()
def validate_input(self, editBox, field, title, varType = 'float', showError = True):
value = editBox.text()
if value == None:
return False
if ( ( varType == 'float' and not self.isFloat(value) ) or \
( varType == 'integer' and not self.isInteger(value) ) or \
( varType == 'positive integer' and not self.isPositiveInteger(value) ) or \
( varType == 'positive odd integer' and not self.isPositiveOdd(value) ) ):
msg = '{} should be a {}'.format(field, varType)
msg = msg.replace('a integer', 'an integer')
editBox.setFocus()
if showError:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
else:
return True
def validate_sp_min(self):
return self.validate_input(self.sp_min_box, 'xmin for the synthesis', 'Input error', 'float')
def validate_sp_max(self):
return self.validate_input(self.sp_max_box, 'xmax for the synthesis', 'Input error', 'float')
def validate_sp_norm(self):
return self.validate_input(self.sp_norm_box, 'normalization factor', 'Input error', 'float')
def validate_ebv(self):
return self.validate_input(self.ebv_box, 'color excess E(B-V)', 'Input error', 'float')
def validate_obj_velo(self):
return self.validate_input(self.obj_velo_box, 'radial velocity', 'Input error', 'float')
def validate_resol(self):
return self.validate_input(self.resol_box, 'rebinning factor', 'Input error', 'positive odd integer')
def validate_xlim_min(self, showError = True):
return self.validate_input(self.xlim_min_box, 'xmin', 'Invalid plot limit', 'float', showError)
def validate_xlim_max(self, showError = True):
return self.validate_input(self.xlim_max_box, 'xmax', 'Invalid plot limit', 'float', showError)
def validate_y1lim_min(self):
return self.validate_input(self.y1lim_min_box, 'ymin', 'Invalid plot limit', 'float')
def validate_y1lim_max(self):
return self.validate_input(self.y1lim_max_box, 'ymax', 'Invalid plot limit', 'float')
def validate_y3lim_min(self):
return self.validate_input(self.y3lim_min_box, 'residual ymin', 'Invalid plot limit', 'float')
def validate_y3lim_max(self):
return self.validate_input(self.y3lim_max_box, 'residual ymax', 'Invalid plot limit', 'float')
def validate_cut(self):
return self.validate_input(self.cut2_box, 'cut', 'Input error', 'float')
def sp_lim_in_range(self):
xmin = np.float(self.sp_min_box.text())
xmax = np.float(self.sp_max_box.text())
if ( xmin < xmax - 9.999 ) and ( xmin > 0. ) and ( xmax < 200000000.):
return True
else:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, 'Invalid synthesis limits', 'The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.',
QtGui.QMessageBox.Ok )
else:
log_.warn('Invalid synthesis limits: the acceptable values are xmax - xmin > 10, xmin > 0, xmax < 200000000.', calling=self.calling)
return False
def validate_synthesis_parameters(self):
return ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() and
self.validate_sp_norm() and
self.validate_obj_velo() and
self.validate_ebv() and
self.validate_resol() )
def validate_plot_parameters(self):
return ( self.validate_xlim_min() and
self.validate_xlim_max() and
self.validate_y1lim_min() and
self.validate_y1lim_max() and
self.validate_y3lim_min() and
self.validate_y3lim_max() )
def set_plot_limits_and_draw(self):
if not self.validate_plot_parameters():
return
self.x_plot_lims = (np.float(self.xlim_min_box.text()), np.float(self.xlim_max_box.text()))
self.y1_plot_lims = (np.float(self.y1lim_min_box.text()), np.float(self.y1lim_max_box.text()))
self.y3_plot_lims = (np.float(self.y3lim_min_box.text()), np.float(self.y3lim_max_box.text()))
self.sp.set_conf('x_plot_lims', self.x_plot_lims)
self.sp.set_conf('y1_plot_lims', self.y1_plot_lims)
self.sp.set_conf('y3_plot_lims', self.y3_plot_lims)
self.restore_axes()
self.draw_ion()
def set_limit_sp(self):
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
self.sp.set_conf('limit_sp', limit_sp)
def set_limit_sp_and_run(self):
if str(self.sp_min_box.text()).strip() == '':
self.sp_min_box.setText('{:.1f}'.format(self.sp.w_min))
if str(self.sp_max_box.text()).strip() == '':
self.sp_max_box.setText('{:.1f}'.format(self.sp.w_max))
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
old_limit_sp = self.sp.get_conf('limit_sp')
new_limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
if old_limit_sp == new_limit_sp:
if not self.axes_fixed:
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.set_plot_limits_and_draw()
return
if not self.validate_xlim_min(False):
self.xlim_min_box.setText(self.sp_min_box.text())
if not self.validate_xlim_max(False):
self.xlim_max_box.setText(self.sp_max_box.text())
if ( np.float(self.xlim_min_box.text()) >= new_limit_sp[1] or
np.float(self.xlim_max_box.text()) <= new_limit_sp[0] ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.sp.set_conf('limit_sp', new_limit_sp)
log_.message('Changing limit_sp. Old: {}, New: {}'.format(old_limit_sp, new_limit_sp), calling=self.calling)
self.statusBar().showMessage('Changing the synthesis wavelength limits ...')
QtGui.QApplication.processEvents()
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.set_plot_limits_and_draw()
def resol(self):
if self.sp is None:
return
if not self.validate_resol():
return
old_resol = self.sp.get_conf('resol')
new_resol = np.int(self.resol_box.text())
if old_resol == new_resol:
return
self.sp.set_conf('resol', new_resol)
log_.message('Changing resol. Old: {}, New: {}'.format(old_resol, new_resol), calling=self.calling)
self.statusBar().showMessage('Changing rebinning factor ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('resol', new_resol)
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.on_draw()
def leave_fig(self, event):
self.sp.firstClick = True
if ( self.x_plot_lims != self.axes.get_xlim() or
self.y1_plot_lims != self.axes.get_ylim() or
( self.axes3 is not None and self.y3_plot_lims != self.axes3.get_ylim() ) ):
limits_changed = True
else:
limits_changed = False
if not self.axes_fixed and limits_changed:
self.save_axes()
self.update_lim_boxes()
def fix_axes(self):
if self.fix_axes_cb.isChecked():
self.axes_fixed = True
else:
self.axes_fixed = False
def get_line_fields_to_print(self):
field_list = self.sp.get_conf('save_lines_fields')
for i in range(0,len(self.line_field_menu.actions())):
if self.line_print_dic.keys()[i] in field_list:
self.line_field_menu.actions()[i].setChecked(True)
else:
self.line_field_menu.actions()[i].setChecked(False)
def set_show_header(self):
if self.show_header_action.isChecked():
self.sp.set_conf('save_lines_header', True)
else:
self.sp.set_conf('save_lines_header', False)
def set_line_fields_to_print(self):
s = []
for i in range(0,len(self.line_field_menu.actions())):
if self.line_field_menu.actions()[i].isChecked():
s.append( self.line_print_dic.keys()[i])
self.sp.set_conf('save_lines_fields', s)
def save_lines(self):
self.sp.save_lines()
path = self.sp.get_conf('save_lines_filename')
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def save_lines_as(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
filename = self.sp.get_conf('save_lines_filename')
extension = os.path.splitext(filename)[1][1:].lower()
if extension in ['txt','dat']:
selectedFilter = 'Text files (*.txt *.dat) (*.txt *.dat)'
elif extension in ['tex']:
selectedFilter = 'Tex files (*.tex) (*.tex)'
elif extension in ['csv']:
selectedFilter = 'CSV files (*.csv) (*.csv)'
else:
selectedFilter = 'All Files (*) (*)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save lines to file', filename, file_choices, selectedFilter))
if path:
self.sp.set_conf('save_lines_filename', path)
self.sp.save_lines()
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def line_sort(self):
k = self.line_sort_list.index(self.line_sort_ag.checkedAction().text())
self.sp.set_conf('save_lines_sort',k)
def main_loc(init_filename=None, post_proc_file=None):
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
form.show()
app.exec_()
return form.fig
def main_loc_obj(init_filename=None, post_proc_file=None):
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
form.show()
app.exec_()
return form
def main():
parser = get_parser()
args = parser.parse_args()
log_.level = args.verbosity
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=args.file, post_proc_file=args.post_proc)
#import pdb
#pdb.set_trace()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| mvfcopetti/pySSN | pyssn/qt/pyssn_qt.py | Python | gpl-3.0 | 219,113 | ["Gaussian"] | 7fa8867df663a7f7d75ea5ec56735ffe854d5afcb2399879cac2a8b4a1c9f573 |
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing netCDF, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version, get_software_libdir
class EB_netCDF(CMakeMake):
"""Support for building/installing netCDF"""
def configure_step(self):
"""Configure build: set config options and configure"""
if LooseVersion(self.version) < LooseVersion("4.3"):
self.cfg.update('configopts', "--enable-shared")
if self.toolchain.options['pic']:
self.cfg.update('configopts', '--with-pic')
tup = (os.getenv('FFLAGS'), os.getenv('MPICC'), os.getenv('F90'))
self.cfg.update('configopts', 'FCFLAGS="%s" CC="%s" FC="%s"' % tup)
# add -DgFortran to CPPFLAGS when building with GCC
if self.toolchain.comp_family() == toolchain.GCC: #@UndefinedVariable
self.cfg.update('configopts', 'CPPFLAGS="%s -DgFortran"' % os.getenv('CPPFLAGS'))
ConfigureMake.configure_step(self)
else:
self.cfg.update('configopts', '-DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_C_FLAGS_RELEASE="-DNDEBUG " ')
for (dep, libname) in [('cURL', 'curl'), ('HDF5', 'hdf5'), ('Szip', 'sz'), ('zlib', 'z')]:
dep_root = get_software_root(dep)
dep_libdir = get_software_libdir(dep)
if dep_root:
incdir = os.path.join(dep_root, 'include')
self.cfg.update('configopts', '-D%s_INCLUDE_DIR=%s ' % (dep.upper(), incdir))
if dep == 'HDF5':
env.setvar('HDF5_ROOT', dep_root)
libhdf5 = os.path.join(dep_root, dep_libdir, 'libhdf5.so')
self.cfg.update('configopts', '-DHDF5_LIB=%s ' % libhdf5)
libhdf5_hl = os.path.join(dep_root, dep_libdir, 'libhdf5_hl.so')
self.cfg.update('configopts', '-DHDF5_HL_LIB=%s ' % libhdf5_hl)
else:
libso = os.path.join(dep_root, dep_libdir, 'lib%s.so' % libname)
self.cfg.update('configopts', '-D%s_LIBRARY=%s ' % (dep.upper(), libso))
CMakeMake.configure_step(self)
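# For illustration only (hypothetical install prefixes): the CMake branch above ends up
# extending configopts with options of the form
#   -DCURL_INCLUDE_DIR=/software/cURL/include -DCURL_LIBRARY=/software/cURL/lib/libcurl.so
#   -DHDF5_INCLUDE_DIR=/software/HDF5/include -DHDF5_LIB=/software/HDF5/lib/libhdf5.so
#   -DHDF5_HL_LIB=/software/HDF5/lib/libhdf5_hl.so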
def sanity_check_step(self):
"""
Custom sanity check for netCDF
"""
incs = ["netcdf.h"]
libs = ["libnetcdf.so", "libnetcdf.a"]
# since v4.2, the non-C libraries have been split off into separate extensions
# see netCDF-Fortran and netCDF-C++
if LooseVersion(self.version) < LooseVersion("4.2"):
incs += ["netcdf%s" % x for x in ["cpp.h", ".hh", ".inc", ".mod"]] + \
["ncvalues.h", "typesizes.mod"]
libs += ["libnetcdf_c++.so", "libnetcdff.so",
"libnetcdf_c++.a", "libnetcdff.a"]
custom_paths = {
'files': ["bin/nc%s" % x for x in ["-config", "copy", "dump",
"gen", "gen3"]] +
[("lib/%s" % x,"lib64/%s" % x) for x in libs] +
["include/%s" % x for x in incs],
'dirs': []
}
super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)
def set_netcdf_env_vars(log):
"""Set netCDF environment variables used by other software."""
netcdf = get_software_root('netCDF')
if not netcdf:
raise EasyBuildError("netCDF module not loaded?")
else:
env.setvar('NETCDF', netcdf)
log.debug("Set NETCDF to %s" % netcdf)
netcdff = get_software_root('netCDF-Fortran')
netcdf_ver = get_software_version('netCDF')
if not netcdff:
if LooseVersion(netcdf_ver) >= LooseVersion("4.2"):
raise EasyBuildError("netCDF v4.2 no longer supplies Fortran library, also need netCDF-Fortran")
else:
env.setvar('NETCDFF', netcdff)
log.debug("Set NETCDFF to %s" % netcdff)
def get_netcdf_module_set_cmds(log):
"""Get module setenv commands for netCDF."""
log.deprecated("Use self.module_generator.set_environment rather than relying on get_netcdf_module_set_cmds", '3.0')
netcdf = os.getenv('NETCDF')
if netcdf:
txt = "setenv NETCDF %s\n" % netcdf
# netCDF-Fortran is optional (only for netCDF v4.2 and later)
netcdff = os.getenv('NETCDFF')
if netcdff:
txt += "setenv NETCDFF %s\n" % netcdff
return txt
else:
raise EasyBuildError("NETCDF environment variable not set?")
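# For illustration (hypothetical module paths): get_netcdf_module_set_cmds() returns
# text along the lines of
#   setenv NETCDF /software/netCDF/4.2.1
#   setenv NETCDFF /software/netCDF-Fortran/4.2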
| torbjoernk/easybuild-easyblocks | easybuild/easyblocks/n/netcdf.py | Python | gpl-2.0 | 6,256 | ["NetCDF"] | 3298dcbcb91e40a65cb8cd489e9858631e82cd4c77ee9983331237275002e58d |
#User provided customizations for the gpaw setup
compiler = 'cc'
mpicompiler = 'cc'
mpilinker= 'cc'
extra_compile_args = ['-std=c99']
libraries = []
scalapack = True
hdf5 = True
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
define_macros += [("GPAW_ASYNC",1)]
define_macros += [("GPAW_MPI2",1)]
| robwarm/gpaw-symm | doc/install/Cray/customize_louhi.py | Python | gpl-3.0 | 365 | ["GPAW"] | 2ee33f3aa2d94a8aa4dcd0a6d17d945e1ad4546e3b7afcae01dc3d9ab38e37db |
# -*- coding: utf-8 -*-
#
# Name: face.com Python API client library
# Description: face.com REST API Python client library.
#
# For more information about the API and the return values,
# visit the official documentation at http://developers.face.com/docs/api/.
#
# Author: Tomaž Muraus (http://www.tomaz.me)
# License: BSD
import urllib
import urllib2
import os.path
import warnings
try:
import json
except ImportError:
import simplejson as json
API_HOST = 'api.face.com'
USE_SSL = True
class FaceClient(object):
def __init__(self, api_key=None, api_secret=None):
if not api_key or not api_secret:
raise AttributeError('Missing api_key or api_secret argument')
self.api_key = api_key
self.api_secret = api_secret
self.format = 'json'
self.twitter_credentials = None
self.facebook_credentials = None
def set_twitter_user_credentials(self, *args, **kwargs):
warnings.warn(('Twitter username & password auth has been ' +
'deprecated. Please use oauth based auth - ' +
'set_twitter_oauth_credentials()'))
def set_twitter_oauth_credentials(self, user=None, secret=None,
token=None):
if not user or not secret or not token:
raise AttributeError('Missing one of the required arguments')
self.twitter_credentials = {'twitter_oauth_user': user,
'twitter_oauth_secret': secret,
'twitter_oauth_token': token}
def set_facebook_access_token(self, *args, **kwargs):
warnings.warn(('Method has been renamed to ' +
'set_facebook_oauth_credentials(). Support for ' +
'username & password based auth has also been dropped. ' +
'Now only oAuth2 token based auth is supported'))
def set_facebook_oauth_credentials(self, user_id=None, session_id=None,
oauth_token=None):
for (key, value) in [('user_id', user_id), ('session_id', session_id),
('oauth_token', oauth_token)]:
if not value:
raise AttributeError('Missing required argument: %s' % (key))
self.facebook_credentials = {'fb_user_id': user_id,
'fb_session_id': session_id,
'fb_oauth_token': oauth_token}
### Recognition engine methods ###
def faces_detect(self, urls=None, file=None, aggressive=False):
"""
Returns tags for detected faces in one or more photos, with geometric
information of the tag, eyes, nose and mouth, as well as the gender,
glasses, and smiling attributes.
http://developers.face.com/docs/api/faces-detect/
"""
if not urls and not file:
raise AttributeError('Missing URLs/filename argument')
data = {'attributes': 'all'}
files = []
if file:
# Check if the file exists
if not hasattr(file, 'read') and not os.path.exists(file):
raise IOError('File %s does not exist' % (file))
files.append(file)
else:
data['urls'] = urls
if aggressive:
data['detector'] = 'Aggressive'
response = self.send_request('faces/detect', data, files)
return response
def faces_status(self, uids=None, namespace=None):
"""
Reports training set status for the specified UIDs.
http://developers.face.com/docs/api/faces-status/
"""
if not uids:
raise AttributeError('Missing user IDs')
(facebook_uids, twitter_uids) = \
self.__check_user_auth_credentials(uids)
data = {'uids': uids}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, namespace=namespace)
response = self.send_request('faces/status', data)
return response
def faces_recognize(self, uids=None, urls=None, file=None, train=None,
namespace=None):
"""
Attempts to detect and recognize one or more user IDs' faces, in one
or more photos.
For each detected face, the face.com engine will return the most likely
user IDs, or empty result for unrecognized faces. In addition, each
tag includes a threshold score - any score below this number is
considered a low-probability hit.
http://developers.face.com/docs/api/faces-recognize/
"""
if not uids or (not urls and not file):
raise AttributeError('Missing required arguments')
(facebook_uids, twitter_uids) = \
self.__check_user_auth_credentials(uids)
data = {'uids': uids, 'attributes': 'all'}
files = []
if file:
# Check if the file exists
if not hasattr(file, 'read') and not os.path.exists(file):
raise IOError('File %s does not exist' % (file))
files.append(file)
else:
data.update({'urls': urls})
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, train=train,
namespace=namespace)
response = self.send_request('faces/recognize', data, files)
return response
def faces_train(self, uids=None, namespace=None):
"""
Calls the training procedure for the specified UIDs, and reports back
changes.
http://developers.face.com/docs/api/faces-train/
"""
if not uids:
raise AttributeError('Missing user IDs')
(facebook_uids, twitter_uids) = \
self.__check_user_auth_credentials(uids)
data = {'uids': uids}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, namespace=namespace)
response = self.send_request('faces/train', data)
return response
### Methods for managing face tags ###
def tags_get(self, uids=None, urls=None, pids=None, order='recent', \
limit=5, together=False, filter=None, namespace=None):
"""
Returns saved tags in one or more photos, or for the specified
User ID(s).
This method also accepts multiple filters for finding tags
corresponding to a more specific criteria such as front-facing,
recent, or where two or more users appear together in same photos.
http://developers.face.com/docs/api/tags-get/
"""
(facebook_uids, twitter_uids) = \
self.__check_user_auth_credentials(uids)
data = {'uids': uids,
'urls': urls,
'together': together,
'limit': limit}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, pids=pids, filter=filter,
namespace=namespace)
response = self.send_request('tags/get', data)
return response
def tags_add(self, url=None, x=None, y=None, width=None, uid=None,
tagger_id=None, label=None, password=None):
"""
Add a (manual) face tag to a photo. Use this method to add face tags
where those were not detected for completeness of your service.
http://developers.face.com/docs/api/tags-add/
"""
if not url or not x or not y or not width or not uid or not tagger_id:
raise AttributeError('Missing one of the required arguments')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
data = {'url': url,
'x': x,
'y': y,
'width': width,
'uid': uid,
'tagger_id': tagger_id}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, label=label, password=password)
response = self.send_request('tags/add', data)
return response
def tags_save(self, tids=None, uid=None, tagger_id=None, label=None, \
password=None):
"""
Saves a face tag. Use this method to save tags for training the
face.com index, or for future use of the faces.detect and tags.get
methods.
http://developers.face.com/docs/api/tags-save/
"""
if not tids or not uid:
raise AttributeError('Missing required argument')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
data = {'tids': tids,
'uid': uid}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, tagger_id=tagger_id,
label=label, password=password)
response = self.send_request('tags/save', data)
return response
def tags_remove(self, tids=None, password=None):
"""
Remove a previously saved face tag from a photo.
http://developers.face.com/docs/api/tags-remove/
"""
if not tids:
raise AttributeError('Missing tag IDs')
data = {'tids': tids}
response = self.send_request('tags/remove', data)
return response
### Account management methods ###
def account_limits(self):
"""
Returns current rate limits for the account represented by the passed
API key and Secret.
http://developers.face.com/docs/api/account-limits/
"""
response = self.send_request('account/limits')
return response['usage']
def account_users(self, namespaces=None):
"""
Returns the users saved in the given namespaces for the account
represented by the passed API key and Secret.
http://developers.face.com/docs/api/account-users/
"""
if not namespaces:
raise AttributeError('Missing namespaces argument')
response = self.send_request('account/users',
{'namespaces': namespaces})
return response
def __check_user_auth_credentials(self, uids):
# Check if needed credentials are provided
facebook_uids = [uid for uid in uids.split(',') \
if uid.find('@facebook.com') != -1]
twitter_uids = [uid for uid in uids.split(',') \
if uid.find('@twitter.com') != -1]
if facebook_uids and not self.facebook_credentials:
raise AttributeError('You need to set Facebook credentials ' +
'to perform action on Facebook users')
if twitter_uids and not self.twitter_credentials:
raise AttributeError('You need to set Twitter credentials to ' +
'perform action on Twitter users')
return (facebook_uids, twitter_uids)
def __append_user_auth_data(self, data, facebook_uids, twitter_uids):
if facebook_uids:
data.update({'user_auth':
('fb_user:%s,fb_session:%s,'
'fb_oauth_token:%s' %
(self.facebook_credentials['fb_user_id'],
self.facebook_credentials['fb_session_id'],
self.facebook_credentials['fb_oauth_token']))})
if twitter_uids:
data.update({'user_auth':
('twitter_oauth_user:%s,twitter_oauth_secret:%s,'
'twitter_oauth_token:%s' %
(self.twitter_credentials['twitter_oauth_user'],
self.twitter_credentials['twitter_oauth_secret'],
self.twitter_credentials['twitter_oauth_token']))})
def __append_optional_arguments(self, data, **kwargs):
for key, value in kwargs.iteritems():
if value:
data.update({key: value})
def send_request(self, method=None, parameters=None, files=None):
if USE_SSL:
protocol = 'https://'
else:
protocol = 'http://'
url = '%s%s/%s' % (protocol, API_HOST, method)
data = {'api_key': self.api_key,
'api_secret': self.api_secret,
'format': self.format}
if parameters:
data.update(parameters)
# Local file is provided, use multi-part form
if files:
from multipart import Multipart
form = Multipart()
for key, value in data.iteritems():
form.field(key, value)
for i, file in enumerate(files, 1):
if hasattr(file, 'read'):
if hasattr(file, 'name'):
name = os.path.basename(file.name)
else:
name = 'attachment_%d' % i
close_file = False
else:
name = os.path.basename(file)
file = open(file, 'r')
close_file = True
try:
form.file(name, name, file.read())
finally:
if close_file:
file.close()
(content_type, post_data) = form.get()
headers = {'Content-Type': content_type}
else:
post_data = urllib.urlencode(data)
headers = {}
request = urllib2.Request(url, headers=headers, data=post_data)
response = urllib2.urlopen(request)
response = response.read()
response_data = json.loads(response)
if 'status' in response_data and \
response_data['status'] == 'failure':
raise FaceError(response_data['error_code'],
response_data['error_message'])
return response_data
class FaceError(Exception):
def __init__(self, error_code, error_message):
self.error_code = error_code
self.error_message = error_message
def __str__(self):
return '%s (%d)' % (self.error_message, self.error_code)
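# A minimal usage sketch of the client defined above (hypothetical credentials and URL):
# client = FaceClient(api_key='KEY', api_secret='SECRET')
# tags = client.faces_detect(urls='http://example.com/photo.jpg', aggressive=True)
# client.faces_train(uids='mark@docs', namespace='docs')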
| Kami/python-face-client | face_client/face_client.py | Python | bsd-3-clause | 14,348 | ["VisIt"] | ca56433e11e380d809f2172645c6144ce9c080211cd1f08805f8f13dc1539191 |
# $HeadURL: $
''' CSHelpers
Module containing functions interacting with the CS and useful for the RSS
modules.
'''
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getGOCSiteName
from DIRAC.ResourceStatusSystem.Utilities import Utils
from DIRAC.ConfigurationSystem.Client.Helpers import Resources
__RCSID__ = '$Id: $'
def getGOCSites( diracSites = None ):
#FIXME: THIS SHOULD GO INTO Resources HELPER
if diracSites is None:
diracSites = Resources.getSites()
if not diracSites[ 'OK' ]:
return diracSites
diracSites = diracSites[ 'Value' ]
gocSites = []
for diracSite in diracSites:
gocSite = getGOCSiteName( diracSite )
if not gocSite[ 'OK' ]:
continue
gocSites.append( gocSite[ 'Value' ] )
return S_OK( list( set( gocSites ) ) )
def getStorageElementsHosts( seNames = None ):
seHosts = []
resources = Resources.Resources()
if seNames is None:
seNames = resources.getEligibleStorageElements()
if not seNames[ 'OK' ]:
return seNames
seNames = seNames[ 'Value' ]
for seName in seNames:
result = getSEProtocolOption( seName, 'Host' )
if result['OK']:
seHosts.append( result['Value'] )
return S_OK( list( set( seHosts ) ) )
def getSEProtocolOption( se, optionName ):
"""
Get option of the Storage Element access protocol
"""
resources = Resources.Resources()
result = resources.getAccessProtocols( se )
if not result['OK']:
return S_ERROR( "Access Protocol for SE %s not found: %s" % ( se, result['Message'] ) )
try:
ap = result['Value'][0]
except IndexError:
return S_ERROR( 'No AccessProtocol associated to %s' % se )
return resources.getAccessProtocolOption( ap, optionName )
def getStorageElementEndpoint( storageElement ):
resources = Resources.Resources()
result = resources.getAccessProtocols( storageElement )
if not result['OK']:
return result
# FIXME: There can be several access protocols for the same SE !
try:
ap = result['Value'][0]
except IndexError:
return S_ERROR( 'No AccessProtocol associated to %s' % storageElement )
result = resources.getAccessProtocolOptionsDict( ap )
#result = resources.getAccessProtocols( storageElement )
if not result['OK']:
return result
host = result['Value'].get( 'Host', '' )
port = result['Value'].get( 'Port', '' )
wsurl = result['Value'].get( 'WSUrl', '' )
# MAYBE wsurl is not defined
#if host and port and wsurl:
if host and port:
url = 'httpg://%s:%s%s' % ( host, port, wsurl )
url = url.replace( '?SFN=', '' )
return S_OK( url )
return S_ERROR( ( host, port, wsurl ) )
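# For illustration (hypothetical CS values): Host='srm.example.org', Port='8446' and
# WSUrl='/srm/managerv2?SFN=' would yield 'httpg://srm.example.org:8446/srm/managerv2'
# once the '?SFN=' suffix is stripped.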
def getStorageElementEndpoints( storageElements = None ):
resources = Resources.Resources()
if storageElements is None:
storageElements = resources.getEligibleStorageElements()
if not storageElements[ 'OK' ]:
return storageElements
storageElements = storageElements[ 'Value' ]
storageElementEndpoints = []
for se in storageElements:
seEndpoint = getStorageElementEndpoint( se )
if not seEndpoint[ 'OK' ]:
continue
storageElementEndpoints.append( seEndpoint[ 'Value' ] )
return S_OK( list( set( storageElementEndpoints ) ) )
def getSpaceTokenEndpoints():
''' Get Space Token Endpoints '''
return Utils.getCSTree( 'Shares/Disk' )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| sposs/DIRAC | ResourceStatusSystem/Utilities/CSHelpers.py | Python | gpl-3.0 | 3,673 | ["DIRAC"] | 9a0cd8b7362fef15b3bec05a3409c85423ba52c80966047b969646b18487e771 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Skeleton import Skeleton
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0.1'
__CHEETAH_versionTuple__ = (2, 0, 1, 'final', 0)
__CHEETAH_genTime__ = 1309384747.4918439
__CHEETAH_genTimestamp__ = 'Wed Jun 29 17:59:07 2011'
__CHEETAH_src__ = 'CartTemplate.html'
__CHEETAH_srcLastModified__ = 'Sun Apr 18 18:16:51 2010'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class CartTemplate(Skeleton):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
Skeleton.__init__(self, *args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def pagetitle(self, **KWS):
## CHEETAH: generated from #def pagetitle at line 4, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''Your current cart:
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def body(self, **KWS):
## CHEETAH: generated from #def body at line 8, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''<h1>Your cart currently contains:</h1>
<a class="close" href="javascript:window.close()">X</a>
<form action="/addtocart">
<table>
''')
for q in VFFSL(SL,"quantities",True): # generated from line 13, col 1
write('''<tr><td><input type="text" width="3" name="select_x_like_''')
_v = VFN(VFFSL(SL,"q",True)[0],"id",True) # '${q[0].id}' on line 14, col 58
if _v is not None: write(_filter(_v, rawExpr='${q[0].id}')) # from line 14, col 58.
write('''" value="''')
_v = VFFSL(SL,"q",True)[1] # '${q[1]}' on line 14, col 77
if _v is not None: write(_filter(_v, rawExpr='${q[1]}')) # from line 14, col 77.
write('''" /></td><td>''')
_v = VFN(VFFSL(SL,"q",True)[0],"title.booktitle",True) # '${q[0].title.booktitle}' on line 14, col 97
if _v is not None: write(_filter(_v, rawExpr='${q[0].title.booktitle}')) # from line 14, col 97.
write('''</td></tr>
''')
write('''</table>
<input type="hidden" name="reset_quantities" value="true" />
<input class="submit-inline" type="submit" name="update" value="update quantities"/>
<input class="submit-inline" type="submit" name="checkout" value="checkout" />
</form>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: main method generated for this template
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_CartTemplate= 'writeBody'
## END CLASS DEFINITION
if not hasattr(CartTemplate, '_initCheetahAttributes'):
templateAPIClass = getattr(CartTemplate, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(CartTemplate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=CartTemplate()).run()
| johm/infoshopkeeper | inventoryserver/CartTemplate.py | Python | gpl-2.0 | 7,132 | ["VisIt"] | cc08075bee3b58972b3d37d51b17287c5c3145544adcb00371172bb8a95ed7b0 |
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file handles the deregistration of teams, judges, and institutions.
import webapp2
from google.appengine.ext import ndb
import tusers
import logging
#Handles the deregistration of open teams
class DeregTeamHandler(webapp2.RequestHandler):
def post(self):
user = tusers.get_current_user()
#Check if the t_key parameter is in use
if self.request.get('t_key', default_value=False):
key = ndb.Key(urlsafe=self.request.get('t_key'))
t = key.get()
if t.authorised(user):
key.delete()
self.redirect(self.request.referer)
#Handles the deregistration of independent and institutional judges
class DeregJudgeHandler(webapp2.RequestHandler):
def post(self):
user = tusers.get_current_user()
#Check if the j_key parameter is in use.
if self.request.get('j_key', default_value=False):
key = ndb.Key(urlsafe=self.request.get('j_key'))
j = key.get()
#Delete from the database
if j.authorised(user):
key.delete()
self.redirect(self.request.referer)
class DeregInstitutionHandler(webapp2.RequestHandler):
def post(self):
user = tusers.get_current_user()
#Get the requested institution
key = ndb.Key(urlsafe=self.request.get('institution'))
institution = key.get()
if institution.authorised(user):
institution.destroy()
self.redirect(self.request.referer)
app = webapp2.WSGIApplication([
('/dereg/team', DeregTeamHandler),
('/dereg/judge', DeregJudgeHandler),
('/dereg/institution', DeregInstitutionHandler)
], debug=True)
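# For illustration (hypothetical markup): a deregistration form would POST the urlsafe
# ndb key of the entity to one of the routes above, e.g.
# <form action="/dereg/team" method="post"><input type="hidden" name="t_key" value="..."/></form>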
| sarrionandia/tournatrack | dereg.py | Python | apache-2.0 | 2,082 | ["Brian"] | 4a01736607dcd64100f28dedbfb6636b4c65558888d075a3447364ecff514935 |
__author__ = 'rhythmicstar'
import copy
import gffutils
import pandas as pd
import numpy as np
from .util import separate, splitstart, splitend
species1DB = gffutils.FeatureDB('/Users/rhythmicstar/projects/exon_evolution//'
'gencode.v19.annotation.humanrbfox2and'
'fmr1andsnap25.gtf.db', keep_order=True)
species2DB = gffutils.FeatureDB('/Users/rhythmicstar/projects/exon_evolution//'
'gencode.vM5.annotation.mouserbfox2andfmr1and'
'snap25.gtf.db', keep_order=True)
class OrthologyTable(object):
exon1 = 'exon1'
exon2 = 'exon2'
def __init__(self, nucleotide_table, protein_table, species1_name,
species1_version, species2_name, species2_version):
self.nucleotide = self.read_blast_table(nucleotide_table, 'nucleotide')
self.protein = self.read_blast_table(protein_table, 'protein')
def read_blast_table(self, filename, sequence='protein'):
"""Parse blast alignment table, with only non-identical comparisons
Parameters
----------
filename : str
Path of the blast output
sequence : "protein" | "nucleotide"
Specifies whether the table being read is from nucleotide or
protein comparisons, which modifies the column names of the output
pandas DataFrame
Output
------
table : pandas.DataFrame
Parsed table of the exon alignments
"""
abbrev = 'prot' if sequence == 'protein' else "nuc"
columns = [self.exon1, self.exon2, '{}_length_of_overlap'.format(abbrev),
'{}_PID'.format(abbrev)]
table = pd.read_table(filename, names=columns)
# Get only rows where exon1 and exon2 don't have the same ID
different = table[self.exon1] != table[self.exon2]
table = table.loc[different]
return table
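# For illustration (hypothetical file name): self.read_blast_table('nuc_hits.tsv', 'nucleotide')
# yields a DataFrame with columns ['exon1', 'exon2', 'nuc_length_of_overlap', 'nuc_PID'],
# keeping only the rows where exon1 differs from exon2.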
def remove_duplicate_comparisons(self, table):
"""Check for the same comparison in the opposite order
E.g. (exonA, exonB) is the same as (exonB, exonA), so we want to
remove that
Parameters
----------
table : pandas.DataFrame
Modified blast orthology table
Output
------
table: pandas.DataFrame
Parsed table with same comparison in opposite order removed
"""
seen = set([])
rows_to_use = []
for i, row in table.iterrows():
exon1 = row[self.exon1]
exon2 = row[self.exon2]
pair = tuple(sorted([exon1, exon2]))
if pair not in seen:
seen.update({pair})
rows_to_use.append(i)
table = table.loc[rows_to_use]
table = table.reset_index(drop=True)
return table
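# For illustration (hypothetical exon ids): if the table contains both (ENSE0001, ENSMUSE0002)
# and (ENSMUSE0002, ENSE0001), only the first occurrence of the sorted pair is kept, so the
# reversed duplicate row is dropped.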
def blast_table_setup(self, ortho_table):
ref_blast_table = copy.deepcopy(ortho_table)
rows = int(ref_blast_table.size/7)
for row in range (0, rows):
ref_blast_table.ix[(row + rows), 0] = ref_blast_table.ix[row, 1]
cols = [1,2,3]
ref_blast_table.drop(ref_blast_table.columns[cols],axis=1,inplace=True)
ref_blast_table = ref_blast_table.drop_duplicates('Exon')
ref_blast_table = ref_blast_table.reset_index(drop=True)
return ref_blast_table
def add_ortho_table_columns(self, nucleotide_table, protein_table,
rows, protrow):
"""add Prot_PID, Prot_Length_of_Overlap to ortho_table
Parameters
----------
nucleotide_table : pandas.DataFrame
Modified blastn orthology table
protein_table : pandas.DataFrame
Modified blastp orthology table
Output
------
ortho_table: pandas.DataFrame
Orthology table with rows from both protein and nucleotide blast
"""
ortholog_columns = pd.DataFrame(columns=['Prot_PID'])
nucleotide_table.insert(3, 'Prot_Length_of_Overlap', 'Below Threshold of ...')
nucleotide_table = pd.concat([nucleotide_table, ortholog_columns], axis=1)
nucleotide_table = nucleotide_table.replace(np.nan,'Below Threshold of ...', regex=True)
for rowN in range (0, rows):
for rowP in range (0, protrow):
exon1n = nucleotide_table.iat[rowN, 0]
exon1p = protein_table.iat[rowP, 0]
exon2n = nucleotide_table.iat[rowN, 1]
exon2p = protein_table.iat[rowP, 1]
if (exon1n == exon1p) and (exon2n == exon2p):
nucleotide_table.iat[rowN, 5] = protein_table.iat[rowP, 3]
nucleotide_table.iat[rowN, 3] = protein_table.iat[rowP, 2]
protein_table.ix[rowP, 0] = np.nan
break
protein_table.dropna(axis=0,inplace=True)
protein_table = protein_table.reset_index(drop=True)
protein_table.insert(2, 'Nuc_Length_of_Overlap', 'Below Threshold of ...')
protein_table.insert(4, 'Nuc_PID', 'Below Threshold of ...')
ortho_table = pd.concat([nucleotide_table, protein_table])
ortho_table = ortho_table.reset_index(drop=True)
return ortho_table
def fill_in_blast_table(self, exon, speciesname, speciesversion):
GFFUtilsExonId = str(exon['exon_id']) # gffutils id
GFFUtilsExonId = separate(GFFUtilsExonId)
if (GFFUtilsExonId == exon_ID2):
exon_geneid = str(exon.attributes['gene_id'])
exon_geneid = separate(exon_geneid)
exon_transcriptid = str(exon.attributes['transcript_id'])
exon_transcriptid = separate(exon_transcriptid)
new_blast_row = [exon_ID, "{}{}{}{}".format(
speciesname,'(', speciesversion, ')'), (
"{}:{}-{}:{}:{}".format(exon.chrom, exon.start, exon.stop,
exon.strand, exon.frame)), exon_geneid,
exon_transcriptid, exon.stop - exon.start + 1]
return new_blast_row
# figure out how to loop and get location
# def location(self):
# ("{}:{}-{}:{}:{}".format(exon.chrom, exon.start, exon.stop,
# exon.strand, exon.frame)
# fix this!!! get rid of blast and call location
# def orthology(self, ortho_table, blast_table, start1 = '0', start2 = '1',
# end1 = '0', end2 = '1'):
# """add orthology classification to ortho_table
#
# Parameters
# ----------
# ortho_table : pandas.DataFrame
# Modified blast orthology table
# blast_table :
# Table with
#
# Output
# ------
# table: pandas.DataFrame
# Orthology table with added columns for protein pid and overlap
# """
#
# if (((ortho_table.iat[row, 0])[0:7]) != ((ortho_table.iat[row, 1])[0:7])):
# ortho_table.ix[row, 'Relationship'] = 'Orthologous'
# else:
# for inrow in range (0, blastRows):
# if (ortho_table.iat[row, 0] == blast_table.iat[inrow, 0]):
# start1 = splitstart(blast_table.iat[inrow, 2])
# end1 = splitend(blast_table.iat[inrow, 2])
# if (ortho_table.iat[row, 1] == blast_table.iat[inrow, 0]):
# start2 = splitstart(blast_table.iat[inrow, 2])
# end2 = splitend(blast_table.iat[inrow, 2])
# if (((start1 >= start2) and (end2 >= end1)) or
# ((start2 >= start1) and (end1 >= end2)) or
# ((start2 >= start1) and (end2 >= end1) and
# (start2 <= end1)) or
# ((start1 >= start2) and (end1 >= end2) and
# (start1 <= end2))):
# ortho_table.iat[row, 6] = 'Overlapping Genomic Loci'
# if (blast_table.iat[inrow, 2] == blast_table.iat[inrow, 2]):
# ortho_table.iat[row, 6] = 'Identical Genomic Loci'
# else:
# ortho_table.iat[row, 6] = 'Paralogous'
# return ortho_table
def save_to_csv(self, table, table_type):
"""Save table to csv format
Parameters
----------
table_type : type of table being saved
Either ortho or blast
"""
blast_columns = ['Exon', 'Species(Version)',
'Chrom:Start-Stop:Strand:Offset', 'Gene', 'Transcript',
'Exon_Length']
ortho_columns = ['Exon', 'Exon2', 'Nuc_Length_of_Overlap',
'Prot_Length_of_Overlap', 'Nuc_PID', 'Prot_PID',
'Relationship']
if table_type == 'blast':
filename = "BLAST_Table.csv"
column = blast_columns
else:
filename = "Ortho_Table.csv"
column = ortho_columns
table.to_csv(filename, columns= column, index=False)
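# A minimal usage sketch of the class above (hypothetical file names):
# tables = OrthologyTable('nuc_hits.tsv', 'prot_hits.tsv', 'human', 'v19', 'mouse', 'vM5')
# nuc = tables.remove_duplicate_comparisons(tables.nucleotide)
# prot = tables.remove_duplicate_comparisons(tables.protein)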
def create_ortho_table(species1name, species2name, species1version,
species2version, blastnucfilename, blastprotfilename):
ortho_table = pd.read_table(blastnucfilename,
names=['Exon', 'Exon2', 'Nuc_Length_of_Overlap', 'Nuc_PID'])
prot_table = pd.read_table(blastprotfilename,
names=['Exon', 'Exon2', 'Prot_Length_of_Overlap', 'Prot_PID'])
# add the other columns to the ortho_table
ortholog_columns = pd.DataFrame(columns=['Prot_PID'])
# Overlap_Column = pd.DataFrame(columns=['Prot_Length_of_Overlap'])
ortho_table.insert(3, 'Prot_Length_of_Overlap', 'Below Threshold of ...')
# append new dataframe to ortho_table
ortho_table = pd.concat([ortho_table, ortholog_columns], axis=1)
ortho_table = ortho_table.replace(np.nan,'Below Threshold of ...', regex=True)
BLAST_Table = []
# to drop a row that has the same two exons
rows = int(ortho_table.size/6)
# loop through each row
for row in range (0, rows):
if ortho_table.iat[row, 0] == ortho_table.iat[row, 1]:
ortho_table.ix[row, 0] = np.nan
# remove each duplicate
ortho_table.dropna(axis=0,inplace=True)
ortho_table = ortho_table.reset_index(drop=True)
protrow = int(prot_table.size/4)
# loop through each row
for row in range (0, protrow):
if prot_table.iat[row, 0] == prot_table.iat[row, 1]:
prot_table.ix[row, 0] = np.nan
# remove each duplicate
prot_table.dropna(axis=0,inplace=True)
prot_table = prot_table.reset_index(drop=True)
# to remove rows in nuc table that have the same two exons in the opposite order
seen = set([])
rows_to_use = []
for i, row in ortho_table.iterrows():
exon1 = row['Exon']
exon2 = row['Exon2']
pair = tuple(sorted([exon1, exon2]))
if pair not in seen:
seen.update({pair})
rows_to_use.append(i)
ortho_table_no_duplicate_comparisons = ortho_table.loc[rows_to_use]
ortho_table_no_duplicate_comparisons = ortho_table_no_duplicate_comparisons.reset_index(drop=True)
ortho_table = ortho_table_no_duplicate_comparisons
# to remove rows in protein table that have the same two exons in the opposite order
seen = set([])
rows_to_use = []
for i, row in prot_table.iterrows():
exon1 = row['Exon']
exon2 = row['Exon2']
pair = tuple(sorted([exon1, exon2]))
if pair not in seen:
seen.update({pair})
rows_to_use.append(i)
prot_table_no_duplicate_comparisons = prot_table.loc[rows_to_use]
prot_table_no_duplicate_comparisons = prot_table_no_duplicate_comparisons.reset_index(drop=True)
prot_table = prot_table_no_duplicate_comparisons
rows = int(ortho_table.size/6)
protrow = int(prot_table.size/4)
for rowN in range (0, rows):
for rowP in range (0,protrow):
exon1n = ortho_table.iat[rowN, 0]
exon1p = prot_table.iat[rowP, 0]
exon2n = ortho_table.iat[rowN, 1]
exon2p = prot_table.iat[rowP, 1]
if ((exon1n == exon1p) & (exon2n == exon2p)):
# put protein pid in nuc table
ortho_table.iat[rowN, 5] = prot_table.iat[rowP, 3]
ortho_table.iat[rowN, 3] = prot_table.iat[rowP, 2]
# put na into protein table
prot_table.ix[rowP, 0] = np.nan
break
# remove each duplicate
prot_table.dropna(axis=0,inplace=True)
prot_table = prot_table.reset_index(drop=True)
# in prot_table, add col with 'Nuc_PID' between 'Length_of_Overlap' and 'Prot_PID'
prot_table.insert(2, 'Nuc_Length_of_Overlap', 'Below Threshold of ...')
prot_table.insert(4, 'Nuc_PID', 'Below Threshold of ...')
# add protein table at end
ortho_table = pd.concat([ortho_table, prot_table])
ortho_table = ortho_table.reset_index(drop=True)
# create new dataframe
Relationship_Column = pd.DataFrame(columns=['Relationship'])
# append new dataframe to ortho_table
ortho_table = pd.concat([ortho_table, Relationship_Column], axis=1)
# create BLAST_Table dataframe with exon ids
Ref_BLAST_Table = copy.deepcopy(ortho_table)
rows = int(Ref_BLAST_Table.size/7)
for row in range(0, rows):
Ref_BLAST_Table.ix[(row + rows), 0] = Ref_BLAST_Table.ix[row, 1]
cols = [1,2,3,4,5,6]
Ref_BLAST_Table.drop(Ref_BLAST_Table.columns[cols],axis=1,inplace=True)
Ref_BLAST_Table = Ref_BLAST_Table.drop_duplicates('Exon')
Ref_BLAST_Table = Ref_BLAST_Table.reset_index(drop=True)
# in form to do
# get data to fill in table
rows = int(Ref_BLAST_Table.size)
for exonCode in species1DB.features_of_type('CDS'):
Species1Code = str(exonCode['exon_id'])
Species1Code = separate(Species1Code)
Species1Code = Species1Code[0:7]
break
# loop through each exon and get its gene and length
for row in range(0, rows):
exon_ID = Ref_BLAST_Table.ix[row, 'Exon']
exon_ID2 = separate(exon_ID)
# determine which file to look in
if exon_ID2[0:7] == Species1Code:
for exon in species1DB.features_of_type('CDS'):
gffutilsexonid = str(exon['exon_id']) #gffutils id
gffutilsexonid = separate(gffutilsexonid)
if gffutilsexonid == exon_ID2:
exon_geneid = str(exon.attributes['gene_id'])
exon_geneid = separate(exon_geneid)
exon_transcriptid = str(exon.attributes['transcript_id'])
exon_transcriptid = separate(exon_transcriptid)
New_BLAST_Row = [exon_ID, ("{}{}{}{}").format(species1name,'(', species1version, ')'),
("{}:{}-{}:{}:{}".format(exon.chrom, exon.start, exon.stop,
exon.strand, exon.frame)), exon_geneid,
exon_transcriptid, exon.stop - exon.start + 1]
BLAST_Table.append(New_BLAST_Row)
else:
# if exon is mouse
for exon in species2DB.features_of_type('CDS'):
gffutilsexonid = str(exon['exon_id']) #gffutils id
gffutilsexonid = separate(gffutilsexonid)
if gffutilsexonid == exon_ID2:
exon_geneid = str(exon.attributes['gene_id'])
exon_geneid = separate(exon_geneid)
exon_transcriptid = str(exon.attributes['transcript_id'])
exon_transcriptid = separate(exon_transcriptid)
New_BLAST_Row = [exon_ID, ("{}{}{}{}").format(species2name,'(', species2version, ')'),
("{}:{}-{}:{}:{}".format(exon.chrom, exon.start, exon.stop,
exon.strand, exon.frame)), exon_geneid,
exon_transcriptid, exon.stop - exon.start + 1]
BLAST_Table.append(New_BLAST_Row)
BLAST_Table = pd.DataFrame(BLAST_Table)
BLAST_Table.columns = ['Exon', 'Species(Version)', 'Chrom:Start-Stop:Strand:Offset', 'Gene', 'Transcript', 'Exon_Length']
# fill in row for paralogous or orthologous
rows = int(ortho_table.size/7)
blastRows = int(BLAST_Table.size/6)
for row in range (0, rows):
# orthologous: the two exon ids come from different species prefixes
if (ortho_table.iat[row, 0])[0:7] != (ortho_table.iat[row, 1])[0:7]:
ortho_table.ix[row, 'Relationship'] = 'Orthologous'
# same species prefix: compare genomic loci to classify as paralogous
else:
exonOne = ortho_table.iat[row, 0]
exonTwo = ortho_table.iat[row, 1]
location_one = 0
location_two = 1
start1 = '0'
start2 = '1'
end1 = '0'
end2 = '1'
for innerrow in range (0, blastRows):
if exonOne == BLAST_Table.iat[innerrow, 0]:
location_one = BLAST_Table.iat[innerrow, 2]
start1 = splitstart(location_one)
end1 = splitend(location_one)
if exonTwo == BLAST_Table.iat[innerrow, 0]:
location_two = BLAST_Table.iat[innerrow, 2]
start2 = splitstart(location_two)
end2 = splitend(location_two)
if (((start1 >= start2) and (end2 >= end1)) or
((start2 >= start1) and (end1 >= end2)) or
((start2 >= start1) and (end2 >= end1) and
(start2 <= end1)) or
((start1 >= start2) and (end1 >= end2) and
(start1 <= end2))):
ortho_table.iat[row, 6] = 'Overlapping Genomic Loci'
if location_one == location_two:
ortho_table.iat[row, 6] = 'Identical Genomic Loci'
else:
ortho_table.iat[row, 6] = 'Paralogous'
# to save BLAST_Table
BLAST_Table.to_csv("BLAST_Table.csv", columns=['Exon', 'Species(Version)', 'Chrom:Start-Stop:Strand:Offset',
'Gene', 'Transcript', 'Exon_Length'], index=False)
# to save ortho_table
ortho_table.to_csv("ortho_table.csv", columns=['Exon', 'Exon2', 'Nuc_Length_of_Overlap', 'Prot_Length_of_Overlap',
'Nuc_PID', 'Prot_PID', 'Relationship'], index=False)
def create_blast_table(species1name, species2name, species1version, species2version):
Ortho_Table = pd.read_table('/Users/rhythmicstar/blast/db//nuctable.html',
names=['Exon', 'Exon2', 'Nuc_Length_of_Overlap', 'Nuc_PID'])
Prot_Table = pd.read_table('/Users/rhythmicstar/blast/db//protable.html',
names=['Exon', 'Exon2', 'Prot_Length_of_Overlap', 'Prot_PID'])
# add the other columns to the ortho_table
Ortholog_Columns = pd.DataFrame(columns=['Prot_PID'])
# Overlap_Column = pd.DataFrame(columns=['Prot_Length_of_Overlap'])
Ortho_Table.insert(3, 'Prot_Length_of_Overlap', 'Below Threshold of ...')
# append new dataframe to ortho_table
Ortho_Table = pd.concat([Ortho_Table, Ortholog_Columns], axis=1)
Ortho_Table = Ortho_Table.replace(np.nan,'Below Threshold of ...', regex=True)
BLAST_Table = []
#to drop a row that has the same two exons
rows = int(Ortho_Table.size/6)
#loop through each row
for row in range(0, rows):
        if Ortho_Table.iat[row, 0] == Ortho_Table.iat[row, 1]:
Ortho_Table.ix[row, 0] = np.nan
#remove each duplicate
Ortho_Table.dropna(axis=0,inplace=True)
Ortho_Table = Ortho_Table.reset_index(drop=True)
protrow = int(Prot_Table.size/4)
#loop through each row
for row in range(0, protrow):
        if Prot_Table.iat[row, 0] == Prot_Table.iat[row, 1]:
Prot_Table.ix[row, 0] = np.nan
#remove each duplicate
Prot_Table.dropna(axis=0,inplace=True)
Prot_Table = Prot_Table.reset_index(drop=True)
#to remove rows in nuc table that have the same two exons in the opposite order
seen = set([])
rows_to_use = []
for i, row in Ortho_Table.iterrows():
exon1 = row['Exon']
exon2 = row['Exon2']
pair = tuple(sorted([exon1, exon2]))
if pair not in seen:
seen.update({pair})
rows_to_use.append(i)
ortho_table_no_duplicate_comparisons = Ortho_Table.loc[rows_to_use]
ortho_table_no_duplicate_comparisons = ortho_table_no_duplicate_comparisons.reset_index(drop=True)
Ortho_Table = ortho_table_no_duplicate_comparisons
#to remove rows in protein table that have the same two exons in the opposite order
seen = set([])
rows_to_use = []
for i, row in Prot_Table.iterrows():
exon1 = row['Exon']
exon2 = row['Exon2']
pair = tuple(sorted([exon1, exon2]))
if pair not in seen:
seen.update({pair})
rows_to_use.append(i)
prot_table_no_duplicate_comparisons = Prot_Table.loc[rows_to_use]
prot_table_no_duplicate_comparisons = prot_table_no_duplicate_comparisons.reset_index(drop=True)
Prot_Table = prot_table_no_duplicate_comparisons
rows = int(Ortho_Table.size/6)
protrow = int(Prot_Table.size/4)
for rowN in range (0, rows):
for rowP in range (0,protrow):
exon1n = Ortho_Table.iat[rowN, 0]
exon1p = Prot_Table.iat[rowP, 0]
exon2n = Ortho_Table.iat[rowN, 1]
exon2p = Prot_Table.iat[rowP, 1]
            if exon1n == exon1p and exon2n == exon2p:
# put protein pid in nuc table
Ortho_Table.iat[rowN, 5] = Prot_Table.iat[rowP, 3]
Ortho_Table.iat[rowN, 3] = Prot_Table.iat[rowP, 2]
# put na into protein table
Prot_Table.ix[rowP, 0] = np.nan
break
# remove each duplicate
Prot_Table.dropna(axis=0,inplace=True)
Prot_Table = Prot_Table.reset_index(drop=True)
# in prot_table, add col with 'Nuc_PID' between 'Length_of_Overlap' and 'Prot_PID'
Prot_Table.insert(2, 'Nuc_Length_of_Overlap', 'Below Threshold of ...')
Prot_Table.insert(4, 'Nuc_PID', 'Below Threshold of ...')
# add protein table at end
Ortho_Table = pd.concat([Ortho_Table, Prot_Table])
Ortho_Table = Ortho_Table.reset_index(drop=True)
# create new dataframe
Relationship_Column = pd.DataFrame(columns=['Relationship'])
# append new dataframe to ortho_table
Ortho_Table = pd.concat([Ortho_Table, Relationship_Column], axis=1)
# create BLAST_Table dataframe with exon ids
Ref_BLAST_Table = copy.deepcopy(Ortho_Table)
rows = int(Ref_BLAST_Table.size/7)
for row in range(0, rows):
Ref_BLAST_Table.ix[(row + rows), 0] = Ref_BLAST_Table.ix[row, 1]
cols = [1,2,3,4,5,6]
Ref_BLAST_Table.drop(Ref_BLAST_Table.columns[cols],axis=1,inplace=True)
Ref_BLAST_Table = Ref_BLAST_Table.drop_duplicates('Exon')
Ref_BLAST_Table = Ref_BLAST_Table.reset_index(drop=True)
# in form to do
# get data to fill in table
rows = int(Ref_BLAST_Table.size)
for exonCode in species1DB.features_of_type('CDS'):
Species1Code = str(exonCode['exon_id'])
Species1Code = separate(Species1Code)
Species1Code = Species1Code[0:7]
break
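    # Species1Code now holds the 7-character exon-id prefix that identifies species 1 exons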
# loop through each exon and get its gene and length
for row in range(0, rows):
exon_ID = Ref_BLAST_Table.ix[row, 'Exon']
exon_ID2 = separate(exon_ID)
# determine which file to look in
if exon_ID2[0:7] == Species1Code:
for exon in species1DB.features_of_type('CDS'):
gffutilsexonid = str(exon['exon_id']) #gffutils id
gffutilsexonid = separate(gffutilsexonid)
if gffutilsexonid == exon_ID2:
exon_geneid = str(exon.attributes['gene_id'])
exon_geneid = separate(exon_geneid)
exon_transcriptid = str(exon.attributes['transcript_id'])
exon_transcriptid = separate(exon_transcriptid)
New_BLAST_Row = [exon_ID, ("{}{}{}{}").format(species1name,'(', species1version, ')'),
("{}:{}-{}:{}:{}".format(exon.chrom, exon.start, exon.stop,
exon.strand, exon.frame)), exon_geneid,
exon_transcriptid, exon.stop - exon.start + 1]
BLAST_Table.append(New_BLAST_Row)
else:
            # otherwise the exon belongs to species 2
for exon in species2DB.features_of_type('CDS'):
gffutilsexonid = str(exon['exon_id']) #gffutils id
gffutilsexonid = separate(gffutilsexonid)
if gffutilsexonid == exon_ID2:
exon_geneid = str(exon.attributes['gene_id'])
exon_geneid = separate(exon_geneid)
exon_transcriptid = str(exon.attributes['transcript_id'])
exon_transcriptid = separate(exon_transcriptid)
New_BLAST_Row = [exon_ID, "{}{}{}{}".format(species2name,'(', species2version, ')'),
("{}:{}-{}:{}:{}".format(exon.chrom, exon.start, exon.stop,
exon.strand, exon.frame)), exon_geneid,
exon_transcriptid, exon.stop - exon.start + 1]
BLAST_Table.append(New_BLAST_Row)
BLAST_Table = pd.DataFrame(BLAST_Table)
BLAST_Table.columns = ['Exon', 'Species(Version)', 'Chrom:Start-Stop:Strand:Offset', 'Gene', 'Transcript', 'Exon_Length']
# fill in row for paralogous or orthologous
rows = int(Ortho_Table.size/7)
blastRows = int(BLAST_Table.size/6)
for row in range (0, rows):
        # exon-id prefixes from different species -> orthologous
        if (Ortho_Table.iat[row, 0])[0:7] != (Ortho_Table.iat[row, 1])[0:7]:
            Ortho_Table.ix[row, 'Relationship'] = 'Orthologous'
        # same species prefix -> paralogous or overlapping/identical loci
else:
exonOne = Ortho_Table.iat[row, 0]
exonTwo = Ortho_Table.iat[row, 1]
locationOne = 0
locationTwo = 1
start1 = '0'
start2 = '1'
end1 = '0'
end2 = '1'
for innerrow in range (0, blastRows):
if (exonOne == BLAST_Table.iat[innerrow, 0]):
locationOne = BLAST_Table.iat[innerrow, 2]
start1 = splitstart(locationOne)
end1 = splitend(locationOne)
if exonTwo == BLAST_Table.iat[innerrow, 0]:
locationTwo = BLAST_Table.iat[innerrow, 2]
start2 = splitstart(locationTwo)
end2 = splitend(locationTwo)
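            # classify the pair: overlapping if one interval contains or partially
            # overlaps the other, identical if the two location strings match exactly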
if (((start1 >= start2) and (end2 >= end1)) or
((start2 >= start1) and (end1 >= end2)) or
((start2 >= start1) and (end2 >= end1) and
(start2 <= end1)) or
((start1 >= start2) and (end1 >= end2) and
(start1 <= end2))):
Ortho_Table.iat[row, 6] = 'Overlapping Genomic Loci'
if locationOne == locationTwo:
Ortho_Table.iat[row, 6] = 'Identical Genomic Loci'
else:
Ortho_Table.iat[row, 6] = 'Paralogous'
#to save BLAST_Table
BLAST_Table.to_csv("BLAST_Table.csv", columns= ['Exon', 'Species(Version)', 'Chrom:Start-Stop:Strand:Offset',
'Gene', 'Transcript', 'Exon_Length'], index=False)
#to save ortho_table
Ortho_Table.to_csv("ortho_table.csv", columns= ['Exon', 'Exon2', 'Nuc_Length_of_Overlap', 'Prot_Length_of_Overlap',
'Nuc_PID', 'Prot_PID', 'Relationship'], index=False)
|
jessicalettes/orthoexon
|
orthoexon/table.py
|
Python
|
bsd-3-clause
| 28,805
|
[
"BLAST"
] |
d36eae21a62f5f0397853b00369f9eb6b0632575f2e9d3d8a8a1fb7b175a1906
|
from ase.structure import molecule
from ase.calculators.emt import EMT
from ase.optimize import QuasiNewton
from ase.vibrations import Vibrations
from ase.thermochemistry import IdealGasThermo
atoms = molecule('N2')
atoms.set_calculator(EMT())
dyn = QuasiNewton(atoms)
dyn.run(fmax=0.01)
electronicenergy = atoms.get_potential_energy()
vib = Vibrations(atoms)
vib.run()
vib_energies = vib.get_energies()
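# The harmonic vibrational energies are combined with translational and
# rotational (linear molecule, symmetry number 2) contributions by
# IdealGasThermo to evaluate the Gibbs free energy below.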
thermo = IdealGasThermo(vib_energies=vib_energies,
electronicenergy=electronicenergy,
atoms=atoms,
geometry='linear',
symmetrynumber=2, spin=0)
G = thermo.get_gibbs_energy(temperature=298.15, pressure=101325.)
|
askhl/ase
|
doc/ase/thermochemistry/nitrogen.py
|
Python
|
gpl-2.0
| 713
|
[
"ASE"
] |
a8d2e3542465bc69fa4231e492a2afb4582a8a6b652c161d4636907b33e2d17f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes for representing species substitution
probabilities.
"""
from six.moves import zip
__author__ = "Will Richards, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Will Richards"
__email__ = "wrichard@mit.edu"
__date__ = "Aug 31, 2012"
from collections import defaultdict
from operator import mul
from pymatgen import get_el_sp
from pymatgen.core.periodic_table import Specie
from monty.design_patterns import cached_class
import itertools
import json
import logging
import math
import os
import six
@cached_class
class SubstitutionProbability(object):
"""
This class finds substitution probabilities given lists of atoms
to substitute. The inputs make more sense if you look through the
from_defaults static method.
The substitution prediction algorithm is presented in:
Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011)
Data Mined Ionic Substitutions for the Discovery of New Compounds.
Inorganic Chemistry, 50(2), 656-663. doi:10.1021/ic102031h
Args:
lambda_table:
json table of the weight functions lambda if None,
will use the default lambda.json table
alpha:
weight function for never observed substitutions
"""
def __init__(self, lambda_table=None, alpha=-5):
if lambda_table is not None:
self._lambda_table = lambda_table
else:
module_dir = os.path.dirname(__file__)
json_file = os.path.join(module_dir, 'data', 'lambda.json')
with open(json_file) as f:
self._lambda_table = json.load(f)
#build map of specie pairs to lambdas
self.alpha = alpha
self._l = {}
self.species = set()
for row in self._lambda_table:
if 'D1+' not in row:
s1 = Specie.from_string(row[0])
s2 = Specie.from_string(row[1])
self.species.add(s1)
self.species.add(s2)
self._l[frozenset([s1, s2])] = float(row[2])
#create Z and px
self.Z = 0
self._px = defaultdict(float)
for s1, s2 in itertools.product(self.species, repeat=2):
value = math.exp(self.get_lambda(s1, s2))
self._px[s1] += value / 2
self._px[s2] += value / 2
self.Z += value
def get_lambda(self, s1, s2):
k = frozenset([get_el_sp(s1),
get_el_sp(s2)])
return self._l.get(k, self.alpha)
def get_px(self, sp):
return self._px[get_el_sp(sp)]
def prob(self, s1, s2):
"""
Gets the probability of 2 species substitution. Not used by the
structure predictor.
Returns:
Probability of s1 and s2 substitution.
"""
return math.exp(self.get_lambda(s1, s2)) / self.Z
def cond_prob(self, s1, s2):
"""
Conditional probability of substituting s1 for s2.
Args:
s1:
The *variable* specie
s2:
The *fixed* specie
Returns:
Conditional probability used by structure predictor.
"""
return math.exp(self.get_lambda(s1, s2)) / self.get_px(s2)
def pair_corr(self, s1, s2):
"""
Pair correlation of two species.
Returns:
The pair correlation of 2 species
"""
return math.exp(self.get_lambda(s1, s2)) * \
self.Z / (self.get_px(s1) * self.get_px(s2))
def cond_prob_list(self, l1, l2):
"""
Find the probabilities of 2 lists. These should include ALL species.
This is the probability conditional on l2
Args:
l1, l2:
lists of species
Returns:
The conditional probability (assuming these species are in
l2)
"""
assert len(l1) == len(l2)
p = 1
for s1, s2 in zip(l1, l2):
p *= self.cond_prob(s1, s2)
return p
def as_dict(self):
return {"name": self.__class__.__name__, "version": __version__,
"init_args": {"lambda_table": self._lambda_table,
"alpha": self._alpha},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d['init_args'])
class SubstitutionPredictor(object):
"""
Predicts likely substitutions either to or from a given composition
or species list using the SubstitutionProbability
"""
def __init__(self, lambda_table=None, alpha=-5, threshold=1e-3):
self.p = SubstitutionProbability(lambda_table, alpha)
self.threshold = threshold
def list_prediction(self, species, to_this_composition = True):
"""
Args:
species:
list of species
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
"""
for sp in species:
if get_el_sp(sp) not in self.p.species:
raise ValueError("the species {} is not allowed for the"
"probability model you are using".format(sp))
max_probabilities = []
for s1 in species:
if to_this_composition:
max_p = max([self.p.cond_prob(s2, s1) for s2 in self.p.species])
else:
max_p = max([self.p.cond_prob(s1, s2) for s2 in self.p.species])
max_probabilities.append(max_p)
output = []
def _recurse(output_prob, output_species):
best_case_prob = list(max_probabilities)
best_case_prob[:len(output_prob)] = output_prob
if six.moves.reduce(mul, best_case_prob) > self.threshold:
if len(output_species) == len(species):
odict = {
'probability': six.moves.reduce(mul, best_case_prob)}
if to_this_composition:
odict['substitutions'] = dict(
zip(output_species, species))
else:
odict['substitutions'] = dict(
zip(species, output_species))
if len(output_species) == len(set(output_species)):
output.append(odict)
return
for sp in self.p.species:
i = len(output_prob)
if to_this_composition:
prob = self.p.cond_prob(sp, species[i])
else:
prob = self.p.cond_prob(species[i], sp)
_recurse(output_prob + [prob], output_species + [sp])
_recurse([], [])
logging.info('{} substitutions found'.format(len(output)))
return output
def composition_prediction(self, composition, to_this_composition = True):
"""
Returns charged balanced substitutions from a starting or ending
composition.
Args:
composition:
starting or ending composition
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
"""
preds = self.list_prediction(list(composition.keys()),
to_this_composition)
output = []
for p in preds:
if to_this_composition:
subs = {v:k for k, v in p['substitutions'].items()}
else:
subs = p['substitutions']
charge = 0
for k, v in composition.items():
charge += subs[k].oxi_state * v
if abs(charge) < 1e-8:
output.append(p)
logging.info('{} charge balanced substitutions found'
.format(len(output)))
return output
|
sonium0/pymatgen
|
pymatgen/structure_prediction/substitution_probability.py
|
Python
|
mit
| 9,015
|
[
"pymatgen"
] |
ab123671a5890610849b24ee7b023a96cf598fef2b3be995ac5c8802679790fb
|
# -*- coding: utf-8 -*-
"""Assemble a BEL graph as an `ideogram <https://github.com/eweitz/ideogram>`_ chart in HTML.."""
import random
from typing import Any, Mapping, Optional, TextIO
from IPython.display import Javascript
from pybel import BELGraph
from pybel.dsl import CentralDogma
from pybel.io.jinja_utils import build_template_renderer
__all__ = [
'to_html',
'to_html_file',
'to_html_path',
'to_jupyter',
]
COLUMNS = [
'start_position_on_the_genomic_accession',
'end_position_on_the_genomic_accession',
]
render_template = build_template_renderer(__file__)
def to_jupyter(graph: BELGraph, chart: Optional[str] = None) -> Javascript:
"""Render the graph as JavaScript in a Jupyter Notebook."""
context = _get_context(graph, chart=chart)
javascript_str = render_template('render_with_javascript.js', **context)
return Javascript(javascript_str)
def to_html(graph: BELGraph, chart: Optional[str] = None) -> str:
"""Render the graph as an HTML string.
Common usage may involve writing to a file like:
>>> from pybel.examples import sialic_acid_graph
>>> with open('ideogram_output.html', 'w') as file:
... print(to_html(sialic_acid_graph), file=file)
"""
context = _get_context(graph, chart=chart)
return render_template('index.html', **context)
def to_html_file(graph: BELGraph, file: Optional[TextIO] = None, chart: Optional[str] = None) -> None:
"""Write the graph as an HTML file."""
print(to_html(graph=graph, chart=chart), file=file)
def to_html_path(graph: BELGraph, path: str, chart: Optional[str] = None) -> None:
"""Write the graph as an HTML file."""
with open(path, 'w') as file:
to_html_file(graph=graph, file=file, chart=chart)
def _get_context(graph: BELGraph, chart: Optional[str] = None) -> Mapping[str, Any]:
annotations = list(prerender(graph).values())
return dict(
annotations=annotations,
title=graph.name or 'BEL Graph Information Density',
chart=chart or _generate_id(),
)
def _generate_id() -> str:
"""Generate a random string of letters."""
return ''.join(random.sample('abcdefghjkmopqrstuvqxyz', 16))
def prerender(graph: BELGraph, hgnc_manager=None) -> Mapping[str, Mapping[str, Any]]:
"""Generate the annotations JSON for Ideogram."""
import bio2bel_hgnc
from bio2bel_entrez.parser import get_human_refseq_slim_df
from bio2bel_hgnc.models import HumanGene
if hgnc_manager is None:
hgnc_manager = bio2bel_hgnc.Manager()
hgnc_symbols = {
node.name
for node in graph
if isinstance(node, CentralDogma) and node.namespace.lower() == 'hgnc'
}
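    # pull genomic start/stop coordinates for those symbols from the RefSeq slim table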
refseq_df = get_human_refseq_slim_df()
result = {
hgnc_symbol: dict(name=hgnc_symbol, start=start, stop=stop)
for _, hgnc_symbol, start, stop in refseq_df[refseq_df['Symbol'].isin(hgnc_symbols)].values
}
human_genes = (
hgnc_manager
.session
.query(HumanGene.symbol, HumanGene.location)
.filter(HumanGene.symbol.in_(hgnc_symbols))
.all()
)
for human_gene in human_genes:
if human_gene.symbol not in result:
continue # something doesn't have a mapping in HGNC
result[human_gene.symbol]['chr'] = (
human_gene.location.split('q')[0]
if 'q' in human_gene.location else
human_gene.location.split('p')[0]
)
return result
|
pybel/pybel-tools
|
src/pybel_tools/assembler/ideogram/assembler.py
|
Python
|
mit
| 3,475
|
[
"Pybel"
] |
9f5ae26a81a6ed5003b68fce74796346ffe02261f78cce0397441cfc66c5084a
|
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional,
_torational_factor_list,
to_rational_coeffs)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP
from sympy.polys.fields import field
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.orderings import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, sqrt, Piecewise,
exp, sin, tanh, expand, oo, I, pi, re, im, RootOf, Eq, Tuple, Expr)
from sympy.core.basic import _aresame
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, d, p, q, t, w, x, y, z
from sympy import MatrixSymbol
def _epsilon_eq(a, b):
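    # element-wise comparison of two coefficient sequences with a 1e-10 tolerance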
for x, y in zip(a, b):
if abs(x - y) > 1e-10:
return False
return True
def _strict_eq(a, b):
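    # recursive equality that also requires matching container types and strict Poly.eq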
if type(a) == type(b):
if iterable(a):
if len(a) == len(b):
return all(_strict_eq(c, d) for c, d in zip(a, b))
else:
return False
else:
return isinstance(a, Poly) and a.eq(b, strict=True)
else:
return False
def test_Poly_from_dict():
K = FF(3)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(
x, y), domain=K).rep == DMP([[K(2), K(0)], [K(1)]], K)
assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict({(1,): sin(y)}, gens=x, composite=False) == \
Poly(sin(y)*x, x, domain='EX')
assert Poly.from_dict({(1,): y}, gens=x, composite=False) == \
Poly(y*x, x, domain='EX')
assert Poly.from_dict({(1, 1): 1}, gens=(x, y), composite=False) == \
Poly(x*y, x, y, domain='ZZ')
assert Poly.from_dict({(1, 0): y}, gens=(x, z), composite=False) == \
Poly(y*x, x, z, domain='EX')
def test_Poly_from_list():
K = FF(3)
assert Poly.from_list([2, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([5, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([2, 1], gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([2, 1], gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
raises(MultivariatePolynomialError, lambda: Poly.from_list([[]], gens=(x, y)))
def test_Poly_from_poly():
f = Poly(x + 7, x, domain=ZZ)
g = Poly(x + 2, x, modulus=3)
h = Poly(x + y, x, y, domain=ZZ)
K = FF(3)
assert Poly.from_poly(f) == f
assert Poly.from_poly(f, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=x) == f
assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=K))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=ZZ))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=QQ))
assert Poly.from_poly(f, gens=(x, y)) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
assert Poly.from_poly(
f, gens=(x, y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
K = FF(2)
assert Poly.from_poly(g) == g
assert Poly.from_poly(g, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, domain=QQ))
assert Poly.from_poly(g, domain=K).rep == DMP([K(1), K(0)], K)
assert Poly.from_poly(g, gens=x) == g
assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, gens=x, domain=QQ))
assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1), K(0)], K)
K = FF(3)
assert Poly.from_poly(h) == h
assert Poly.from_poly(
h, domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(h, gens=x) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=ZZ))
assert Poly.from_poly(
h, gens=x, domain=ZZ[y]) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=QQ))
assert Poly.from_poly(
h, gens=x, domain=QQ[y]) == Poly(x + y, x, domain=QQ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, modulus=3))
assert Poly.from_poly(h, gens=y) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=ZZ))
assert Poly.from_poly(
h, gens=y, domain=ZZ[x]) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=QQ))
assert Poly.from_poly(
h, gens=y, domain=QQ[x]) == Poly(x + y, y, domain=QQ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, modulus=3))
assert Poly.from_poly(h, gens=(x, y)) == h
assert Poly.from_poly(
h, gens=(x, y), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(x, y), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(
h, gens=(y, x)).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(y, x), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(y, x), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(y, x), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(
h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
def test_Poly_from_expr():
raises(GeneratorsNeeded, lambda: Poly.from_expr(S(0)))
raises(GeneratorsNeeded, lambda: Poly.from_expr(S(7)))
F3 = FF(3)
assert Poly.from_expr(x + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(y + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(x + 5, x, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(y + 5, y, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(x + y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
assert Poly.from_expr(x + y, x, y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
assert Poly.from_expr(x + 5).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, y).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1], [5]], ZZ)
assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1, 5]], ZZ)
def test_Poly__new__():
raises(GeneratorsError, lambda: Poly(x + 1, x, x))
raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[x]))
raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[y]))
raises(OptionError, lambda: Poly(x, x, symmetric=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, domain=QQ))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, gaussian=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, gaussian=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=[sqrt(3)]))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=[sqrt(3)]))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=False))
raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=False))
raises(NotImplementedError, lambda: Poly(x + 1, x, modulus=3, order='grlex'))
raises(NotImplementedError, lambda: Poly(x + 1, x, order='grlex'))
raises(GeneratorsNeeded, lambda: Poly({1: 2, 0: 1}))
raises(GeneratorsNeeded, lambda: Poly([2, 1]))
raises(GeneratorsNeeded, lambda: Poly((2, 1)))
raises(GeneratorsNeeded, lambda: Poly(1))
f = a*x**2 + b*x + c
assert Poly({2: a, 1: b, 0: c}, x) == f
assert Poly(iter([a, b, c]), x) == f
assert Poly([a, b, c], x) == f
assert Poly((a, b, c), x) == f
f = Poly({}, x, y, z)
assert f.gens == (x, y, z) and f.as_expr() == 0
assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, lambda: Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ'))
assert Poly(
3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
assert _epsilon_eq(
Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])
assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(
3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, lambda: Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ'))
assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
assert Poly({(2, 1): 1, (1, 2): 2, (1, 1): 3}, x, y) == \
Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
    f = 3*x**5 - x**4 + x**3 - x**2 + 65538
    assert Poly(f, x, modulus=65537, symmetric=True) == \
        Poly(3*x**5 - x**4 + x**3 - x**2 + 1, x, modulus=65537,
             symmetric=True)
    assert Poly(f, x, modulus=65537, symmetric=False) == \
        Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x**2 + 1, x,
             modulus=65537, symmetric=False)
assert Poly(x**2 + x + 1.0).get_domain() == RR
def test_Poly__args():
assert Poly(x**2 + 1).args == (x**2 + 1,)
def test_Poly__gens():
assert Poly((x - p)*(x - q), x).gens == (x,)
assert Poly((x - p)*(x - q), p).gens == (p,)
assert Poly((x - p)*(x - q), q).gens == (q,)
assert Poly((x - p)*(x - q), x, p).gens == (x, p)
assert Poly((x - p)*(x - q), x, q).gens == (x, q)
assert Poly((x - p)*(x - q), x, p, q).gens == (x, p, q)
assert Poly((x - p)*(x - q), p, x, q).gens == (p, x, q)
assert Poly((x - p)*(x - q), p, q, x).gens == (p, q, x)
assert Poly((x - p)*(x - q)).gens == (x, p, q)
assert Poly((x - p)*(x - q), sort='x > p > q').gens == (x, p, q)
assert Poly((x - p)*(x - q), sort='p > x > q').gens == (p, x, q)
assert Poly((x - p)*(x - q), sort='p > q > x').gens == (p, q, x)
assert Poly((x - p)*(x - q), x, p, q, sort='p > q > x').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='x').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='p').gens == (p, x, q)
assert Poly((x - p)*(x - q), wrt='q').gens == (q, x, p)
assert Poly((x - p)*(x - q), wrt=x).gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt=p).gens == (p, x, q)
assert Poly((x - p)*(x - q), wrt=q).gens == (q, x, p)
assert Poly((x - p)*(x - q), x, p, q, wrt='p').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='p', sort='q > x').gens == (p, q, x)
assert Poly((x - p)*(x - q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
assert Poly(x).zero == Poly(0, x, domain=ZZ)
assert Poly(x/2).zero == Poly(0, x, domain=QQ)
def test_Poly_one():
assert Poly(x).one == Poly(1, x, domain=ZZ)
assert Poly(x/2).one == Poly(1, x, domain=QQ)
def test_Poly__unify():
raises(UnificationFailed, lambda: Poly(x)._unify(y))
F3 = FF(3)
F5 = FF(5)
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (
DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=5))[2:] == (
DMP([[F5(1)], []], F5), DMP([[F5(1), F5(0)]], F5))
assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[F3(1), F3(0)]], F3), DMP([[F3(1)], []], F3))
assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
assert Poly(x + 1, x)._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
F, A, B = field("a,b", ZZ)
assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
raises(CoercionFailed, lambda: Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)'))
f = Poly(t**2 + t/3 + x, t, domain='QQ(x)')
g = Poly(t**2 + t/3 + x, t, domain='QQ[x]')
assert f._unify(g)[2:] == (f.rep, f.rep)
def test_Poly_free_symbols():
assert Poly(x**2 + 1).free_symbols == set([x])
assert Poly(x**2 + y*z).free_symbols == set([x, y, z])
assert Poly(x**2 + y*z, x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z)).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([x, y, z])
def test_PurePoly_free_symbols():
assert PurePoly(x**2 + 1).free_symbols == set([])
assert PurePoly(x**2 + y*z).free_symbols == set([])
assert PurePoly(x**2 + y*z, x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z)).free_symbols == set([])
assert PurePoly(x**2 + sin(y*z), x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([y, z])
def test_Poly__eq__():
assert (Poly(x, x) == Poly(x, x)) is True
assert (Poly(x, x, domain=QQ) == Poly(x, x)) is True
assert (Poly(x, x) == Poly(x, x, domain=QQ)) is True
assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) is True
assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) is True
assert (Poly(x*y, x, y) == Poly(x, x)) is False
assert (Poly(x, x, y) == Poly(x, x)) is False
assert (Poly(x, x) == Poly(x, x, y)) is False
assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) is False
assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) is False
f = Poly(x, x, domain=ZZ)
g = Poly(x, x, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
t0 = Symbol('t0')
f = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='QQ[x,t0]')
g = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='ZZ(x,t0)')
assert (f == g) is True
def test_PurePoly__eq__():
assert (PurePoly(x, x) == PurePoly(x, x)) is True
assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) is True
assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) is True
assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) is True
assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) is True
assert (PurePoly(x*y, x, y) == PurePoly(x, x)) is False
assert (PurePoly(x, x, y) == PurePoly(x, x)) is False
assert (PurePoly(x, x) == PurePoly(x, x, y)) is False
assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) is True
assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) is True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(x, x, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(y, y, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
def test_PurePoly_Poly():
assert isinstance(PurePoly(Poly(x**2 + 1)), PurePoly) is True
assert isinstance(Poly(PurePoly(x**2 + 1)), Poly) is True
def test_Poly_get_domain():
assert Poly(2*x).get_domain() == ZZ
assert Poly(2*x, domain='ZZ').get_domain() == ZZ
assert Poly(2*x, domain='QQ').get_domain() == QQ
assert Poly(x/2).get_domain() == QQ
raises(CoercionFailed, lambda: Poly(x/2, domain='ZZ'))
assert Poly(x/2, domain='QQ').get_domain() == QQ
assert Poly(0.2*x).get_domain() == RR
def test_Poly_set_domain():
assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')
assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
raises(CoercionFailed, lambda: Poly(x/2 + 1).set_domain(ZZ))
raises(CoercionFailed, lambda: Poly(x + 1, modulus=2).set_domain(QQ))
raises(GeneratorsError, lambda: Poly(x*y, x, y).set_domain(ZZ[y]))
def test_Poly_get_modulus():
assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
raises(PolynomialError, lambda: Poly(x**2 + 1).get_modulus())
def test_Poly_set_modulus():
assert Poly(
x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
assert Poly(
x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
raises(CoercionFailed, lambda: Poly(x/2 + 1).set_modulus(2))
def test_Poly_add_ground():
assert Poly(x + 1).add_ground(2) == Poly(x + 3)
def test_Poly_sub_ground():
assert Poly(x + 1).sub_ground(2) == Poly(x - 1)
def test_Poly_mul_ground():
assert Poly(x + 1).mul_ground(2) == Poly(2*x + 2)
def test_Poly_quo_ground():
assert Poly(2*x + 4).quo_ground(2) == Poly(x + 2)
assert Poly(2*x + 3).quo_ground(2) == Poly(x + 1)
def test_Poly_exquo_ground():
assert Poly(2*x + 4).exquo_ground(2) == Poly(x + 2)
raises(ExactQuotientFailed, lambda: Poly(2*x + 3).exquo_ground(2))
def test_Poly_abs():
assert Poly(-x + 1, x).abs() == abs(Poly(-x + 1, x)) == Poly(x + 1, x)
def test_Poly_neg():
assert Poly(-x + 1, x).neg() == -Poly(-x + 1, x) == Poly(x - 1, x)
def test_Poly_add():
assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) + Poly(0, x) == Poly(0, x)
assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)
assert Poly(1, x) + x == Poly(x + 1, x)
assert Poly(1, x) + sin(x) == 1 + sin(x)
assert Poly(x, x) + 1 == Poly(x + 1, x)
assert 1 + Poly(x, x) == Poly(x + 1, x)
def test_Poly_sub():
assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) - Poly(0, x) == Poly(0, x)
assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)
assert Poly(1, x) - x == Poly(1 - x, x)
assert Poly(1, x) - sin(x) == 1 - sin(x)
assert Poly(x, x) - 1 == Poly(x - 1, x)
assert 1 - Poly(x, x) == Poly(1 - x, x)
def test_Poly_mul():
assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) * Poly(0, x) == Poly(0, x)
assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)
assert Poly(1, x) * x == Poly(x, x)
assert Poly(1, x) * sin(x) == sin(x)
assert Poly(x, x) * 2 == Poly(2*x, x)
assert 2 * Poly(x, x) == Poly(2*x, x)
def test_Poly_sqr():
assert Poly(x*y, x, y).sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
assert Poly(x, x).pow(10) == Poly(x**10, x)
assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)
assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)
assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
assert Poly(x*y + 1, x, y)**(-1) == (x*y + 1)**(-1)
assert Poly(x*y + 1, x, y)**x == (x*y + 1)**x
def test_Poly_divmod():
f, g = Poly(x**2), Poly(x)
q, r = g, Poly(0, x)
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
assert divmod(f, x) == (q, r)
assert f // x == q
assert f % x == r
q, r = Poly(0, x), Poly(2, x)
assert divmod(2, g) == (q, r)
assert 2 // g == q
assert 2 % g == r
assert Poly(x)/Poly(x) == 1
assert Poly(x**2)/Poly(x) == x
assert Poly(x)/Poly(x**2) == 1/x
def test_Poly_eq_ne():
assert (Poly(x + y, x, y) == Poly(x + y, x, y)) is True
assert (Poly(x + y, x) == Poly(x + y, x, y)) is False
assert (Poly(x + y, x, y) == Poly(x + y, x)) is False
assert (Poly(x + y, x) == Poly(x + y, x)) is True
assert (Poly(x + y, y) == Poly(x + y, y)) is True
assert (Poly(x + y, x, y) == x + y) is True
assert (Poly(x + y, x) == x + y) is True
assert (Poly(x + y, x, y) == x + y) is True
assert (Poly(x + y, x) == x + y) is True
assert (Poly(x + y, y) == x + y) is True
assert (Poly(x + y, x, y) != Poly(x + y, x, y)) is False
assert (Poly(x + y, x) != Poly(x + y, x, y)) is True
assert (Poly(x + y, x, y) != Poly(x + y, x)) is True
assert (Poly(x + y, x) != Poly(x + y, x)) is False
assert (Poly(x + y, y) != Poly(x + y, y)) is False
assert (Poly(x + y, x, y) != x + y) is False
assert (Poly(x + y, x) != x + y) is False
assert (Poly(x + y, x, y) != x + y) is False
assert (Poly(x + y, x) != x + y) is False
assert (Poly(x + y, y) != x + y) is False
assert (Poly(x, x) == sin(x)) is False
assert (Poly(x, x) != sin(x)) is True
def test_Poly_nonzero():
assert not bool(Poly(0, x)) is True
assert not bool(Poly(1, x)) is False
def test_Poly_properties():
assert Poly(0, x).is_zero is True
assert Poly(1, x).is_zero is False
assert Poly(1, x).is_one is True
assert Poly(2, x).is_one is False
assert Poly(x - 1, x).is_sqf is True
assert Poly((x - 1)**2, x).is_sqf is False
assert Poly(x - 1, x).is_monic is True
assert Poly(2*x - 1, x).is_monic is False
assert Poly(3*x + 2, x).is_primitive is True
assert Poly(4*x + 2, x).is_primitive is False
assert Poly(1, x).is_ground is True
assert Poly(x, x).is_ground is False
assert Poly(x + y + z + 1).is_linear is True
assert Poly(x*y*z + 1).is_linear is False
assert Poly(x*y + z + 1).is_quadratic is True
assert Poly(x*y*z + 1).is_quadratic is False
assert Poly(x*y).is_monomial is True
assert Poly(x*y + 1).is_monomial is False
assert Poly(x**2 + x*y).is_homogeneous is True
assert Poly(x**3 + x*y).is_homogeneous is False
assert Poly(x).is_univariate is True
assert Poly(x*y).is_univariate is False
assert Poly(x*y).is_multivariate is True
assert Poly(x).is_multivariate is False
assert Poly(
x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic is False
assert Poly(
x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic is True
def test_Poly_is_irreducible():
assert Poly(x**2 + x + 1).is_irreducible is True
assert Poly(x**2 + 2*x + 1).is_irreducible is False
assert Poly(7*x + 3, modulus=11).is_irreducible is True
assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible is False
def test_Poly_subs():
assert Poly(x + 1).subs(x, 0) == 1
assert Poly(x + 1).subs(x, x) == Poly(x + 1)
assert Poly(x + 1).subs(x, y) == Poly(y + 1)
assert Poly(x*y, x).subs(y, x) == x**2
assert Poly(x*y, x).subs(x, y) == y**2
def test_Poly_replace():
assert Poly(x + 1).replace(x) == Poly(x + 1)
assert Poly(x + 1).replace(y) == Poly(y + 1)
raises(PolynomialError, lambda: Poly(x + y).replace(z))
assert Poly(x + 1).replace(x, x) == Poly(x + 1)
assert Poly(x + 1).replace(x, y) == Poly(y + 1)
assert Poly(x + y).replace(x, x) == Poly(x + y)
assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)
assert Poly(x + y).replace(y, y) == Poly(x + y)
assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)
raises(PolynomialError, lambda: Poly(x + y).replace(x, y))
raises(PolynomialError, lambda: Poly(x + y).replace(z, t))
assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)
raises(PolynomialError, lambda: Poly(x + y, x).replace(x, y))
raises(PolynomialError, lambda: Poly(x + y, y).replace(y, x))
def test_Poly_reorder():
raises(PolynomialError, lambda: Poly(x + y).reorder(x, z))
assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_ltrim():
f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)
raises(PolynomialError, lambda: Poly(x*y**2 + y**2, x, y).ltrim(y))
def test_Poly_has_only_gens():
assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) is True
assert Poly(x*y + z, x, y, z).has_only_gens(x, y) is False
raises(GeneratorsError, lambda: Poly(x*y**2 + y**2, x, y).has_only_gens(t))
def test_Poly_to_ring():
assert Poly(2*x + 1, domain='ZZ').to_ring() == Poly(2*x + 1, domain='ZZ')
assert Poly(2*x + 1, domain='QQ').to_ring() == Poly(2*x + 1, domain='ZZ')
raises(CoercionFailed, lambda: Poly(x/2 + 1).to_ring())
raises(DomainError, lambda: Poly(2*x + 1, modulus=3).to_ring())
def test_Poly_to_field():
assert Poly(2*x + 1, domain='ZZ').to_field() == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1, domain='QQ').to_field() == Poly(2*x + 1, domain='QQ')
assert Poly(x/2 + 1, domain='QQ').to_field() == Poly(x/2 + 1, domain='QQ')
assert Poly(2*x + 1, modulus=3).to_field() == Poly(2*x + 1, modulus=3)
assert Poly(2.0*x + 1.0).to_field() == Poly(2.0*x + 1.0)
def test_Poly_to_exact():
assert Poly(2*x).to_exact() == Poly(2*x)
assert Poly(x/2).to_exact() == Poly(x/2)
assert Poly(0.1*x).to_exact() == Poly(x/10)
def test_Poly_retract():
f = Poly(x**2 + 1, x, domain=QQ[y])
assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')
assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
f = Poly(x**3 + 2*x**2 + 3*x + 4)
assert f.slice(0, 0) == Poly(0, x)
assert f.slice(0, 1) == Poly(4, x)
assert f.slice(0, 2) == Poly(3*x + 4, x)
assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 0) == Poly(0, x)
assert f.slice(x, 0, 1) == Poly(4, x)
assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
assert Poly(0, x).coeffs() == [0]
assert Poly(1, x).coeffs() == [1]
assert Poly(2*x + 1, x).coeffs() == [2, 1]
assert Poly(7*x**2 + 2*x + 1, x).coeffs() == [7, 2, 1]
assert Poly(7*x**4 + 2*x + 1, x).coeffs() == [7, 2, 1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
assert Poly(0, x).monoms() == [(0,)]
assert Poly(1, x).monoms() == [(0,)]
assert Poly(2*x + 1, x).monoms() == [(1,), (0,)]
assert Poly(7*x**2 + 2*x + 1, x).monoms() == [(2,), (1,), (0,)]
assert Poly(7*x**4 + 2*x + 1, x).monoms() == [(4,), (1,), (0,)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
assert Poly(0, x).terms() == [((0,), 0)]
assert Poly(1, x).terms() == [((0,), 1)]
assert Poly(2*x + 1, x).terms() == [((1,), 2), ((0,), 1)]
assert Poly(7*x**2 + 2*x + 1, x).terms() == [((2,), 7), ((1,), 2), ((0,), 1)]
assert Poly(7*x**4 + 2*x + 1, x).terms() == [((4,), 7), ((1,), 2), ((0,), 1)]
assert Poly(
x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
assert Poly(
x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
assert Poly(0, x).all_coeffs() == [0]
assert Poly(1, x).all_coeffs() == [1]
assert Poly(2*x + 1, x).all_coeffs() == [2, 1]
assert Poly(7*x**2 + 2*x + 1, x).all_coeffs() == [7, 2, 1]
assert Poly(7*x**4 + 2*x + 1, x).all_coeffs() == [7, 0, 0, 2, 1]
def test_Poly_all_monoms():
assert Poly(0, x).all_monoms() == [(0,)]
assert Poly(1, x).all_monoms() == [(0,)]
assert Poly(2*x + 1, x).all_monoms() == [(1,), (0,)]
assert Poly(7*x**2 + 2*x + 1, x).all_monoms() == [(2,), (1,), (0,)]
assert Poly(7*x**4 + 2*x + 1, x).all_monoms() == [(4,), (3,), (2,), (1,), (0,)]
def test_Poly_all_terms():
assert Poly(0, x).all_terms() == [((0,), 0)]
assert Poly(1, x).all_terms() == [((0,), 1)]
assert Poly(2*x + 1, x).all_terms() == [((1,), 2), ((0,), 1)]
assert Poly(7*x**2 + 2*x + 1, x).all_terms() == \
[((2,), 7), ((1,), 2), ((0,), 1)]
assert Poly(7*x**4 + 2*x + 1, x).all_terms() == \
[((4,), 7), ((3,), 0), ((2,), 0), ((1,), 2), ((0,), 1)]
def test_Poly_termwise():
f = Poly(x**2 + 20*x + 400)
g = Poly(x**2 + 2*x + 4)
def func(monom, coeff):
(k,) = monom
return coeff//10**(2 - k)
assert f.termwise(func) == g
def func(monom, coeff):
(k,) = monom
return (k,), coeff//10**(2 - k)
assert f.termwise(func) == g
def test_Poly_length():
assert Poly(0, x).length() == 0
assert Poly(1, x).length() == 1
assert Poly(x, x).length() == 1
assert Poly(x + 1, x).length() == 2
assert Poly(x**2 + 1, x).length() == 2
assert Poly(x**2 + x + 1, x).length() == 3
def test_Poly_as_dict():
assert Poly(0, x).as_dict() == {}
assert Poly(0, x, y, z).as_dict() == {}
assert Poly(1, x).as_dict() == {(0,): 1}
assert Poly(1, x, y, z).as_dict() == {(0, 0, 0): 1}
assert Poly(x**2 + 3, x).as_dict() == {(2,): 1, (0,): 3}
assert Poly(x**2 + 3, x, y, z).as_dict() == {(2, 0, 0): 1, (0, 0, 0): 3}
assert Poly(3*x**2*y*z**3 + 4*x*y + 5*x*z).as_dict() == {(2, 1, 3): 3,
(1, 1, 0): 4, (1, 0, 1): 5}
def test_Poly_as_expr():
assert Poly(0, x).as_expr() == 0
assert Poly(0, x, y, z).as_expr() == 0
assert Poly(1, x).as_expr() == 1
assert Poly(1, x, y, z).as_expr() == 1
assert Poly(x**2 + 3, x).as_expr() == x**2 + 3
assert Poly(x**2 + 3, x, y, z).as_expr() == x**2 + 3
assert Poly(
3*x**2*y*z**3 + 4*x*y + 5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z
f = Poly(x**2 + 2*x*y**2 - y, x, y)
assert f.as_expr() == -y + x**2 + 2*x*y**2
assert f.as_expr({x: 5}) == 25 - y + 10*y**2
assert f.as_expr({y: 6}) == -6 + 72*x + x**2
assert f.as_expr({x: 5, y: 6}) == 379
assert f.as_expr(5, 6) == 379
raises(GeneratorsError, lambda: f.as_expr({z: 7}))
def test_Poly_lift():
assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521,
x, domain='QQ')
def test_Poly_deflate():
assert Poly(0, x).deflate() == ((1,), Poly(0, x))
assert Poly(1, x).deflate() == ((1,), Poly(1, x))
assert Poly(x, x).deflate() == ((1,), Poly(x, x))
assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))
assert Poly(
x**2*y*z**11 + x**4*z**11).deflate() == ((2, 1, 11), Poly(x*y*z + x**2*z))
def test_Poly_inject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x)
assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
ex = x + y + z + t + w
g = Poly(ex, x, y, z, t, w)
assert g.eject(x) == Poly(ex, y, z, t, w, domain='ZZ[x]')
assert g.eject(x, y) == Poly(ex, z, t, w, domain='ZZ[x, y]')
assert g.eject(x, y, z) == Poly(ex, t, w, domain='ZZ[x, y, z]')
assert g.eject(w) == Poly(ex, x, y, z, t, domain='ZZ[w]')
assert g.eject(t, w) == Poly(ex, x, y, z, domain='ZZ[w, t]')
assert g.eject(z, t, w) == Poly(ex, x, y, domain='ZZ[w, t, z]')
raises(DomainError, lambda: Poly(x*y, x, y, domain=ZZ[z]).eject(y))
raises(NotImplementedError, lambda: Poly(x*y, x, y, z).eject(y))
def test_Poly_exclude():
assert Poly(x, x, y).exclude() == Poly(x, x)
assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
assert Poly(1, x, y)._gen_to_level(-2) == 0
assert Poly(1, x, y)._gen_to_level(-1) == 1
assert Poly(1, x, y)._gen_to_level( 0) == 0
assert Poly(1, x, y)._gen_to_level( 1) == 1
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(-3))
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level( 2))
assert Poly(1, x, y)._gen_to_level(x) == 0
assert Poly(1, x, y)._gen_to_level(y) == 1
assert Poly(1, x, y)._gen_to_level('x') == 0
assert Poly(1, x, y)._gen_to_level('y') == 1
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(z))
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level('z'))
def test_Poly_degree():
assert Poly(0, x).degree() == -oo
assert Poly(1, x).degree() == 0
assert Poly(x, x).degree() == 1
assert Poly(0, x).degree(gen=0) == -oo
assert Poly(1, x).degree(gen=0) == 0
assert Poly(x, x).degree(gen=0) == 1
assert Poly(0, x).degree(gen=x) == -oo
assert Poly(1, x).degree(gen=x) == 0
assert Poly(x, x).degree(gen=x) == 1
assert Poly(0, x).degree(gen='x') == -oo
assert Poly(1, x).degree(gen='x') == 0
assert Poly(x, x).degree(gen='x') == 1
raises(PolynomialError, lambda: Poly(1, x).degree(gen=1))
raises(PolynomialError, lambda: Poly(1, x).degree(gen=y))
raises(PolynomialError, lambda: Poly(1, x).degree(gen='y'))
assert Poly(1, x, y).degree() == 0
assert Poly(2*y, x, y).degree() == 0
assert Poly(x*y, x, y).degree() == 1
assert Poly(1, x, y).degree(gen=x) == 0
assert Poly(2*y, x, y).degree(gen=x) == 0
assert Poly(x*y, x, y).degree(gen=x) == 1
assert Poly(1, x, y).degree(gen=y) == 0
assert Poly(2*y, x, y).degree(gen=y) == 1
assert Poly(x*y, x, y).degree(gen=y) == 1
assert degree(1, x) == 0
assert degree(x, x) == 1
assert degree(x*y**2, gen=x) == 1
assert degree(x*y**2, gen=y) == 2
assert degree(x*y**2, x, y) == 1
assert degree(x*y**2, y, x) == 2
raises(ComputationFailed, lambda: degree(1))
def test_Poly_degree_list():
assert Poly(0, x).degree_list() == (-oo,)
assert Poly(0, x, y).degree_list() == (-oo, -oo)
assert Poly(0, x, y, z).degree_list() == (-oo, -oo, -oo)
assert Poly(1, x).degree_list() == (0,)
assert Poly(1, x, y).degree_list() == (0, 0)
assert Poly(1, x, y, z).degree_list() == (0, 0, 0)
assert Poly(x**2*y + x**3*z**2 + 1).degree_list() == (3, 1, 2)
assert degree_list(1, x) == (0,)
assert degree_list(x, x) == (1,)
assert degree_list(x*y**2) == (1, 2)
raises(ComputationFailed, lambda: degree_list(1))
def test_Poly_total_degree():
assert Poly(x**2*y + x**3*z**2 + 1).total_degree() == 5
assert Poly(x**2 + z**3).total_degree() == 3
assert Poly(x*y*z + z**4).total_degree() == 4
assert Poly(x**3 + x + 1).total_degree() == 3
def test_Poly_homogenize():
assert Poly(x**2+y).homogenize(z) == Poly(x**2+y*z)
assert Poly(x+y).homogenize(z) == Poly(x+y, x, y, z)
assert Poly(x+y**2).homogenize(y) == Poly(x*y+y**2)
def test_Poly_homogeneous_order():
assert Poly(0, x, y).homogeneous_order() == -oo
assert Poly(1, x, y).homogeneous_order() == 0
assert Poly(x, x, y).homogeneous_order() == 1
assert Poly(x*y, x, y).homogeneous_order() == 2
assert Poly(x + 1, x, y).homogeneous_order() is None
assert Poly(x*y + x, x, y).homogeneous_order() is None
assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
assert Poly(0, x).LC() == 0
assert Poly(1, x).LC() == 1
assert Poly(2*x**2 + x, x).LC() == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1
assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
assert Poly(0, x).TC() == 0
assert Poly(1, x).TC() == 1
assert Poly(2*x**2 + x, x).TC() == 0
def test_Poly_EC():
assert Poly(0, x).EC() == 0
assert Poly(1, x).EC() == 1
assert Poly(2*x**2 + x, x).EC() == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_coeff():
assert Poly(0, x).coeff_monomial(1) == 0
assert Poly(0, x).coeff_monomial(x) == 0
assert Poly(1, x).coeff_monomial(1) == 1
assert Poly(1, x).coeff_monomial(x) == 0
assert Poly(x**8, x).coeff_monomial(1) == 0
assert Poly(x**8, x).coeff_monomial(x**7) == 0
assert Poly(x**8, x).coeff_monomial(x**8) == 1
assert Poly(x**8, x).coeff_monomial(x**9) == 0
assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(1) == 1
assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(x*y**2) == 3
p = Poly(24*x*y*exp(8) + 23*x, x, y)
assert p.coeff_monomial(x) == 23
assert p.coeff_monomial(y) == 0
assert p.coeff_monomial(x*y) == 24*exp(8)
assert p.as_expr().coeff(x) == 24*y*exp(8) + 23
raises(NotImplementedError, lambda: p.coeff(x))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(0))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x*y))
def test_Poly_nth():
assert Poly(0, x).nth(0) == 0
assert Poly(0, x).nth(1) == 0
assert Poly(1, x).nth(0) == 1
assert Poly(1, x).nth(1) == 0
assert Poly(x**8, x).nth(0) == 0
assert Poly(x**8, x).nth(7) == 0
assert Poly(x**8, x).nth(8) == 1
assert Poly(x**8, x).nth(9) == 0
assert Poly(3*x*y**2 + 1, x, y).nth(0, 0) == 1
assert Poly(3*x*y**2 + 1, x, y).nth(1, 2) == 3
raises(ValueError, lambda: Poly(x*y + 1, x, y).nth(1))
def test_Poly_LM():
assert Poly(0, x).LM() == (0,)
assert Poly(1, x).LM() == (0,)
assert Poly(2*x**2 + x, x).LM() == (2,)
assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)
assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)
rev_lex = lambda monom: tuple(reversed(monom))
assert f.LM(order='lex') == (2, 3, 1)
assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
assert Poly(0, x).EM() == (0,)
assert Poly(1, x).EM() == (0,)
assert Poly(2*x**2 + x, x).EM() == (1,)
assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
assert Poly(0, x).LT() == ((0,), 0)
assert Poly(1, x).LT() == ((0,), 1)
assert Poly(2*x**2 + x, x).LT() == ((2,), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)
assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
assert Poly(0, x).ET() == ((0,), 0)
assert Poly(1, x).ET() == ((0,), 1)
assert Poly(2*x**2 + x, x).ET() == ((1,), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
assert Poly(-1, x).max_norm() == 1
assert Poly( 0, x).max_norm() == 0
assert Poly( 1, x).max_norm() == 1
def test_Poly_l1_norm():
assert Poly(-1, x).l1_norm() == 1
assert Poly( 0, x).l1_norm() == 0
assert Poly( 1, x).l1_norm() == 1
def test_Poly_clear_denoms():
coeff, poly = Poly(x + 2, x).clear_denoms()
assert coeff == 1 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/2 + 1, x).clear_denoms()
assert coeff == 2 and poly == Poly(
x + 2, x, domain='QQ') and poly.get_domain() == QQ
coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
assert coeff == 2 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
assert coeff == y and poly == Poly(
x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]
coeff, poly = Poly(x/3 + sqrt(2), x, domain='EX').clear_denoms()
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
coeff, poly = Poly(
x/3 + sqrt(2), x, domain='EX').clear_denoms(convert=True)
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
def test_Poly_rat_clear_denoms():
f = Poly(x**2/y + 1, x)
g = Poly(x**3 + y, x)
assert f.rat_clear_denoms(g) == \
(Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))
f = f.set_domain(EX)
g = g.set_domain(EX)
assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)
assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)
assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
assert Poly(x**2 + x).diff() == Poly(2*x + 1)
assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)
assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)
assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
assert Poly(0, x).eval(7) == 0
assert Poly(1, x).eval(7) == 1
assert Poly(x, x).eval(7) == 7
assert Poly(0, x).eval(0, 7) == 0
assert Poly(1, x).eval(0, 7) == 1
assert Poly(x, x).eval(0, 7) == 7
assert Poly(0, x).eval(x, 7) == 0
assert Poly(1, x).eval(x, 7) == 1
assert Poly(x, x).eval(x, 7) == 7
assert Poly(0, x).eval('x', 7) == 0
assert Poly(1, x).eval('x', 7) == 1
assert Poly(x, x).eval('x', 7) == 7
raises(PolynomialError, lambda: Poly(1, x).eval(1, 7))
raises(PolynomialError, lambda: Poly(1, x).eval(y, 7))
raises(PolynomialError, lambda: Poly(1, x).eval('y', 7))
assert Poly(123, x, y).eval(7) == Poly(123, y)
assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)
assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)
assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)
assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)
assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48
assert Poly(x*y + y, x, y).eval((6, 7)) == 49
assert Poly(x*y + y, x, y).eval([6, 7]) == 49
assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1
raises(ValueError, lambda: Poly(x*y + y, x, y).eval((6, 7, 8)))
raises(DomainError, lambda: Poly(x + 1, domain='ZZ').eval(S(1)/2, auto=False))
# issue 6344
alpha = Symbol('alpha')
result = (2*alpha*z - 2*alpha + z**2 + 3)/(z**2 - 2*z + 1)
f = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, domain='ZZ[alpha]')
assert f.eval((z + 1)/(z - 1)) == result
g = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, y, domain='ZZ[alpha]')
assert g.eval((z + 1)/(z - 1)) == Poly(result, y, domain='ZZ(alpha,z)')
def test_Poly___call__():
f = Poly(2*x*y + 3*x + y + 2*z)
assert f(2) == Poly(5*y + 2*z + 6)
assert f(2, 5) == Poly(2*z + 31)
assert f(2, 5, 7) == 45
def test_parallel_poly_from_expr():
assert parallel_poly_from_expr(
[x - 1, x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr([Poly(
x - 1, x), Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([Poly(
x - 1, x), x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([x - 1, Poly(
x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([Poly(x - 1, x), Poly(
x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr(
[x - 1, x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
[Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]
raises(PolificationFailed, lambda: parallel_poly_from_expr([0, 1]))
def test_pdiv():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.pdiv(G) == (Q, R)
assert F.prem(G) == R
assert F.pquo(G) == Q
assert F.pexquo(G) == Q
assert pdiv(f, g) == (q, r)
assert prem(f, g) == r
assert pquo(f, g) == q
assert pexquo(f, g) == q
assert pdiv(f, g, x, y) == (q, r)
assert prem(f, g, x, y) == r
assert pquo(f, g, x, y) == q
assert pexquo(f, g, x, y) == q
assert pdiv(f, g, (x, y)) == (q, r)
assert prem(f, g, (x, y)) == r
assert pquo(f, g, (x, y)) == q
assert pexquo(f, g, (x, y)) == q
assert pdiv(F, G) == (Q, R)
assert prem(F, G) == R
assert pquo(F, G) == Q
assert pexquo(F, G) == Q
assert pdiv(f, g, polys=True) == (Q, R)
assert prem(f, g, polys=True) == R
assert pquo(f, g, polys=True) == Q
assert pexquo(f, g, polys=True) == Q
assert pdiv(F, G, polys=False) == (q, r)
assert prem(F, G, polys=False) == r
assert pquo(F, G, polys=False) == q
assert pexquo(F, G, polys=False) == q
raises(ComputationFailed, lambda: pdiv(4, 2))
raises(ComputationFailed, lambda: prem(4, 2))
raises(ComputationFailed, lambda: pquo(4, 2))
raises(ComputationFailed, lambda: pexquo(4, 2))
def test_div():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.div(G) == (Q, R)
assert F.rem(G) == R
assert F.quo(G) == Q
assert F.exquo(G) == Q
assert div(f, g) == (q, r)
assert rem(f, g) == r
assert quo(f, g) == q
assert exquo(f, g) == q
assert div(f, g, x, y) == (q, r)
assert rem(f, g, x, y) == r
assert quo(f, g, x, y) == q
assert exquo(f, g, x, y) == q
assert div(f, g, (x, y)) == (q, r)
assert rem(f, g, (x, y)) == r
assert quo(f, g, (x, y)) == q
assert exquo(f, g, (x, y)) == q
assert div(F, G) == (Q, R)
assert rem(F, G) == R
assert quo(F, G) == Q
assert exquo(F, G) == Q
assert div(f, g, polys=True) == (Q, R)
assert rem(f, g, polys=True) == R
assert quo(f, g, polys=True) == Q
assert exquo(f, g, polys=True) == Q
assert div(F, G, polys=False) == (q, r)
assert rem(F, G, polys=False) == r
assert quo(F, G, polys=False) == q
assert exquo(F, G, polys=False) == q
raises(ComputationFailed, lambda: div(4, 2))
raises(ComputationFailed, lambda: rem(4, 2))
raises(ComputationFailed, lambda: quo(4, 2))
raises(ComputationFailed, lambda: exquo(4, 2))
f, g = x**2 + 1, 2*x - 4
qz, rz = 0, x**2 + 1
qq, rq = x/2 + 1, 5
assert div(f, g) == (qq, rq)
assert div(f, g, auto=True) == (qq, rq)
assert div(f, g, auto=False) == (qz, rz)
assert div(f, g, domain=ZZ) == (qz, rz)
assert div(f, g, domain=QQ) == (qq, rq)
assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
assert div(f, g, domain=QQ, auto=True) == (qq, rq)
assert div(f, g, domain=QQ, auto=False) == (qq, rq)
assert rem(f, g) == rq
assert rem(f, g, auto=True) == rq
assert rem(f, g, auto=False) == rz
assert rem(f, g, domain=ZZ) == rz
assert rem(f, g, domain=QQ) == rq
assert rem(f, g, domain=ZZ, auto=True) == rq
assert rem(f, g, domain=ZZ, auto=False) == rz
assert rem(f, g, domain=QQ, auto=True) == rq
assert rem(f, g, domain=QQ, auto=False) == rq
assert quo(f, g) == qq
assert quo(f, g, auto=True) == qq
assert quo(f, g, auto=False) == qz
assert quo(f, g, domain=ZZ) == qz
assert quo(f, g, domain=QQ) == qq
assert quo(f, g, domain=ZZ, auto=True) == qq
assert quo(f, g, domain=ZZ, auto=False) == qz
assert quo(f, g, domain=QQ, auto=True) == qq
assert quo(f, g, domain=QQ, auto=False) == qq
f, g, q = x**2, 2*x, x/2
assert exquo(f, g) == q
assert exquo(f, g, auto=True) == q
raises(ExactQuotientFailed, lambda: exquo(f, g, auto=False))
raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ))
assert exquo(f, g, domain=QQ) == q
assert exquo(f, g, domain=ZZ, auto=True) == q
raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ, auto=False))
assert exquo(f, g, domain=QQ, auto=True) == q
assert exquo(f, g, domain=QQ, auto=False) == q
f, g = Poly(x**2), Poly(x)
q, r = f.div(g)
assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
r = f.rem(g)
assert r.get_domain().is_ZZ
q = f.quo(g)
assert q.get_domain().is_ZZ
q = f.exquo(g)
assert q.get_domain().is_ZZ
def test_gcdex():
f, g = 2*x, x**2 - 16
s, t, h = x/32, -Rational(1, 16), 1
F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]
assert F.half_gcdex(G) == (S, H)
assert F.gcdex(G) == (S, T, H)
assert F.invert(G) == S
assert half_gcdex(f, g) == (s, h)
assert gcdex(f, g) == (s, t, h)
assert invert(f, g) == s
assert half_gcdex(f, g, x) == (s, h)
assert gcdex(f, g, x) == (s, t, h)
assert invert(f, g, x) == s
assert half_gcdex(f, g, (x,)) == (s, h)
assert gcdex(f, g, (x,)) == (s, t, h)
assert invert(f, g, (x,)) == s
assert half_gcdex(F, G) == (S, H)
assert gcdex(F, G) == (S, T, H)
assert invert(F, G) == S
assert half_gcdex(f, g, polys=True) == (S, H)
assert gcdex(f, g, polys=True) == (S, T, H)
assert invert(f, g, polys=True) == S
assert half_gcdex(F, G, polys=False) == (s, h)
assert gcdex(F, G, polys=False) == (s, t, h)
assert invert(F, G, polys=False) == s
assert half_gcdex(100, 2004) == (-20, 4)
assert gcdex(100, 2004) == (-20, 1, 4)
assert invert(3, 7) == 5
raises(DomainError, lambda: half_gcdex(x + 1, 2*x + 1, auto=False))
raises(DomainError, lambda: gcdex(x + 1, 2*x + 1, auto=False))
raises(DomainError, lambda: invert(x + 1, 2*x + 1, auto=False))
def test_revert():
f = Poly(1 - x**2/2 + x**4/24 - x**6/720)
g = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)
assert f.revert(8) == g
def test_subresultants():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.subresultants(G) == [F, G, H]
assert subresultants(f, g) == [f, g, h]
assert subresultants(f, g, x) == [f, g, h]
assert subresultants(f, g, (x,)) == [f, g, h]
assert subresultants(F, G) == [F, G, H]
assert subresultants(f, g, polys=True) == [F, G, H]
assert subresultants(F, G, polys=False) == [f, g, h]
raises(ComputationFailed, lambda: subresultants(4, 2))
def test_resultant():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
F, G = Poly(f), Poly(g)
assert F.resultant(G) == h
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == h
assert resultant(f, g, polys=True) == h
assert resultant(F, G, polys=False) == h
assert resultant(f, g, includePRS=True) == (h, [f, g, 2*x - 2])
f, g, h = x - a, x - b, a - b
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.resultant(G) == H
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == H
assert resultant(f, g, polys=True) == H
assert resultant(F, G, polys=False) == h
raises(ComputationFailed, lambda: resultant(4, 2))
def test_discriminant():
f, g = x**3 + 3*x**2 + 9*x - 13, -11664
F = Poly(f)
assert F.discriminant() == g
assert discriminant(f) == g
assert discriminant(f, x) == g
assert discriminant(f, (x,)) == g
assert discriminant(F) == g
assert discriminant(f, polys=True) == g
assert discriminant(F, polys=False) == g
f, g = a*x**2 + b*x + c, b**2 - 4*a*c
F, G = Poly(f), Poly(g)
assert F.discriminant() == G
assert discriminant(f) == g
assert discriminant(f, x, a, b, c) == g
assert discriminant(f, (x, a, b, c)) == g
assert discriminant(F) == G
assert discriminant(f, polys=True) == G
assert discriminant(F, polys=False) == g
raises(ComputationFailed, lambda: discriminant(4))
def test_dispersion():
# We test only the API here. For more mathematical
# tests see the dedicated test file.
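    # dispersionset(f, g) collects the shifts j >= 0 for which f(x) and
    # g(x + j) share a root; dispersion() returns the largest such shift.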
fp = poly((x + 1)*(x + 2), x)
assert sorted(fp.dispersionset()) == [0, 1]
assert fp.dispersion() == 1
fp = poly(x**4 - 3*x**2 + 1, x)
gp = fp.shift(-3)
assert sorted(fp.dispersionset(gp)) == [2, 3, 4]
assert fp.dispersion(gp) == 4
def test_gcd_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert gcd_list(F) == x - 1
assert gcd_list(F, polys=True) == Poly(x - 1)
assert gcd_list([]) == 0
assert gcd_list([1, 2]) == 1
assert gcd_list([4, 6, 8]) == 2
assert gcd_list([x*(y + 42) - x*y - x*42]) == 0
gcd = gcd_list([], x)
assert gcd.is_Number and gcd is S.Zero
gcd = gcd_list([], x, polys=True)
assert gcd.is_Poly and gcd.is_zero
raises(ComputationFailed, lambda: gcd_list([], polys=True))
def test_lcm_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)
assert lcm_list([]) == 1
assert lcm_list([1, 2]) == 2
assert lcm_list([4, 6, 8]) == 24
assert lcm_list([x*(y + 42) - x*y - x*42]) == 0
lcm = lcm_list([], x)
assert lcm.is_Number and lcm is S.One
lcm = lcm_list([], x, polys=True)
assert lcm.is_Poly and lcm.is_one
raises(ComputationFailed, lambda: lcm_list([], polys=True))
def test_gcd():
f, g = x**3 - 1, x**2 - 1
s, t = x**2 + x + 1, x + 1
h, r = x - 1, x**4 + x**3 - x - 1
F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]
assert F.cofactors(G) == (H, S, T)
assert F.gcd(G) == H
assert F.lcm(G) == R
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == r
assert cofactors(f, g, x) == (h, s, t)
assert gcd(f, g, x) == h
assert lcm(f, g, x) == r
assert cofactors(f, g, (x,)) == (h, s, t)
assert gcd(f, g, (x,)) == h
assert lcm(f, g, (x,)) == r
assert cofactors(F, G) == (H, S, T)
assert gcd(F, G) == H
assert lcm(F, G) == R
assert cofactors(f, g, polys=True) == (H, S, T)
assert gcd(f, g, polys=True) == H
assert lcm(f, g, polys=True) == R
assert cofactors(F, G, polys=False) == (h, s, t)
assert gcd(F, G, polys=False) == h
assert lcm(F, G, polys=False) == r
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
assert cofactors(8, 6) == (2, 4, 3)
assert gcd(8, 6) == 2
assert lcm(8, 6) == 24
f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
h, s, t = x - 4, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11) == (h, s, t)
assert gcd(f, g, modulus=11) == h
assert lcm(f, g, modulus=11) == l
f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
h, s, t = x + 7, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
assert gcd(f, g, modulus=11, symmetric=False) == h
assert lcm(f, g, modulus=11, symmetric=False) == l
raises(TypeError, lambda: gcd(x))
raises(TypeError, lambda: lcm(x))
def test_gcd_numbers_vs_polys():
assert isinstance(gcd(3, 9), Integer)
assert isinstance(gcd(3*x, 9), Integer)
assert gcd(3, 9) == 3
assert gcd(3*x, 9) == 3
assert isinstance(gcd(S(3)/2, S(9)/4), Rational)
assert isinstance(gcd(S(3)/2*x, S(9)/4), Rational)
assert gcd(S(3)/2, S(9)/4) == S(3)/4
assert gcd(S(3)/2*x, S(9)/4) == 1
assert isinstance(gcd(3.0, 9.0), Float)
assert isinstance(gcd(3.0*x, 9.0), Float)
assert gcd(3.0, 9.0) == 1.0
assert gcd(3.0*x, 9.0) == 1.0
def test_terms_gcd():
assert terms_gcd(1) == 1
assert terms_gcd(1, x) == 1
assert terms_gcd(x - 1) == x - 1
assert terms_gcd(-x - 1) == -x - 1
assert terms_gcd(2*x + 3) == 2*x + 3
assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)
assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)
assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)
assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
assert _aresame(terms_gcd(2.0*x + 3), 2.0*x + 3)
assert terms_gcd((3 + 3*x)*(x + x*y), expand=False) == \
(3*x + 3)*(x*y + x)
assert terms_gcd((3 + 3*x)*(x + x*sin(3 + 3*y)), expand=False, deep=True) == \
3*x*(x + 1)*(sin(Mul(3, y + 1, evaluate=False)) + 1)
assert terms_gcd(sin(x + x*y), deep=True) == \
sin(x*(y + 1))
eq = Eq(2*x, 2*y + 2*z*y)
assert terms_gcd(eq) == eq
assert terms_gcd(eq, deep=True) == Eq(2*x, 2*y*(z + 1))
def test_trunc():
f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f = Poly(x**2 + 2*x + 3, modulus=5)
assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
f, g = 2*x - 1, x - S(1)/2
F, G = Poly(f, domain='QQ'), Poly(g)
assert F.monic() == G
assert monic(f) == g
assert monic(f, x) == g
assert monic(f, (x,)) == g
assert monic(F) == G
assert monic(f, polys=True) == G
assert monic(F, polys=False) == g
raises(ComputationFailed, lambda: monic(4))
assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
raises(ExactQuotientFailed, lambda: monic(2*x + 6*x + 1, auto=False))
assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
f, F = 4*x + 2, Poly(4*x + 2)
assert F.content() == 2
assert content(f) == 2
raises(ComputationFailed, lambda: content(4))
f = Poly(2*x, modulus=3)
assert f.content() == 1
def test_primitive():
f, g = 4*x + 2, 2*x + 1
F, G = Poly(f), Poly(g)
assert F.primitive() == (2, G)
assert primitive(f) == (2, g)
assert primitive(f, x) == (2, g)
assert primitive(f, (x,)) == (2, g)
assert primitive(F) == (2, G)
assert primitive(f, polys=True) == (2, G)
assert primitive(F, polys=False) == (2, g)
raises(ComputationFailed, lambda: primitive(4))
f = Poly(2*x, modulus=3)
g = Poly(2.0*x, domain=RR)
assert f.primitive() == (1, f)
assert g.primitive() == (1.0, g)
assert primitive(S('-3*x/4 + y + 11/8')) == \
S('(1/8, -6*x + 8*y + 11)')
def test_compose():
f = x**12 + 20*x**10 + 150*x**8 + 500*x**6 + 625*x**4 - 2*x**3 - 10*x + 9
g = x**4 - 2*x + 9
h = x**3 + 5*x
F, G, H = map(Poly, (f, g, h))
assert G.compose(H) == F
assert compose(g, h) == f
assert compose(g, h, x) == f
assert compose(g, h, (x,)) == f
assert compose(G, H) == F
assert compose(g, h, polys=True) == F
assert compose(G, H, polys=False) == f
assert F.decompose() == [G, H]
assert decompose(f) == [g, h]
assert decompose(f, x) == [g, h]
assert decompose(f, (x,)) == [g, h]
assert decompose(F) == [G, H]
assert decompose(f, polys=True) == [G, H]
assert decompose(F, polys=False) == [g, h]
raises(ComputationFailed, lambda: compose(4, 2))
raises(ComputationFailed, lambda: decompose(4))
assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
assert Poly(x**2 - 2*x + 1, x).shift(2) == Poly(x**2 + 2*x + 1, x)
def test_sturm():
f, F = x, Poly(x, domain='QQ')
g, G = 1, Poly(1, x, domain='QQ')
assert F.sturm() == [F, G]
assert sturm(f) == [f, g]
assert sturm(f, x) == [f, g]
assert sturm(f, (x,)) == [f, g]
assert sturm(F) == [F, G]
assert sturm(f, polys=True) == [F, G]
assert sturm(F, polys=False) == [f, g]
raises(ComputationFailed, lambda: sturm(4))
raises(DomainError, lambda: sturm(f, auto=False))
f = Poly(S(1024)/(15625*pi**8)*x**5
- S(4096)/(625*pi**8)*x**4
+ S(32)/(15625*pi**4)*x**3
- S(128)/(625*pi**4)*x**2
+ S(1)/62500*x
- S(1)/625, x, domain='ZZ(pi)')
assert sturm(f) == \
[Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
f = x**5 + 2*x**4 - x**3 - 2*x**2
assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
assert gff_list(f) == [(x, 1), (x + 2, 4)]
raises(NotImplementedError, lambda: gff(f))
f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)
assert Poly(f).gff_list() == [(
Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(NotImplementedError, lambda: gff(f))
def test_sqf_norm():
assert sqf_norm(x**2 - 2, extension=sqrt(3)) == \
(1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
assert sqf_norm(x**2 - 3, extension=sqrt(2)) == \
(1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)
assert Poly(x**2 - 2, extension=sqrt(3)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
assert Poly(x**2 - 3, extension=sqrt(2)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
f = x**5 - x**3 - x**2 + 1
g = x**3 + 2*x**2 + 2*x + 1
h = x - 1
p = x**4 + x**3 - x - 1
F, G, H, P = map(Poly, (f, g, h, p))
assert F.sqf_part() == P
assert sqf_part(f) == p
assert sqf_part(f, x) == p
assert sqf_part(f, (x,)) == p
assert sqf_part(F) == P
assert sqf_part(f, polys=True) == P
assert sqf_part(F, polys=False) == p
assert F.sqf_list() == (1, [(G, 1), (H, 2)])
assert sqf_list(f) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
assert sqf_list(F) == (1, [(G, 1), (H, 2)])
assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
assert F.sqf_list_include() == [(G, 1), (H, 2)]
raises(ComputationFailed, lambda: sqf_part(4))
assert sqf(1) == 1
assert sqf_list(1) == (1, [])
assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert sqf(f) == g*h**2
assert sqf(f, x) == g*h**2
assert sqf(f, (x,)) == g*h**2
d = x**2 + y**2
assert sqf(f/d) == (g*h**2)/d
assert sqf(f/d, x) == (g*h**2)/d
assert sqf(f/d, (x,)) == (g*h**2)/d
assert sqf(x - 1) == x - 1
assert sqf(-x - 1) == -x - 1
assert sqf(x - 1) == x - 1
assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2
f = 3 + x - x*(1 + x) + x**2
assert sqf(f) == 3
f = (x**2 + 2*x + 1)**20000000000
assert sqf(f) == (x + 1)**40000000000
assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
f = x**5 - x**3 - x**2 + 1
u = x + 1
v = x - 1
w = x**2 + x + 1
F, U, V, W = map(Poly, (f, u, v, w))
assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]
assert factor_list(1) == (1, [])
assert factor_list(6) == (6, [])
assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
assert factor_list((-1)**x, x) == (1, [(-1, x)])
assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
assert factor_list(sqrt(x*y), x) == (1, [(x*y, S.Half)])
assert factor(6) == 6 and factor(6).is_Integer
assert factor_list(3*x) == (3, [(x, 1)])
assert factor_list(3*x**2) == (3, [(x, 2)])
assert factor(3*x) == 3*x
assert factor(3*x**2) == 3*x**2
assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert factor(f) == u*v**2*w
assert factor(f, x) == u*v**2*w
assert factor(f, (x,)) == u*v**2*w
g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1
assert factor(f/g) == (u*v**2*w)/(p*q)
assert factor(f/g, x) == (u*v**2*w)/(p*q)
assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)
p = Symbol('p', positive=True)
i = Symbol('i', integer=True)
r = Symbol('r', real=True)
assert factor(sqrt(x*y)).is_Pow is True
assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)
assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i
assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t
f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)
assert factor(f) == g
assert factor(g) == g
g = (x - 1)**5*(r**2 + 1)
f = sqrt(expand(g))
assert factor(f) == sqrt(g)
f = Poly(sin(1)*x + 1, x, domain=EX)
assert f.factor_list() == (1, [(f, 1)])
f = x**4 + 1
assert factor(f) == f
assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
assert factor(
f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
f = x**2 + 2*sqrt(2)*x + 2
assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
(x + sqrt(2)*y)*(x - sqrt(2)*y)
assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
assert factor(x - 1) == x - 1
assert factor(-x - 1) == -x - 1
assert factor(x - 1) == x - 1
assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1)
assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
(x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 +
                        x**3 + 65536*x**2 + 1)
f = x/pi + x*sin(x)/pi
g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)
assert factor(f) == x*(sin(x) + 1)/pi
assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2
assert factor(Eq(
x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))
f = (x**2 - 1)/(x**2 + 4*x + 4)
assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2
f = 3 + x - x*(1 + x) + x**2
assert factor(f) == 3
assert factor(f, x) == 3
assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 +
x**3)/(1 + 2*x**2 + x**3))
assert factor(f, expand=False) == f
raises(PolynomialError, lambda: factor(f, x, expand=False))
raises(FlagError, lambda: factor(x**2 - 1, polys=True))
assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
[x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]
assert not isinstance(
Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True
assert isinstance(
PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True
assert factor(sqrt(-x)) == sqrt(-x)
# issue 5917
e = (-2*x*(-x + 1)*(x - 1)*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)*(x**2*(x -
1) - x*(x - 1) - x) - (-2*x**2*(x - 1)**2 - x*(-x + 1)*(-x*(-x + 1) +
x*(x - 1)))*(x**2*(x - 1)**4 - x*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)))
assert factor(e) == 0
# deep option
assert factor(sin(x**2 + x) + x, deep=True) == sin(x*(x + 1)) + x
assert factor(sqrt(x**2)) == sqrt(x**2)
def test_factor_large():
f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (
x**2 + 2*x + 1)**3000)
assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
assert factor(g) == (x + 1)**6000*(y + 1)**2
assert factor_list(
f) == (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])
f = (x**2 - y**2)**200000*(x**7 + 1)
g = (x**2 + y**2)**200000*(x**7 + 1)
assert factor(f) == \
(x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 +
x**4 - x**3 + x**2 - x + 1)
assert factor(g, gaussian=True) == \
(x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 +
x**4 - x**3 + x**2 - x + 1)
assert factor_list(f) == \
(1, [(x + 1, 1), (x - y, 200000), (x + y, 200000), (x**6 -
x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
assert factor_list(g, gaussian=True) == \
(1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000), (
x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
assert factor(6*x - 10) == 2*(3*x - 5)
assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
assert intervals(0) == []
assert intervals(1) == []
assert intervals(x, sqf=True) == [(0, 0)]
assert intervals(x) == [((0, 0), 1)]
assert intervals(x**128) == [((0, 0), 128)]
assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]
f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
[((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]
assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
[((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = Poly((x**2 - 2)*(x**2 - 3)**7*(x + 1)*(7*x + 3)**3)
assert f.intervals() == \
[((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
((-1, -1), 1), ((-1, 0), 3),
((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
assert intervals([x**5 - 200, x**5 - 201]) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]
assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]
assert intervals([x**2 - 200, x**2 - 201]) == \
[((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}),
((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]
assert intervals([x + 1, x + 2, x - 1, x + 1, 1, x - 1, x - 1, (x - 2)**2]) == \
[((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}), ((1, 1), {2:
1, 5: 1, 6: 1}), ((2, 2), {7: 2})]
f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1
assert intervals(f, inf=S(7)/4, sqf=True) == []
assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]
assert intervals(g, inf=S(7)/4) == []
assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]
assert intervals([g, h], inf=S(7)/4) == []
assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
assert intervals([g, h], sup=S(
7)/4) == [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
assert intervals(
[g, h], sup=S(7)/5) == [((-2, -1), {0: 2}), ((1, 1), {1: 1})]
assert intervals([x + 2, x**2 - 2]) == \
[((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
assert intervals([x + 2, x**2 - 2], strict=True) == \
[((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]
f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20
assert intervals(f) == []
real_part, complex_part = intervals(f, all=True, sqf=True)
assert real_part == []
assert all(re(a) < re(r) < re(b) and im(
a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
(-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]
real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)
assert real_part == []
assert all(re(a) < re(r) < re(b) and im(
a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
raises(ValueError, lambda: intervals(x**2 - 2, eps=10**-100000))
raises(ValueError, lambda: Poly(x**2 - 2).intervals(eps=10**-100000))
raises(
ValueError, lambda: intervals([x**2 - 2, x**2 - 3], eps=10**-100000))
def test_refine_root():
f = Poly(x**2 - 2)
assert f.refine_root(1, 2, steps=0) == (1, 2)
assert f.refine_root(-2, -1, steps=0) == (-2, -1)
assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, lambda: (f**2).refine_root(1, 2, check_sqf=True))
raises(RefinementFailed, lambda: (f**2).refine_root(1, 2))
raises(RefinementFailed, lambda: (f**2).refine_root(2, 3))
f = x**2 - 2
assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, lambda: refine_root(1, 7, 8, eps=S(1)/100))
raises(ValueError, lambda: Poly(f).refine_root(1, 2, eps=10**-100000))
raises(ValueError, lambda: refine_root(f, 1, 2, eps=10**-100000))
def test_count_roots():
assert count_roots(x**2 - 2) == 2
assert count_roots(x**2 - 2, inf=-oo) == 2
assert count_roots(x**2 - 2, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-2) == 2
assert count_roots(x**2 - 2, inf=-1) == 1
assert count_roots(x**2 - 2, sup=1) == 1
assert count_roots(x**2 - 2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 + 2) == 0
assert count_roots(x**2 + 2, inf=-2*I) == 2
assert count_roots(x**2 + 2, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=0) == 0
assert count_roots(x**2 + 2, sup=0) == 0
assert count_roots(x**2 + 2, inf=-I) == 1
assert count_roots(x**2 + 2, sup=+I) == 1
assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0
raises(PolynomialError, lambda: count_roots(1))
def test_Poly_root():
f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
assert f.root(0) == -S(1)/2
assert f.root(1) == 2
assert f.root(2) == 2
raises(IndexError, lambda: f.root(3))
assert Poly(x**5 + x + 1).root(0) == RootOf(x**3 - x**2 + 1, 0)
def test_real_roots():
assert real_roots(x) == [0]
assert real_roots(x, multiple=False) == [(0, 1)]
assert real_roots(x**3) == [0, 0, 0]
assert real_roots(x**3, multiple=False) == [(0, 3)]
assert real_roots(x*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0]
assert real_roots(x*(x**3 + x + 3), multiple=False) == [(RootOf(
x**3 + x + 3, 0), 1), (0, 1)]
assert real_roots(
x**3*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0, 0, 0]
assert real_roots(x**3*(x**3 + x + 3), multiple=False) == [(RootOf(
x**3 + x + 3, 0), 1), (0, 3)]
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
assert Poly(g).real_roots() == [RootOf(g, 0)]
def test_all_roots():
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).all_roots() == [-S(1)/2, 2, 2]
assert Poly(g).all_roots() == [RootOf(g, 0), RootOf(g, 1), RootOf(g, 2)]
def test_nroots():
assert Poly(0, x).nroots() == []
assert Poly(1, x).nroots() == []
assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]
roots = Poly(x**2 - 1, x).nroots()
assert roots == [-1.0, 1.0]
roots = Poly(x**2 + 1, x).nroots()
assert roots == [-1.0*I, 1.0*I]
roots = Poly(x**2/3 - S(1)/3, x).nroots()
assert roots == [-1.0, 1.0]
roots = Poly(x**2/3 + S(1)/3, x).nroots()
assert roots == [-1.0*I, 1.0*I]
assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(
x**2 + 2*I, x, extension=I).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(0.2*x + 0.1).nroots() == [-0.5]
roots = nroots(x**5 + x + 1, n=5)
eps = Float("1e-5")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.true
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.true
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.true
assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.true
assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.true
assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.true
assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.true
eps = Float("1e-6")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.false
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.false
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.false
assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.false
assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.false
assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.false
assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.false
raises(DomainError, lambda: Poly(x + y, x).nroots())
raises(MultivariatePolynomialError, lambda: Poly(x + y).nroots())
assert nroots(x**2 - 1) == [-1.0, 1.0]
roots = nroots(x**2 - 1)
assert roots == [-1.0, 1.0]
assert nroots(x + I) == [-1.0*I]
assert nroots(x + 2*I) == [-2.0*I]
raises(PolynomialError, lambda: nroots(0))
# issue 8296
f = Poly(x**4 - 1)
assert f.nroots(2) == [w.n(2) for w in f.all_roots()]
def test_ground_roots():
f = x**6 - 4*x**4 + 4*x**3 - x**2
assert Poly(f).ground_roots() == {S(1): 2, S(0): 2}
assert ground_roots(f) == {S(1): 2, S(0): 2}
def test_nth_power_roots_poly():
f = x**4 - x**2 + 1
f_2 = (x**2 - x + 1)**2
f_3 = (x**2 + 1)**2
f_4 = (x**2 + x + 1)**2
f_12 = (x - 1)**4
assert nth_power_roots_poly(f, 1) == f
raises(ValueError, lambda: nth_power_roots_poly(f, 0))
raises(ValueError, lambda: nth_power_roots_poly(f, x))
assert factor(nth_power_roots_poly(f, 2)) == f_2
assert factor(nth_power_roots_poly(f, 3)) == f_3
assert factor(nth_power_roots_poly(f, 4)) == f_4
assert factor(nth_power_roots_poly(f, 12)) == f_12
raises(MultivariatePolynomialError, lambda: nth_power_roots_poly(
x + y, 2, x, y))
def test_torational_factor_list():
p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
assert _torational_factor_list(p, x) == (-2, [
(-x*(1 + sqrt(2))/2 + 1, 1),
(-x*(1 + sqrt(2)) - 1, 1),
(-x*(1 + sqrt(2)) + 1, 1)])
p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + 2**Rational(1, 4))}))
assert _torational_factor_list(p, x) is None
def test_cancel():
assert cancel(0) == 0
assert cancel(7) == 7
assert cancel(x) == x
assert cancel(oo) == oo
assert cancel((2, 3)) == (1, 2, 3)
assert cancel((1, 0), x) == (1, 1, 0)
assert cancel((0, 1), x) == (1, 0, 1)
f, g, p, q = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
assert F.cancel(G) == (1, P, Q)
assert cancel((f, g)) == (1, p, q)
assert cancel((f, g), x) == (1, p, q)
assert cancel((f, g), (x,)) == (1, p, q)
assert cancel((F, G)) == (1, P, Q)
assert cancel((f, g), polys=True) == (1, P, Q)
assert cancel((F, G), polys=False) == (1, p, q)
f = (x**2 - 2)/(x + sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x - sqrt(2)
f = (x**2 - 2)/(x - sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x + sqrt(2)
assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)
assert cancel((x**2 - y**2)/(x - y), x) == x + y
assert cancel((x**2 - y**2)/(x - y), y) == x + y
assert cancel((x**2 - y**2)/(x - y)) == x + y
assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)
assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
f = Poly(x**2 - a**2, x)
g = Poly(x - a, x)
F = Poly(x + a, x)
G = Poly(1, x)
assert cancel((f, g)) == (1, F, G)
f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
g = x**2 - 2
assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
f = Poly(-2*x + 3, x)
g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
assert cancel((f, g)) == (1, -f, -g)
f = Poly(y, y, domain='ZZ(x)')
g = Poly(1, y, domain='ZZ[x]')
assert f.cancel(
g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
assert f.cancel(g, include=True) == (
Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
f = Poly(5*x*y + x, y, domain='ZZ(x)')
g = Poly(2*x**2*y, y, domain='ZZ(x)')
assert f.cancel(g, include=True) == (
Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
f = -(-2*x - 4*y + 0.005*(z - y)**2)/((z - y)*(-z + y + 2))
assert cancel(f).is_Mul == True
P = tanh(x - 3.0)
Q = tanh(x + 3.0)
f = ((-2*P**2 + 2)*(-P**2 + 1)*Q**2/2 + (-2*P**2 + 2)*(-2*Q**2 + 2)*P*Q - (-2*P**2 + 2)*P**2*Q**2 + (-2*Q**2 + 2)*(-Q**2 + 1)*P**2/2 - (-2*Q**2 + 2)*P**2*Q**2)/(2*sqrt(P**2*Q**2 + 0.0001)) \
+ (-(-2*P**2 + 2)*P*Q**2/2 - (-2*Q**2 + 2)*P**2*Q/2)*((-2*P**2 + 2)*P*Q**2/2 + (-2*Q**2 + 2)*P**2*Q/2)/(2*(P**2*Q**2 + 0.0001)**(S(3)/2))
assert cancel(f).is_Mul == True
# issue 7022
A = Symbol('A', commutative=False)
p1 = Piecewise((A*(x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
p2 = Piecewise((A*(x - 1), x > 1), (1/x, True))
assert cancel(p1) == p2
assert cancel(2*p1) == 2*p2
assert cancel(1 + p1) == 1 + p2
assert cancel((x**2 - 1)/(x + 1)*p1) == (x - 1)*p2
assert cancel((x**2 - 1)/(x + 1) + p1) == (x - 1) + p2
p3 = Piecewise(((x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
p4 = Piecewise(((x - 1), x > 1), (1/x, True))
assert cancel(p3) == p4
assert cancel(2*p3) == 2*p4
assert cancel(1 + p3) == 1 + p4
assert cancel((x**2 - 1)/(x + 1)*p3) == (x - 1)*p4
assert cancel((x**2 - 1)/(x + 1) + p3) == (x - 1) + p4
# issue 9363
M = MatrixSymbol('M', 5, 5)
assert cancel(M[0,0] + 7) == M[0,0] + 7
expr = sin(M[1, 4] + M[2, 1] * 5 * M[4, 0]) - 5 * M[1, 2] / z
assert cancel(expr) == expr
def test_reduced():
f = 2*x**4 + y**2 - x**2 + y**3
G = [x**3 - x, y**3 - y]
Q = [2*x, 1]
r = x**2 + y**2 + y
assert reduced(f, G) == (Q, r)
assert reduced(f, G, x, y) == (Q, r)
H = groebner(G)
assert H.reduce(f) == (Q, r)
Q = [Poly(2*x, x, y), Poly(1, x, y)]
r = Poly(x**2 + y**2 + y, x, y)
assert _strict_eq(reduced(f, G, polys=True), (Q, r))
assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
H = groebner(G, polys=True)
assert _strict_eq(H.reduce(f), (Q, r))
f = 2*x**3 + y**3 + 3*y
G = groebner([x**2 + y**2 - 1, x*y - 2])
Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
r = 0
assert reduced(f, G) == (Q, r)
assert G.reduce(f) == (Q, r)
assert reduced(f, G, auto=False)[1] != 0
assert G.reduce(f, auto=False)[1] != 0
assert G.contains(f) is True
assert G.contains(f + 1) is False
assert reduced(1, [1], x) == ([1], 0)
raises(ComputationFailed, lambda: reduced(1, [1]))
def test_groebner():
assert groebner([], x, y, z) == []
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex') == [1 + x**2, -1 + y**4]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
[Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
[Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
G = groebner(F, x, y, z, modulus=7, symmetric=False)
assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
assert sum([ q*g for q, g in zip(Q, G.polys)], r) == Poly(f, modulus=7)
F = [x*y - 2*y, 2*y**2 - x**2]
assert groebner(F, x, y, order='grevlex') == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner(F, y, x, order='grevlex') == \
[x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
assert groebner(F, order='grevlex', field=True) == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner([1], x) == [1]
assert groebner([x**2 + 2.0*y], x, y) == [1.0*x**2 + 2.0*y]
raises(ComputationFailed, lambda: groebner([1]))
assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]
raises(ValueError, lambda: groebner([x, y], method='unknown'))
def test_fglm():
F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
G = groebner(F, a, b, c, d, order=grlex)
B = [
4*a + 3*d**9 - 4*d**5 - 3*d,
4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
d**12 - d**8 - d**4 + 1,
]
assert groebner(F, a, b, c, d, order=lex) == B
assert G.fglm(lex) == B
F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
-72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 - 404*t*x**2 - 576*t*x + \
108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 - 576*x + 96]
G = groebner(F, t, x, order=grlex)
B = [
203577793572507451707*t + 627982239411707112*x**7 - 666924143779443762*x**6 - \
10874593056632447619*x**5 + 5119998792707079562*x**4 + 72917161949456066376*x**3 + \
20362663855832380362*x**2 - 142079311455258371571*x + 183756699868981873194,
9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
]
assert groebner(F, t, x, order=lex) == B
assert G.fglm(lex) == B
F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
G = groebner(F, x, y, order=lex)
B = [
x**2 - x - 3*y + 1,
y**2 - 2*x + y - 1,
]
assert groebner(F, x, y, order=grlex) == B
assert G.fglm(grlex) == B
def test_is_zero_dimensional():
assert is_zero_dimensional([x, y], x, y) is True
assert is_zero_dimensional([x**3 + y**2], x, y) is False
assert is_zero_dimensional([x, y, z], x, y, z) is True
assert is_zero_dimensional([x, y, z], x, y, z, t) is False
F = [x*y - z, y*z - x, x*y - y]
assert is_zero_dimensional(F, x, y, z) is True
F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
assert is_zero_dimensional(F, x, y, z) is True
def test_GroebnerBasis():
F = [x*y - 2*y, 2*y**2 - x**2]
G = groebner(F, x, y, order='grevlex')
H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
P = [ Poly(h, x, y) for h in H ]
assert isinstance(G, GroebnerBasis) is True
assert len(G) == 3
assert G[0] == H[0] and not G[0].is_Poly
assert G[1] == H[1] and not G[1].is_Poly
assert G[2] == H[2] and not G[2].is_Poly
assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
assert G[:2] == H[:2] and not any(g.is_Poly for g in G[1:])
assert G.exprs == H
assert G.polys == P
assert G.gens == (x, y)
assert G.domain == ZZ
assert G.order == grevlex
assert G == H
assert G == tuple(H)
assert G == P
assert G == tuple(P)
assert G != []
G = groebner(F, x, y, order='grevlex', polys=True)
assert G[0] == P[0] and G[0].is_Poly
assert G[1] == P[1] and G[1].is_Poly
assert G[2] == P[2] and G[2].is_Poly
assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
assert G[:2] == P[:2] and all(g.is_Poly for g in G[1:])
def test_poly():
assert poly(x) == Poly(x, x)
assert poly(y) == Poly(y, y)
assert poly(x + y) == Poly(x + y, x, y)
assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
assert poly(x + y, wrt=y) == Poly(x + y, y, x)
assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
assert poly(
x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
assert poly(2*x*(
y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
assert poly(2*(
y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
assert poly(x*(
y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*
x*z**2 - x - 1, x, y, z)
assert poly(x*y + (x + y)**2 + (x + z)**2) == \
Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
assert poly(x*y*(x + y)*(x + z)**2) == \
Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*
y**2 + 2*y*z*x**3 + y*x**4, x, y, z)
assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)
assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])
assert poly(1, x) == Poly(1, x)
raises(GeneratorsNeeded, lambda: poly(1))
# issue 6184
assert poly(x + y, x, y) == Poly(x + y, x, y)
assert poly(x + y, y, x) == Poly(x + y, y, x)
def test_keep_coeff():
u = Mul(2, x + 1, evaluate=False)
assert _keep_coeff(S(1), x) == x
assert _keep_coeff(S(-1), x) == -x
assert _keep_coeff(S(1.0), x) == 1.0*x
assert _keep_coeff(S(-1.0), x) == -1.0*x
assert _keep_coeff(S(1), 2*x) == 2*x
assert _keep_coeff(S(2), x/2) == x
assert _keep_coeff(S(2), sin(x)) == 2*sin(x)
assert _keep_coeff(S(2), x + 1) == u
assert _keep_coeff(x, 1/x) == 1
assert _keep_coeff(x + 1, S(2)) == u
@XFAIL
def test_poly_matching_consistency():
# Test for this issue:
# https://github.com/sympy/sympy/issues/5514
assert I * Poly(x, x) == Poly(I*x, x)
assert Poly(x, x) * I == Poly(I*x, x)
@XFAIL
def test_issue_5786():
assert expand(factor(expand(
(x - I*y)*(z - I*t)), extension=[I])) == -I*t*x - t*y + x*z - I*y*z
def test_noncommutative():
class foo(Expr):
is_commutative=False
e = x/(x + x*y)
c = 1/( 1 + y)
assert cancel(foo(e)) == foo(c)
assert cancel(e + foo(e)) == c + foo(c)
assert cancel(e*foo(c)) == c*foo(c)
def test_to_rational_coeffs():
assert to_rational_coeffs(
Poly(x**3 + y*x**2 + sqrt(y), x, domain='EX')) == None
|
sahilshekhawat/sympy
|
sympy/polys/tests/test_polytools.py
|
Python
|
bsd-3-clause
| 106,021
|
[
"Gaussian"
] |
b029b73620cb254e5092ecc4301c97f76e5c085f5f42ed370f02fb18381f4ec4
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Multivariate Normal distribution classes."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python import stats as tfp_stats
from tensorflow_probability.python.bijectors import fill_scale_tril as fill_scale_tril_bijector
from tensorflow_probability.python.distributions import mvn_linear_operator
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow.python.ops.linalg import linear_operator # pylint: disable=g-direct-tensorflow-import
__all__ = [
'MultivariateNormalTriL',
]
@linear_operator.make_composite_tensor
class KahanLogDetLinOpTriL(tf.linalg.LinearOperatorLowerTriangular):
"""Override `LinearOperatorLowerTriangular` logdet to use Kahan summation."""
def _log_abs_determinant(self):
return tfp_math.reduce_kahan_sum(
tf.math.log(tf.math.abs(self._get_diag())), axis=[-1]).total
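# Editor's note: a minimal, self-contained sketch (not part of TensorFlow
# Probability) of the compensated "Kahan" summation idea behind the override
# above. The helper name `_kahan_sum_sketch` is hypothetical; it only shows how
# a running compensation term recovers low-order bits that a naive float32
# accumulation would drop, which is why the log-determinant above sums the
# log-diagonal via `tfp_math.reduce_kahan_sum`.
def _kahan_sum_sketch(values):
    total = 0.0
    compensation = 0.0                    # running estimate of accumulated rounding error
    for v in values:
        y = v - compensation              # re-inject the error lost on previous steps
        t = total + y                     # low-order bits of y may be lost here ...
        compensation = (t - total) - y    # ... and are recovered here
        total = t
    return total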
class MultivariateNormalTriL(
mvn_linear_operator.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a matrix in `R^{k x k}`, `covariance = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
where `scale_tril` is a lower-triangular `k x k` matrix with non-zero diagonal,
i.e., `tf.diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
Trainable (batch) lower-triangular matrices can be created with
`tfp.distributions.matrix_diag_transform()` and/or
`tfp.math.fill_triangular()`
#### Examples
```python
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.linalg.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = tfd.MultivariateNormalTriL(
loc=mu,
scale_tril=scale)
mvn.mean()
# ==> [1., 2, 3]
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an observation in `R^3` ; return a scalar.
mvn.prob([-1., 0, 1]) # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
tril = ... # shape: [2, 3, 3], lower triangular, non-zero diagonal.
mvn = tfd.MultivariateNormalTriL(
loc=mu,
scale_tril=tril)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x) # shape: [2]
# Instantiate a "learnable" MVN.
dims = 4
mvn = tfd.MultivariateNormalTriL(
loc=tf.Variable(tf.zeros([dims], dtype=tf.float32), name="mu"),
scale_tril=tfp.util.TransformedVariable(
tf.eye(dims, dtype=tf.float32),
tfp.bijectors.FillScaleTriL(),
name="raw_scale_tril")
```
"""
def __init__(self,
loc=None,
scale_tril=None,
validate_args=False,
allow_nan_stats=True,
experimental_use_kahan_sum=False,
name='MultivariateNormalTriL'):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by the last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
where `scale_tril` is a lower-triangular `k x k` matrix with non-zero
diagonal, i.e., `tf.diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_tril: Floating-point, lower-triangular `Tensor` with non-zero
diagonal elements. `scale_tril` has shape `[B1, ..., Bb, k, k]` where
`b >= 0` and `k` is the event size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
experimental_use_kahan_sum: Python `bool`. When `True`, we use Kahan
summation to aggregate independent underlying log_prob values as well as
when computing the log-determinant of the scale matrix. Doing so
improves on the precision of a naive float32 sum. This can be
noticeable in particular for large dimensions in float32. See CPU caveat
on `tfp.math.reduce_kahan_sum`.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if neither `loc` nor `scale_tril` are specified.
"""
parameters = dict(locals())
if loc is None and scale_tril is None:
raise ValueError('Must specify one or both of `loc`, `scale_tril`.')
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale_tril], tf.float32)
loc = tensor_util.convert_nonref_to_tensor(loc, name='loc', dtype=dtype)
scale_tril = tensor_util.convert_nonref_to_tensor(
scale_tril, name='scale_tril', dtype=dtype)
self._scale_tril = scale_tril
if scale_tril is None:
scale = tf.linalg.LinearOperatorIdentity(
num_rows=distribution_util.dimension_size(loc, -1),
dtype=loc.dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
else:
# No need to validate that scale_tril is non-singular.
# LinearOperatorLowerTriangular has an assert_non_singular
# method that is called by the Bijector.
linop_cls = (KahanLogDetLinOpTriL if experimental_use_kahan_sum else
tf.linalg.LinearOperatorLowerTriangular)
scale = linop_cls(
scale_tril,
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=False)
super(MultivariateNormalTriL, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
experimental_use_kahan_sum=experimental_use_kahan_sum,
name=name)
self._parameters = parameters
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
loc=parameter_properties.ParameterProperties(event_ndims=1),
scale_tril=parameter_properties.ParameterProperties(
event_ndims=2,
shape_fn=lambda sample_shape: ps.concat(
[sample_shape, sample_shape[-1:]], axis=0),
default_constraining_bijector_fn=lambda: fill_scale_tril_bijector.
FillScaleTriL(diag_shift=dtype_util.eps(dtype))))
# pylint: enable=g-long-lambda
@classmethod
def _maximum_likelihood_parameters(cls, value):
return {'loc': tf.reduce_mean(value, axis=0),
'scale_tril': tf.linalg.cholesky(
tfp_stats.covariance(value, sample_axis=0, event_axis=-1))}
@property
def scale_tril(self):
return self._scale_tril
|
tensorflow/probability
|
tensorflow_probability/python/distributions/mvn_tril.py
|
Python
|
apache-2.0
| 9,711
|
[
"Gaussian"
] |
281d7a6203ccf291e73782dee9108c1e2245dbf61cfa73f0cd886da41e98bc5f
|
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from mozi.layers.template import Template
floatX = theano.config.floatX
theano_rand = MRG_RandomStreams()
class Sigmoid(Template):
def _test_fprop(self, state_below):
return T.nnet.sigmoid(state_below)
def _train_fprop(self, state_below):
return T.nnet.sigmoid(state_below)
class RELU(Template):
def _test_fprop(self, state_below):
return state_below * (state_below > 0.)
def _train_fprop(self, state_below):
return state_below * (state_below > 0.)
class PRELU(Template):
def __init__(self, alpha=0.2, **kwargs):
'''
y = wx + b
if y > 0 then z = y else z = alpha * y
return z
alpha: the slope applied to negative inputs, updated by backpropagation
'''
super(PRELU, self).__init__(**kwargs)
alpha = alpha * np.ones(shape=self.dim, dtype=floatX)
self.alpha = theano.shared(value=alpha, name='PRELU_gradient', borrow=True)
self.params += [self.alpha]
def _test_fprop(self, state_below):
return self._train_fprop(state_below)
def _train_fprop(self, state_below):
return state_below * (state_below >= 0) \
+ self.alpha * state_below * (state_below < 0)
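# Editor's note: a tiny NumPy-only sketch (not part of mozi) of the PReLU rule
# described in the docstring above -- identity for non-negative inputs, a
# learned slope `alpha` for negative ones.
def _prelu_sketch(y, alpha=0.2):
    import numpy as np
    return np.where(y >= 0, y, alpha * y)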
class Noisy_RELU(Template):
def __init__(self, sparsity_factor=0.1, threshold_lr=0.01, alpha=0.01, std=0.1, num_batch=10000, **kwargs):
'''
sparsity_factor: the target sparsity (fraction of active signals) for each neuron
threshold_lr: the learning rate for learning the optimum threshold of each neuron
    so that the activeness of the neuron approaches sparsity_factor
alpha: the weight placed on the current batch's sparsity when updating the
    running mean sparsity over many batches; a smaller alpha gives a more
    stable mean-sparsity estimate
std: the standard deviation of the added noise
num_batch: the number of batches over which the threshold is adapted; after
    that the learned threshold is used unchanged
'''
super(Noisy_RELU, self).__init__(**kwargs)
self.sparsity_factor = sparsity_factor
self.threshold_lr = threshold_lr
self.alpha = alpha
self.std = std
self.num_batch = num_batch
self.threshold = 0.
self.activity = 0.
self.batch_count = 0
def _test_fprop(self, state_below):
return state_below * (state_below > self.threshold)
def _train_fprop(self, state_below):
if self.batch_count > self.num_batch:
return state_below * (state_below > self.threshold)
else:
self.batch_count += 1
state_below = state_below + theano_rand.normal(size=state_below.shape, std=self.std, dtype=floatX)
state_below = state_below * (state_below > self.threshold)
activity = T.mean(state_below > 0, axis=0)
self.activity = self.alpha * activity + (1-self.alpha) * self.activity
self.threshold += self.threshold_lr * (self.activity - self.sparsity_factor)
return state_below * (state_below > self.threshold)
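# Editor's note: a NumPy-only sketch (not part of mozi) of the adaptive
# threshold described in Noisy_RELU above: the per-unit activity is tracked as
# an exponential moving average, and the threshold is nudged each batch until
# the fraction of active units approaches `sparsity_factor`.
def _noisy_relu_threshold_sketch(batches, sparsity_factor=0.1,
                                 threshold_lr=0.01, alpha=0.01):
    import numpy as np
    threshold, activity = 0.0, 0.0
    for x in batches:                                    # x: (batch, units) pre-activations
        active_fraction = np.mean(x > threshold, axis=0)
        activity = alpha * active_fraction + (1 - alpha) * activity
        threshold = threshold + threshold_lr * (activity - sparsity_factor)
    return threshold                                     # per-unit thresholds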
class Softmax(Template):
def _test_fprop(self, state_below):
return T.nnet.softmax(state_below)
def _train_fprop(self, state_below):
return T.nnet.softmax(state_below)
class Tanh(Template):
def _test_fprop(self, state_below):
return T.tanh(state_below)
def _train_fprop(self, state_below):
return T.tanh(state_below)
class Softplus(Template):
def _test_fprop(self, state_below):
return T.nnet.softplus(state_below)
def _train_fprop(self, state_below):
return T.nnet.softplus(state_below)
|
dksahuji/Mozi
|
mozi/layers/activation.py
|
Python
|
mit
| 3,869
|
[
"NEURON"
] |
61b3d78b987b77c88b50d67848ca2208d788bd1329d3338cc35b8d549dc760c7
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Low-level graphics rendering.
This module provides an efficient low-level abstraction over OpenGL. It gives
very good performance for rendering OpenGL primitives; far better than the
typical immediate-mode usage and, on modern graphics cards, better than using
display lists in many cases. The module is used internally by other areas of
pyglet.
See the Programming Guide for details on how to use this graphics API.
Batches and groups
==================
Without even needing to understand the details on how to draw primitives with
the graphics API, developers can make use of `Batch` and `Group`
objects to improve performance of sprite and text rendering.
The `Sprite`, `Label` and `TextLayout` classes all accept a ``batch`` and
``group`` parameter in their constructors. A batch manages a set of objects
that will be drawn all at once, and a group describes the manner in which an
object is drawn.
The following example creates a batch, adds two sprites to the batch, and then
draws the entire batch::
batch = pyglet.graphics.Batch()
car = pyglet.sprite.Sprite(car_image, batch=batch)
boat = pyglet.sprite.Sprite(boat_image, batch=batch)
def on_draw():
batch.draw()
Drawing a complete batch is much faster than drawing the items in the batch
individually, especially when those items belong to a common group.
Groups describe the OpenGL state required for an item. This is for the most
part managed by the sprite and text classes, however you can also use groups
to ensure items are drawn in a particular order. For example, the following
example adds a background sprite which is guaranteed to be drawn before the
car and the boat::
batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
background = pyglet.sprite.Sprite(background_image,
batch=batch, group=background)
car = pyglet.sprite.Sprite(car_image, batch=batch, group=foreground)
boat = pyglet.sprite.Sprite(boat_image, batch=batch, group=foreground)
def on_draw():
batch.draw()
It's preferable to manage sprites and text objects within as few batches as
possible. If the drawing of sprites or text objects need to be interleaved
with other drawing that does not use the graphics API, multiple batches will
be required.
Data item parameters
====================
Many of the functions and methods in this module accept any number of ``data``
parameters as their final parameters. In the documentation these are notated
as ``*data`` in the formal parameter list.
A data parameter describes a vertex attribute format and an optional sequence
to initialise that attribute. Examples of common attribute formats are:
``"v3f"``
Vertex position, specified as three floats.
``"c4B"``
Vertex color, specified as four unsigned bytes.
``"t2f"``
Texture coordinate, specified as two floats.
See `pyglet.graphics.vertexattribute` for the complete syntax of the vertex
format string.
When no initial data is to be given, the data item is just the format string.
For example, the following creates a 2 element vertex list with position and
color attributes::
vertex_list = pyglet.graphics.vertex_list(2, 'v2f', 'c4B')
When initial data is required, wrap the format string and the initial data in
a tuple, for example::
vertex_list = pyglet.graphics.vertex_list(2,
('v2f', (0.0, 1.0, 1.0, 0.0)),
('c4B', (255, 255, 255, 255) * 2))
Drawing modes
=============
Methods in this module that accept a ``mode`` parameter will accept any value
in the OpenGL drawing mode enumeration: ``GL_POINTS``, ``GL_LINE_STRIP``,
``GL_LINE_LOOP``, ``GL_LINES``, ``GL_TRIANGLE_STRIP``, ``GL_TRIANGLE_FAN``,
``GL_TRIANGLES``, ``GL_QUAD_STRIP``, ``GL_QUADS``, and ``GL_POLYGON``.
::
pyglet.graphics.draw(1, GL_POINTS, ('v2i',(10,20)))
However, because of the way the graphics API renders multiple primitives with
shared state, ``GL_POLYGON``, ``GL_LINE_LOOP`` and ``GL_TRIANGLE_FAN`` cannot
be used --- the results are undefined.
When using ``GL_LINE_STRIP``, ``GL_TRIANGLE_STRIP`` or ``GL_QUAD_STRIP`` care
must be taken to insert degenerate vertices at the beginning and end of each
vertex list. For example, given the vertex list::
A, B, C, D
the correct vertex list to provide is::
A, A, B, C, D, D
Alternatively, the ``NV_primitive_restart`` extension can be used if it is
present. This also permits use of ``GL_POLYGON``, ``GL_LINE_LOOP`` and
``GL_TRIANGLE_FAN``. Unfortunately the extension is not provided by older
video drivers, and requires indexed vertex lists.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet.graphics import vertexbuffer, vertexattribute, vertexdomain
_debug_graphics_batch = pyglet.options['debug_graphics_batch']
def draw(size, mode, *data):
'''Draw a primitive immediately.
:Parameters:
`size` : int
Number of vertices given
`mode` : gl primitive type
OpenGL drawing mode, e.g. ``GL_TRIANGLES`` (pass the
constant itself, not a quoted string).
`data` : data items
Attribute formats and data. See the module summary for
details.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
buffers = []
for format, array in data:
attribute = vertexattribute.create_attribute(format)
assert size == len(array) // attribute.count, \
'Data for %s is incorrect length' % format
buffer = vertexbuffer.create_mappable_buffer(
size * attribute.stride, vbo=False)
attribute.set_region(buffer, 0, size, array)
attribute.enable()
attribute.set_pointer(buffer.ptr)
buffers.append(buffer)
glDrawArrays(mode, 0, size)
glFlush()
glPopClientAttrib()
def draw_indexed(size, mode, indices, *data):
'''Draw a primitive with indexed vertices immediately.
:Parameters:
`size` : int
Number of vertices given
`mode` : int
OpenGL drawing mode, e.g. ``GL_TRIANGLES``
`indices` : sequence of int
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and data. See the module summary for details.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
buffers = []
for format, array in data:
attribute = vertexattribute.create_attribute(format)
assert size == len(array) // attribute.count, \
'Data for %s is incorrect length' % format
buffer = vertexbuffer.create_mappable_buffer(
size * attribute.stride, vbo=False)
attribute.set_region(buffer, 0, size, array)
attribute.enable()
attribute.set_pointer(buffer.ptr)
buffers.append(buffer)
if size <= 0xff:
index_type = GL_UNSIGNED_BYTE
index_c_type = ctypes.c_ubyte
elif size <= 0xffff:
index_type = GL_UNSIGNED_SHORT
index_c_type = ctypes.c_ushort
else:
index_type = GL_UNSIGNED_INT
index_c_type = ctypes.c_uint
index_array = (index_c_type * len(indices))(*indices)
glDrawElements(mode, len(indices), index_type, index_array)
glFlush()
glPopClientAttrib()
def _parse_data(data):
'''Given a list of data items, returns (formats, initial_arrays).'''
assert data, 'No attribute formats given'
# Return tuple (formats, initial_arrays).
formats = []
initial_arrays = []
for i, format in enumerate(data):
if isinstance(format, tuple):
format, array = format
initial_arrays.append((i, array))
formats.append(format)
formats = tuple(formats)
return formats, initial_arrays
def _get_default_batch():
shared_object_space = gl.current_context.object_space
try:
return shared_object_space.pyglet_graphics_default_batch
except AttributeError:
shared_object_space.pyglet_graphics_default_batch = Batch()
return shared_object_space.pyglet_graphics_default_batch
def vertex_list(count, *data):
'''Create a `VertexList` not associated with a batch, group or mode.
:Parameters:
`count` : int
The number of vertices in the list.
`data` : data items
Attribute formats and initial data for the vertex list. See the
module summary for details.
:rtype: `VertexList`
'''
# Note that mode=0 because the default batch is never drawn: vertex lists
# returned from this function are drawn directly by the app.
return _get_default_batch().add(count, 0, None, *data)
def vertex_list_indexed(count, indices, *data):
'''Create an `IndexedVertexList` not associated with a batch, group or mode.
:Parameters:
`count` : int
The number of vertices in the list.
`indices` : sequence
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and initial data for the vertex list. See the
module summary for details.
:rtype: `IndexedVertexList`
'''
# Note that mode=0 because the default batch is never drawn: vertex lists
# returned from this function are drawn directly by the app.
return _get_default_batch().add_indexed(count, 0, None, indices, *data)
class Batch(object):
'''Manage a collection of vertex lists for batched rendering.
Vertex lists are added to a `Batch` using the `add` and `add_indexed`
methods. An optional group can be specified along with the vertex list,
which gives the OpenGL state required for its rendering. Vertex lists
with shared mode and group are allocated into adjacent areas of memory and
sent to the graphics card in a single operation.
Call `VertexList.delete` to remove a vertex list from the batch.
'''
def __init__(self):
'''Create a graphics batch.'''
# Mapping to find domain.
# group -> (attributes, mode, indexed) -> domain
self.group_map = {}
# Mapping of group to list of children.
self.group_children = {}
# List of top-level groups
self.top_groups = []
self._draw_list = []
self._draw_list_dirty = False
def invalidate(self):
'''Force the batch to update the draw list.
This method can be used to force the batch to re-compute the draw list
when the ordering of groups has changed.
:since: pyglet 1.2
'''
self._draw_list_dirty = True
def add(self, count, mode, group, *data):
'''Add a vertex list to the batch.
:Parameters:
`count` : int
The number of vertices in the list.
`mode` : int
OpenGL drawing mode enumeration; for example, one of
``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
See the module summary for additional information.
`group` : `Group`
Group of the vertex list, or ``None`` if no group is required.
`data` : data items
Attribute formats and initial data for the vertex list. See
the module summary for details.
:rtype: `VertexList`
'''
formats, initial_arrays = _parse_data(data)
domain = self._get_domain(False, mode, group, formats)
# Create vertex list and initialize
vlist = domain.create(count)
for i, array in initial_arrays:
vlist._set_attribute_data(i, array)
return vlist
def add_indexed(self, count, mode, group, indices, *data):
'''Add an indexed vertex list to the batch.
:Parameters:
`count` : int
The number of vertices in the list.
`mode` : int
OpenGL drawing mode enumeration; for example, one of
``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
See the module summary for additional information.
`group` : `Group`
Group of the vertex list, or ``None`` if no group is required.
`indices` : sequence
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and initial data for the vertex list. See
the module summary for details.
:rtype: `IndexedVertexList`
'''
formats, initial_arrays = _parse_data(data)
domain = self._get_domain(True, mode, group, formats)
# Create vertex list and initialize
vlist = domain.create(count, len(indices))
start = vlist.start
vlist._set_index_data(map(lambda i: i + start, indices))
for i, array in initial_arrays:
vlist._set_attribute_data(i, array)
return vlist
def migrate(self, vertex_list, mode, group, batch):
'''Migrate a vertex list to another batch and/or group.
`vertex_list` and `mode` together identify the vertex list to migrate.
`group` and `batch` are new owners of the vertex list after migration.
The results are undefined if `mode` is not correct or if `vertex_list`
does not belong to this batch (they are not checked and will not
necessarily throw an exception immediately).
`batch` can remain unchanged if only a group change is desired.
:Parameters:
`vertex_list` : `VertexList`
A vertex list currently belonging to this batch.
`mode` : int
The current GL drawing mode of the vertex list.
`group` : `Group`
The new group to migrate to.
`batch` : `Batch`
The batch to migrate to (or the current batch).
'''
formats = vertex_list.domain.__formats
domain = batch._get_domain(False, mode, group, formats)
vertex_list.migrate(domain)
def _get_domain(self, indexed, mode, group, formats):
if group is None:
group = null_group
# Batch group
if group not in self.group_map:
self._add_group(group)
domain_map = self.group_map[group]
# Find domain given formats, indices and mode
key = (formats, mode, indexed)
try:
domain = domain_map[key]
except KeyError:
# Create domain
if indexed:
domain = vertexdomain.create_indexed_domain(*formats)
else:
domain = vertexdomain.create_domain(*formats)
domain.__formats = formats
domain_map[key] = domain
self._draw_list_dirty = True
return domain
def _add_group(self, group):
self.group_map[group] = {}
if group.parent is None:
self.top_groups.append(group)
else:
if group.parent not in self.group_map:
self._add_group(group.parent)
if group.parent not in self.group_children:
self.group_children[group.parent] = []
self.group_children[group.parent].append(group)
self._draw_list_dirty = True
def _update_draw_list(self):
'''Visit group tree in preorder and create a list of bound methods
to call.
'''
def visit(group):
draw_list = []
# Draw domains using this group
domain_map = self.group_map[group]
for (formats, mode, indexed), domain in list(domain_map.items()):
# Remove unused domains from batch
if domain._is_empty():
del domain_map[(formats, mode, indexed)]
continue
draw_list.append(
(lambda d, m: lambda: d.draw(m))(domain, mode))
# Sort and visit child groups of this group
children = self.group_children.get(group)
if children:
children.sort()
for child in list(children):
draw_list.extend(visit(child))
if children or domain_map:
return [group.set_state] + draw_list + [group.unset_state]
else:
# Remove unused group from batch
del self.group_map[group]
if group.parent:
self.group_children[group.parent].remove(group)
try:
del self.group_children[group]
except KeyError:
pass
try:
self.top_groups.remove(group)
except ValueError:
pass
return []
self._draw_list = []
self.top_groups.sort()
for group in list(self.top_groups):
self._draw_list.extend(visit(group))
self._draw_list_dirty = False
if _debug_graphics_batch:
self._dump_draw_list()
def _dump_draw_list(self):
def dump(group, indent=''):
print indent, 'Begin group', group
domain_map = self.group_map[group]
for _, domain in domain_map.items():
print indent, ' ', domain
for start, size in zip(*domain.allocator.get_allocated_regions()):
print indent, ' ', 'Region %d size %d:' % (start, size)
for key, attribute in domain.attribute_names.items():
print indent, ' ',
try:
region = attribute.get_region(attribute.buffer,
start, size)
print key, region.array[:]
except:
print key, '(unmappable)'
for child in self.group_children.get(group, ()):
dump(child, indent + ' ')
print indent, 'End group', group
print 'Draw list for %r:' % self
for group in self.top_groups:
dump(group)
def draw(self):
'''Draw the batch.
'''
if self._draw_list_dirty:
self._update_draw_list()
for func in self._draw_list:
func()
def draw_subset(self, vertex_lists):
'''Draw only some vertex lists in the batch.
The use of this method is highly discouraged, as it is quite
inefficient. Usually an application can be redesigned so that batches
can always be drawn in their entirety, using `draw`.
The given vertex lists must belong to this batch; behaviour is
undefined if this condition is not met.
:Parameters:
`vertex_lists` : sequence of `VertexList` or `IndexedVertexList`
Vertex lists to draw.
'''
# Horrendously inefficient.
def visit(group):
group.set_state()
# Draw domains using this group
domain_map = self.group_map[group]
for (_, mode, _), domain in domain_map.items():
for list in vertex_lists:
if list.domain is domain:
list.draw(mode)
# Sort and visit child groups of this group
children = self.group_children.get(group)
if children:
children.sort()
for child in children:
visit(child)
group.unset_state()
self.top_groups.sort()
for group in self.top_groups:
visit(group)
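# Editor's note: a small sketch (not part of pyglet) tying the pieces above
# together -- adding raw, unindexed vertex data to a Batch under an
# OrderedGroup (defined further below) and returning the batch for drawing.
# 'v2f' and 'c3B' are standard vertex attribute format strings; window setup
# and the on_draw handler are omitted.
def _example_batched_triangle():
    batch = Batch()
    layer = OrderedGroup(0)
    batch.add(3, GL_TRIANGLES, layer,
              ('v2f', (0.0, 0.0, 100.0, 0.0, 50.0, 86.6)),
              ('c3B', (255, 0, 0) * 3))
    return batch  # call batch.draw() from the window's on_draw handler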
class Group(object):
'''Group of common OpenGL state.
Before a vertex list is rendered, its group's OpenGL state is set; as are
that state's ancestors' states. This can be defined arbitrarily on
subclasses; the default state change has no effect, and groups vertex
lists only in the order in which they are drawn.
'''
def __init__(self, parent=None):
'''Create a group.
:Parameters:
`parent` : `Group`
Group to contain this group; its state will be set before this
state's.
'''
self.parent = parent
def __lt__(self, other):
return hash(self) < hash(other)
def set_state(self):
'''Apply the OpenGL state change.
The default implementation does nothing.'''
pass
def unset_state(self):
'''Repeal the OpenGL state change.
The default implementation does nothing.'''
pass
def set_state_recursive(self):
'''Set this group and its ancestry.
Call this method if you are using a group in isolation: the
parent groups will be called in top-down order, with this class's
`set` being called last.
'''
if self.parent:
self.parent.set_state_recursive()
self.set_state()
def unset_state_recursive(self):
'''Unset this group and its ancestry.
The inverse of `set_state_recursive`.
'''
self.unset_state()
if self.parent:
self.parent.unset_state_recursive()
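# Editor's note: a minimal sketch (not part of pyglet) of the
# set_state/unset_state pattern the Group docstring describes -- a group that
# wraps everything drawn with it in standard alpha blending. Only public
# OpenGL bindings imported from pyglet.gl are used.
class _AlphaBlendGroupSketch(Group):
    def set_state(self):
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    def unset_state(self):
        glDisable(GL_BLEND)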
class NullGroup(Group):
'''The default group class used when ``None`` is given to a batch.
This implementation has no effect.
'''
pass
#: The default group.
#:
#: :type: `Group`
null_group = NullGroup()
class TextureGroup(Group):
'''A group that enables and binds a texture.
Texture groups are equal if their textures' targets and names are equal.
'''
# Don't use this, create your own group classes that are more specific.
# This is just an example.
def __init__(self, texture, parent=None):
'''Create a texture group.
:Parameters:
`texture` : `Texture`
Texture to bind.
`parent` : `Group`
Parent group.
'''
super(TextureGroup, self).__init__(parent)
self.texture = texture
def set_state(self):
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
def unset_state(self):
glDisable(self.texture.target)
def __hash__(self):
return hash((self.texture.target, self.texture.id, self.parent))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.texture.target == other.texture.target and
self.texture.id == other.texture.id and
self.parent == other.parent)
def __repr__(self):
return '%s(id=%d)' % (self.__class__.__name__, self.texture.id)
class OrderedGroup(Group):
'''A group with partial order.
Ordered groups with a common parent are rendered in ascending order of
their ``order`` field. This is a useful way to render multiple layers of
a scene within a single batch.
'''
# This can be useful as a top-level group, or as a superclass for other
# groups that need to be ordered.
#
# As a top-level group it's useful because graphics can be composited in a
# known order even if they don't know about each other or share any known
# group.
def __init__(self, order, parent=None):
'''Create an ordered group.
:Parameters:
`order` : int
Order of this group.
`parent` : `Group`
Parent of this group.
'''
super(OrderedGroup, self).__init__(parent)
self.order = order
def __lt__(self, other):
if isinstance(other, OrderedGroup):
return self.order < other.order
return super(OrderedGroup, self).__lt__(other)
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.order == other.order and
self.parent == other.parent)
def __hash__(self):
return hash((self.order, self.parent))
def __repr__(self):
return '%s(%d)' % (self.__class__.__name__, self.order)
|
jpaalasm/pyglet
|
pyglet/graphics/__init__.py
|
Python
|
bsd-3-clause
| 26,045
|
[
"VisIt"
] |
f34146baddf71fc01ddc7ac5bfbd73b482be8acf2c3de587a2bf79c0f81b1b78
|
__author__ = 'Luca Parmesan'
import imp
import csv
from platform import architecture, system
from warnings import warn
from os import getcwd
class OpalKelly:
"""A simplified version of the Opal Kelly class"""
if system() == 'Windows':
if architecture()[0] == '32bit':
ok = imp.load_source('ok', './opalkelly_beta/32bit/ok/ok.py')
elif architecture()[0] == '64bit':
ok = imp.load_source('ok', './opalkelly_beta/64bit/ok/ok.py')
else:
raise Exception('Architecture not recognised')
else:
raise Exception('OS not recognised')
_registers_ = []
pll_info = {}
_clk_sources_ = {'REF': 0,
'PLL0-0': 2,
'PLL0-180': 3,
'PLL1-0': 4,
'PLL1-180': 5,
'PLL2-0': 6,
'PLL2-180': 7,
}
# Opal Kelly error codes
_ok_errors_ = {0: 'NoError',
-1: 'Failed',
-2: 'Timeout',
-3: 'DoneNotHigh',
-4: 'TransferError',
-5: 'CommunicationError',
-6: 'InvalidBitstream',
-7: 'FileError',
-8: 'DeviceNotOpen',
-9: 'InvalidEndpoint',
-10: 'InvalidBlockSize',
-11: 'I2CRestrictedAddress',
-12: 'I2CBitError',
-13: 'I2CNack',
-14: 'I2CUnknownStatus',
-15: 'UnsupportedFeature',
-16: 'FIFOUnderflow',
-17: 'FIFOOverflow',
-18: 'DataAlignmentError',
-19: 'InvalidResetProfile',
-20: 'InvalidParameter',
}
def __init__(self, bit_file, register_file):
"""Initialisation of the Opal Kelly and its PLL"""
self._xem_ = self.ok.FrontPanel()
self._info_ = self.ok.okTDeviceInfo()
self._xem_.OpenBySerial('')
self._xem_.GetDeviceInfo(self._info_)
# choosing the right PLL
self._pll = self.ok.PLL22150()
if self._xem_.GetPLL22150Configuration(self._pll) == 0:
self._whichPLL_ = 'PLL22150'
#self.pll_info = self._xem.GetPLL22150Configuration(self._pll)
self._update_pll_()
else:
del self._pll
self._pll = self.ok.PLL22393()
if self._xem_.GetPLL22393Configuration(self._pll) == 0:
self._whichPLL_ = 'PLL22393'
self.pll_info = self._xem_.GetPLL22393Configuration(self._pll)
self._update_pll_()
config = self._xem_.ConfigureFPGA(bit_file)
if config != 0:
raise Exception('Wrong bit file selected')
self.device_name = self._xem_.GetDeviceID()
# same as self.xem.GetDeviceID()
#self.board_name = self.ok.okCFrontPanel_GetBoardModelString(self.xem.GetBoardModel())
with open(register_file, 'rU') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if len(row) > 0:
if row[0] != '#':
self._registers_.append([row[1],
row[2],
int('0x%s' % row[3], 0),
row[4],
int(row[5]),
row[6],
row[7],
row[8],
row[9],
int(row[10])])
print('You are using: %s' % self.device_name)
def set_register(self, register_name, register_value):
"""Set the 'register_value' to the OK register 'register_name'
The 'register_name' must be one of the names specified in the csv input file"""
register = []
i = 0
founded = False
while i < len(self._registers_) and not founded:
if self._registers_[i][3] == register_name:
founded = True
register = self._registers_[i]
else:
i += 1
if isinstance(register_value, basestring):
register_value = int(register_value, 0)
elif isinstance(register_value, float):
register_value = int(register_value)
if founded is True:
if register[0] == 'FromPC' and register[1] == 'Wire':
if register_value > 2**register[4] - 1:
print('Warning: The register value exceeds the maximum value')
else:
a = register[2]
v = register_value << register[9]
m = 2 ** register[4] - 1 << register[9]
self._xem_.SetWireInValue(a, v, m)
self._xem_.UpdateWireIns()
else:
print(' Warning: the register selected is not to write')
else:
print(' Warning: Register name \'%s\' not found' % register_name)
def get_register(self, register_name):
"""Return the value of the OK register 'register_name'
The 'register_name' must be one of the names specified in the input csv configuration file"""
register = []
i = 0
founded = False
while i < len(self._registers_) and not founded:
if self._registers_[i][3] == register_name:
founded = True
register = self._registers_[i]
else:
i += 1
if founded is True:
if register[0] == 'ToPC' and register[1] == 'Wire':
self._xem_.UpdateWireOuts()
return self._xem_.GetWireOutValue(register[2])
else:
print(' Warning: the register selected is not to read')
else:
print(' Warning: Register name \'%s\' not found' % register_name)
def set_block_pipe(self, register_name):
"""Send a byte stream to the OK fifo
Function not implemented yet
"""
print(' Warning: Function not implemented yet')
def get_block_pipe(self, register_name, data_length, block_size):
"""Read a byte stream from the OK fifo named 'register_name' in the input csv configuration file
data_length = size of the byte stream per 16 bit data (2 bytes per data)
block_size = size of the data packet from the OK over the USB connection
"""
register = []
i = 0
founded = False
while i < len(self._registers_) and not founded:
if self._registers_[i][3] == register_name:
founded = True
register = self._registers_[i]
else:
i += 1
if founded is True:
if register[0] == 'ToPC' and register[1] == 'BTPipe':
buf = bytearray(2*data_length)
out_buf = self._xem_.ReadFromBlockPipeOut(register[2], block_size, buf)
if out_buf < 0:
raise Exception('Opal Kelly error %s' % self._ok_errors_[out_buf])
return buf
else:
print(' Warning: the stream selected is not to read')
else:
print(' Warning: Register name \'%s\' not found' % register_name)
def get_pipe(self, register_name, data_length):
"""Read a byte stream from the OK fifo named 'register_name' in the input csv configuration file
data_length = number of 16-bit data words to read (2 bytes per word)
Note: this function does not wait for a ready state to read the fifo
"""
register = []
i = 0
founded = False
while i < len(self._registers_) and not founded:
if self._registers_[i][3] == register_name:
founded = True
register = self._registers_[i]
else:
i += 1
if founded is True:
if register[0] == 'ToPC' and register[1] == 'BTPipe':
buf = bytearray(data_length*2)
out_buf = self._xem_.ReadFromPipeOut(register[2], buf)
if out_buf < 0:
raise Exception('Opal Kelly error %s' % self._ok_errors_[out_buf])
return buf
else:
print(' Warning: the stream selected is not to read')
else:
print(' Warning: Register name \'%s\' not found' % register_name)
def set_trigger(self, register_name):
"""Send a trigger to the OK of the register 'register_name'
register_name = name of the register in the input csv configuration file
"""
register = []
i = 0
founded = False
while i < len(self._registers_) and not founded:
if self._registers_[i][3] == register_name:
founded = True
register = self._registers_[i]
else:
i += 1
if founded is True:
if register[0] == 'FromPC' and register[1] == 'Trigger':
a = register[2]
m = register[9]
self._xem_.ActivateTriggerIn(a, m)
else:
print(' Warning: the register selected is not to write')
else:
print(' Warning: Register name \'%s\' not found' % register_name)
def get_trigger(self, register_name):
"""Check if a trigger occurred in the OK
Note: Not implemented yet
"""
print(' Warning: Function not implemented yet')
def set_pll(self, pll_number, pll_p, pll_q, pll_enable):
"""Set the PLL parameters P, Q, Enable
pll_number: which PLL to set
pll_p: P factor
pll_q: Q factor
pll_enable: Enable the PLL
Note: does not work
"""
if pll_p < 6:
raise Exception('PLL P parameter must be greater than 6')
if pll_p > 2053:
raise Exception('PLL P parameter must be smaller than 2053')
if pll_q < 2:
raise Exception('PLL Q parameter must be greater than 2')
if pll_q > 257:
raise Exception('PLL Q parameter must be smaller than 257')
self._pll.SetPLLParameters(pll_number, pll_p, pll_q, pll_enable)
if self._whichPLL_ == 'PLL22393':
self._xem_.SetPLL22393Configuration(self._pll)
self._xem_.SetEepromPLL22393Configuration(self._pll)
elif self._whichPLL_ == 'PLL22150':
self._xem_.SetPLL22150Configuration(self._pll)
self._xem_.SetEepromPLL22150Configuration(self._pll)
self._update_pll_()
def get_pll(self, parameter):
"""Return a PLL or a SYSCLK parameter with the 'parameter'
parameter (str): 'Crystal Frequency',
'PLL0 Frequency', 'PLL0 P', 'PLL0 Q', 'PLL0 Enable',
'PLL1 Frequency', 'PLL1 P', 'PLL1 Q', 'PLL1 Enable',
'PLL2 Frequency', 'PLL2 P', 'PLL2 Q', 'PLL2 Enable',
'SYSCLK1 Frequency', 'SYSCLK1 Source', 'SYSCLK1 Divider', 'SYSCLK1 Enable'
'SYSCLK2 Frequency', 'SYSCLK2 Source', 'SYSCLK2 Divider', 'SYSCLK2 Enable'
'SYSCLK3 Frequency', 'SYSCLK3 Source', 'SYSCLK3 Divider', 'SYSCLK3 Enable'
'SYSCLK4 Frequency', 'SYSCLK4 Source', 'SYSCLK4 Divider', 'SYSCLK4 Enable'
'SYSCLK5 Frequency', 'SYSCLK5 Source', 'SYSCLK5 Divider', 'SYSCLK5 Enable'
"""
self._update_pll_()
return self.pll_info[parameter]
def set_sys_clk(self, sys_clk_number, sys_clk_source, sys_clk_divider, sys_clk_enable=True):
"""Set the clock of the OK
sys_clk_number (1 to 5): select one of SYSCLK1 to SYSCLK5
sys_clk_source (str: REF, PLL0-0, PLL0-180, PLL1-0, PLL1-180, PLL2-0, PLL2-180):
set the source of the SYSCLK selected
sys_clk_divider (int): specifies the frequency divider for the SYSCLK selected
sys_clk_enable (boolean): enable/disable the SYSCLK selected
Note: does not work
"""
if sys_clk_divider > 127:
raise Exception('Clock divider cannot be greater than 127')
if sys_clk_divider < 0:
raise Exception('Clock divider cannot be negative')
if sys_clk_source not in self._clk_sources_.keys():
raise Exception('Clock source must be chosen between these parameters: %s' %
'REF, PLL0-0, PLL0-180, PLL1-0, PLL1-180, PLL2-0, PLL2-180')
if sys_clk_number == 5:
warn('Ignoring the clock source, the SYS_CLK5 source is fixed to PLL0-0')
else:
self._pll.SetOutputSource(sys_clk_number - 1, self._clk_sources_[sys_clk_source])
self._pll.SetOutputDivider(sys_clk_number - 1, sys_clk_divider)
self._pll.SetOutputEnable(sys_clk_number - 1, sys_clk_enable)
self._update_pll_()
def _update_pll_(self):
"""Update the _pll state
Note: internal use only
"""
if self._whichPLL_ == 'PLL22393':
self._xem_.GetPLL22393Configuration(self._pll)
self.pll_info = {'Crystal Frequency': self._pll.GetReference(),
'PLL0 Frequency': self._pll.GetPLLFrequency(0),
'PLL1 Frequency': self._pll.GetPLLFrequency(1),
'PLL2 Frequency': self._pll.GetPLLFrequency(2),
'PLL0 P': self._pll.GetPLLP(0),
'PLL1 P': self._pll.GetPLLP(1),
'PLL2 P': self._pll.GetPLLP(2),
'PLL0 Q': self._pll.GetPLLQ(0),
'PLL1 Q': self._pll.GetPLLQ(1),
'PLL2 Q': self._pll.GetPLLQ(2),
'PLL0 Enable': self._pll.IsPLLEnabled(0),
'PLL1 Enable': self._pll.IsPLLEnabled(1),
'PLL2 Enable': self._pll.IsPLLEnabled(2),
'SYSCLK1 Frequency': self._pll.GetOutputFrequency(0),
'SYSCLK2 Frequency': self._pll.GetOutputFrequency(1),
'SYSCLK3 Frequency': self._pll.GetOutputFrequency(2),
'SYSCLK4 Frequency': self._pll.GetOutputFrequency(3),
'SYSCLK5 Frequency': self._pll.GetOutputFrequency(4),
'SYSCLK1 Source': self._pll.GetOutputSource(0),
'SYSCLK2 Source': self._pll.GetOutputSource(1),
'SYSCLK3 Source': self._pll.GetOutputSource(2),
'SYSCLK4 Source': self._pll.GetOutputSource(3),
'SYSCLK5 Source': self._pll.GetOutputSource(4),
'SYSCLK1 Divider': self._pll.GetOutputDivider(0),
'SYSCLK2 Divider': self._pll.GetOutputDivider(1),
'SYSCLK3 Divider': self._pll.GetOutputDivider(2),
'SYSCLK4 Divider': self._pll.GetOutputDivider(3),
'SYSCLK5 Divider': self._pll.GetOutputDivider(4),
'SYSCLK1 Enable': self._pll.IsOutputEnabled(0),
'SYSCLK2 Enable': self._pll.IsOutputEnabled(1),
'SYSCLK3 Enable': self._pll.IsOutputEnabled(2),
'SYSCLK4 Enable': self._pll.IsOutputEnabled(3),
'SYSCLK5 Enable': self._pll.IsOutputEnabled(4),
}
elif self._whichPLL_ == 'PLL22150':
self._xem_.GetPLL22150Configuration(self._pll)
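# Editor's note: a hypothetical usage sketch (not part of this module; the bit
# file, register CSV and register names below are placeholders) showing the
# intended flow: configure the FPGA, write a wire-in register, then read a
# wire-out register back. Running it requires an attached Opal Kelly board.
def _example_usage(bit_file='top.bit', register_file='registers.csv'):
    board = OpalKelly(bit_file, register_file)
    board.set_register('reset', 1)        # must match a 'FromPC'/'Wire' row in the CSV
    return board.get_register('status')   # must match a 'ToPC'/'Wire' row in the CSV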
|
mcspritz/pyOpalKellyWrapper
|
OpalKelly.py
|
Python
|
gpl-2.0
| 16,393
|
[
"CRYSTAL"
] |
95a3d2c320fc99dd717feaa473789f085f24b57de73234d498f414442a2c3be3
|
'''
==============================================================
Group - Group reads based on their UMI and mapping coordinates
==============================================================
*Identify groups of reads based on their genomic coordinate and UMI*
The group command can be used to create two types of outfile: a tagged
BAM or a flatfile describing the read groups
To generate the tagged-BAM file, use the option ``--output-bam`` and
provide a filename with the ``--stdout``/``-S`` option. Alternatively,
if you do not provide a filename, the bam file will be outputted to
the stdout. If you have provided the ``--log``/``-L`` option to send
the logging output elsewhere, you can pipe the output from the group
command directly to e.g samtools view like so::
umi_tools group -I inf.bam --group-out=grouped.tsv --output-bam
--log=group.log --paired | samtools view - |less
The tagged BAM file will have two tags per read:
- UG
Unique_id. 0-indexed unique id number for each group of reads
with the same genomic position and UMI or UMIs inferred to be
from the same true UMI + errors
- BX
Final UMI. The inferred true UMI for the group
To generate the flatfile describing the read groups, include the
``--group-out=<filename>`` option. The columns of the read groups file are
below. The first five columns relate to the read. The final 3 columns
relate to the group.
- read_id
read identifier
- contig
alignment contig
- position
Alignment position. Note that this position is not the start
position of the read in the BAM file but the start of the read
taking into account the read strand and cigar
- gene
The gene assignment for the read. Note, this will be NA unless the
--per-gene option is specified
- umi
The read UMI
- umi_count
The number of times this UMI is observed for reads at the same
position
- final_umi
The inferred true UMI for the group
- final_umi_count
The total number of reads within the group
- unique_id
The unique id for the group
group-specific options
----------------------
"""""""""""
--group-out
"""""""""""
Outfile name for file mapping read id to read group
"""""""""
--out-bam
"""""""""
Output a bam file with read groups tagged using the UG tag
"""""""""""""""
--umi-group-tag
"""""""""""""""
BAM tag for the error corrected UMI selected for the group. Default=BX
'''
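# Editor's note: a minimal pysam sketch (not part of umi_tools) showing how the
# UG/BX tags described above can be read back from the tagged BAM produced by
# `umi_tools group --output-bam`. The path "grouped.bam" is an illustrative
# placeholder.
def _example_iter_group_tags(bam_path="grouped.bam"):
    import pysam
    with pysam.AlignmentFile(bam_path, "rb") as bam:
        for read in bam:
            if read.has_tag("UG"):
                # 0-indexed group id and the inferred true UMI for the group
                yield read.query_name, read.get_tag("UG"), read.get_tag("BX")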
import sys
import collections
import os
# required to make iteritems python2 and python3 compatible
from builtins import dict
from future.utils import iteritems
import pysam
import umi_tools.Utilities as U
import umi_tools.Documentation as Documentation
import umi_tools.network as network
import umi_tools.sam_methods as sam_methods
# add the generic docstring text
__doc__ = __doc__ + Documentation.GENERIC_DOCSTRING_GDC
__doc__ = __doc__ + Documentation.GROUP_DEDUP_GENERIC_OPTIONS
usage = '''
group - Group reads based on their UMI
Usage: umi_tools group --output-bam [OPTIONS] [--stdin=INFILE.bam] [--stdout=OUTFILE.bam]
note: If --stdout is omitted, output is written to standard out. To
generate a valid BAM file on standard out, please
redirect log with --log=LOGFILE or --log2stderr '''
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=usage,
description=globals()["__doc__"])
group = U.OptionGroup(parser, "group-specific options")
group.add_option("--group-out", dest="tsv", type="string",
help="Outfile name for file mapping read id to read group",
default=None)
group.add_option("--output-bam", dest="output_bam", action="store_true",
default=False,
help=("output a bam file with read groups tagged using the UG tag"
"[default=%default]"))
parser.add_option("--umi-group-tag", dest="umi_group_tag",
type="string", help="tag for the outputted umi group",
default='BX')
parser.add_option_group(group)
# add common options (-h/--help, ...) and parse command line
(options, args) = U.Start(parser, argv=argv)
U.validateSamOptions(options, group=True)
if options.stdin != sys.stdin:
in_name = options.stdin.name
options.stdin.close()
else:
raise ValueError("Input on standard in not currently supported")
if options.stdout != sys.stdout:
if options.no_sort_output:
out_name = options.stdout.name
else:
out_name = U.getTempFilename(dir=options.tmpdir)
sorted_out_name = options.stdout.name
options.stdout.close()
assert options.output_bam, (
"To output a bam you must include --output-bam option")
else:
if options.no_sort_output:
out_name = "-"
else:
out_name = U.getTempFilename(dir=options.tmpdir)
sorted_out_name = "-"
if not options.no_sort_output: # need to determine the output format for sort
if options.out_sam:
sort_format = "sam"
else:
sort_format = "bam"
if options.in_sam:
in_mode = "r"
else:
in_mode = "rb"
if options.out_sam:
out_mode = "wh"
else:
out_mode = "wb"
infile = pysam.Samfile(in_name, in_mode)
if options.output_bam:
outfile = pysam.Samfile(out_name, out_mode, template=infile)
else:
outfile = None
if options.tsv:
mapping_outfile = U.openFile(options.tsv, "w")
mapping_outfile.write("%s\n" % "\t".join(
["read_id", "contig", "position", "gene", "umi", "umi_count",
"final_umi", "final_umi_count", "unique_id"]))
nInput, nOutput, unique_id, input_reads, output_reads = 0, 0, 0, 0, 0
gene_tag = options.gene_tag
metacontig2contig = None
if options.unmapped_reads in ["use", "output"]:
output_unmapped = True
else:
output_unmapped = False
if options.chrom:
inreads = infile.fetch(reference=options.chrom)
else:
if options.per_gene and options.gene_transcript_map:
metacontig2contig = sam_methods.getMetaContig2contig(
infile, options.gene_transcript_map)
metatag = "MC"
inreads = sam_methods.metafetcher(infile, metacontig2contig, metatag)
gene_tag = metatag
else:
inreads = infile.fetch(until_eof=output_unmapped)
bundle_iterator = sam_methods.get_bundles(
options,
all_reads=True,
return_read2=True,
return_unmapped=output_unmapped,
metacontig_contig=metacontig2contig)
# set up UMIClusterer functor with methods specific to
# specified options.method
processor = network.UMIClusterer(options.method)
for bundle, key, status in bundle_iterator(inreads):
# write out read2s and unmapped/chimeric (if these options are set)
if status == 'single_read':
# bundle is just a single read here
nInput += 1
if outfile:
outfile.write(bundle)
nOutput += 1
continue
umis = bundle.keys()
counts = {umi: bundle[umi]["count"] for umi in umis}
nInput += sum(counts.values())
while nOutput >= output_reads + 10000:
output_reads += 10000
U.info("Written out %i reads" % output_reads)
while nInput >= input_reads + 1000000:
input_reads += 1000000
U.info("Parsed %i input reads" % input_reads)
# group the umis
groups = processor(
counts,
threshold=options.threshold)
for umi_group in groups:
top_umi = umi_group[0]
group_count = sum(counts[umi] for umi in umi_group)
for umi in umi_group:
reads = bundle[umi]['read']
for read in reads:
if outfile:
# Add the 'UG' tag to the read
read.set_tag('UG', unique_id)
read.set_tag(options.umi_group_tag, top_umi)
outfile.write(read)
if options.tsv:
if options.per_gene:
gene = read.get_tag(gene_tag)
else:
gene = "NA"
mapping_outfile.write("%s\n" % "\t".join(map(str, (
read.query_name, read.reference_name,
sam_methods.get_read_position(
read, options.soft_clip_threshold)[1],
gene,
umi.decode(),
counts[umi],
top_umi.decode(),
group_count,
unique_id))))
nOutput += 1
unique_id += 1
if outfile:
outfile.close()
if not options.no_sort_output:
# sort the output
pysam.sort("-o", sorted_out_name, "-O", sort_format, "--no-PG", out_name)
os.unlink(out_name) # delete the tempfile
if options.tsv:
mapping_outfile.close()
# write footer and output benchmark information.
U.info(
"Reads: %s" % ", ".join(["%s: %s" % (x[0], x[1]) for x in
bundle_iterator.read_events.most_common()]))
U.info("Number of reads out: %i, Number of groups: %i" %
(nOutput, unique_id))
U.info("Total number of positions deduplicated: %i" %
processor.positions)
if processor.positions > 0:
U.info("Mean number of unique UMIs per position: %.2f" %
(float(processor.total_umis_per_position) /
processor.positions))
U.info("Max. number of unique UMIs per position: %i" %
processor.max_umis_per_position)
else:
U.warn("The BAM did not contain any valid "
"reads/read pairs for deduplication")
U.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
CGATOxford/UMI-tools
|
umi_tools/group.py
|
Python
|
mit
| 10,510
|
[
"pysam"
] |
cce79de2ce5087014ac2829d4ab2abfa6d5b76fd803c26eeefc2f3bbff2f5561
|
""" This module contain solvers for all kinds of equations:
- algebraic, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- transcendental, use tsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy.core.sympify import sympify
from sympy.core.basic import S, Mul
from sympy.core.add import Add
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Wild
from sympy.core.relational import Equality
from sympy.core.numbers import ilcm
from sympy.functions import log, exp, LambertW
from sympy.simplify import simplify, collect
from sympy.matrices import Matrix, zeros
from sympy.polys import roots
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import any, all
from sympy.utilities.iterables import iff
from sympy.utilities.lambdify import lambdify
from sympy.mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from warnings import warn
# Codes for guess solve strategy
GS_POLY = 0
GS_RATIONAL = 1
GS_POLY_CV_1 = 2 # can be converted to a polynomial equation via the change of variable y -> x**a, a real
GS_POLY_CV_2 = 3 # can be converted to a polynomial equation multiplying on both sides by x**m
# for example, x + 1/x == 0. Multiplying by x yields x**2 + 1 == 0
GS_RATIONAL_CV_1 = 4 # can be converted to a rational equation via the change of variable y -> x**n
GS_PIECEWISE = 5
GS_TRANSCENDENTAL = 6
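# Editor's note: a brief illustration (not part of this module) of the
# GS_POLY_CV_2 strategy described above -- clearing a negative power of the
# symbol turns the equation into a polynomial one:
#
#   >>> from sympy import Symbol, together, fraction
#   >>> x = Symbol('x')
#   >>> fraction(together(x + 1/x))
#   (x**2 + 1, x)
#
# i.e. x + 1/x == 0 is equivalent to the polynomial equation x**2 + 1 == 0
# (away from x == 0).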
def guess_solve_strategy(expr, symbol):
"""
Tries to guess what approach should be used to solve a specific equation
Returns
=======
- -1: could not guess
- integer > 0: code representing certain type of equation. See GS_* fields
on this module for a complete list
Examples
========
>>> from sympy import Symbol, Rational
>>> from sympy.solvers.solvers import guess_solve_strategy
>>> from sympy.abc import x
>>> guess_solve_strategy(x**2 + 1, x)
0
>>> guess_solve_strategy(x**Rational(1,2) + 1, x)
2
"""
eq_type = -1
if expr.is_Add:
return max([guess_solve_strategy(i, symbol) for i in expr.args])
elif expr.is_Mul:
# check for rational functions
num, denom = expr.as_numer_denom()
if denom != 1 and denom.has(symbol):
#we have a quotient
m = max(guess_solve_strategy(num, symbol), guess_solve_strategy(denom, symbol))
if m == GS_POLY:
return GS_RATIONAL
elif m == GS_POLY_CV_1:
return GS_RATIONAL_CV_1
else:
raise NotImplementedError
else:
return max([guess_solve_strategy(i, symbol) for i in expr.args])
elif expr.is_Symbol:
return GS_POLY
elif expr.is_Pow:
if expr.exp.has(symbol):
return GS_TRANSCENDENTAL
elif not expr.exp.has(symbol) and expr.base.has(symbol):
if expr.exp.is_Integer and expr.exp > 0:
eq_type = max(eq_type, GS_POLY)
elif expr.exp.is_Integer and expr.exp < 0:
eq_type = max(eq_type, GS_POLY_CV_2)
elif expr.exp.is_Rational:
eq_type = max(eq_type, GS_POLY_CV_1)
else:
return GS_TRANSCENDENTAL
elif expr.is_Piecewise:
return GS_PIECEWISE
elif expr.is_Function and expr.has(symbol):
return GS_TRANSCENDENTAL
elif not expr.has(symbol):
return GS_POLY
return eq_type
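# Illustrative sketch (hypothetical helper, not part of the original module):
# two more strategy guesses, assuming the GS_* codes defined above.  A true
# quotient of polynomials is classified as GS_RATIONAL, while a negative
# integer power inside a sum is flagged GS_POLY_CV_2 (clear the denominator
# by multiplying through by x**m).
def _demo_guess_solve_strategy():
    from sympy.abc import x
    rational = guess_solve_strategy((x + 1)/(x - 1), x)   # expected: GS_RATIONAL
    needs_mul = guess_solve_strategy(x + 1/x, x)          # expected: GS_POLY_CV_2
    return rational, needs_mul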
def solve(f, *symbols, **flags):
"""Solves equations and systems of equations.
Currently supported are univariate polynomial, transcendental
equations, piecewise combinations thereof and systems of linear
and polynomial equations. Input is formed as a single expression
or an equation, or an iterable container in case of an equation
system. The type of output may vary and depends heavily on the
input. For more details refer to more problem specific functions.
By default all solutions are simplified to make the output more
readable. If this is not the expected behavior (e.g., because of
speed issues) set simplified=False in function arguments.
To solve equations and systems of equations like recurrence relations
or differential equations, use rsolve() or dsolve(), respectively.
>>> from sympy import I, solve
>>> from sympy.abc import x, y
Solve a polynomial equation:
>>> solve(x**4-1, x)
[1, -1, -I, I]
Solve a linear system:
>>> solve((x+5*y-2, -3*x+6*y-15), x, y)
{x: -3, y: 1}
"""
def sympit(w):
return map(sympify, iff(isinstance(w,(list, tuple, set)), w, [w]))
# make f and symbols into lists of sympified quantities
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
bare_f = not isinstance(f, (list, tuple, set))
f, symbols = (sympit(w) for w in [f, symbols])
for i, fi in enumerate(f):
if isinstance(fi, Equality):
f[i] = fi.lhs - fi.rhs
if not symbols:
#get symbols from equations or supply dummy symbols since
#solve(3,x) returns []...though it seems that it should raise some sort of error TODO
symbols = set([])
for fi in f:
symbols |= fi.atoms(Symbol) or set([Symbol('x',dummy=True)])
symbols = list(symbols)
if bare_f:
f=f[0]
if len(symbols) == 1:
if isinstance(symbols[0], (list, tuple, set)):
symbols = symbols[0]
result = list()
# Begin code handling for Function and Derivative instances
# Basic idea: store all the passed symbols in symbols_passed, check to see
# if any of them are Function or Derivative types, if so, use a dummy
# symbol in their place, and set symbol_swapped = True so that other parts
    # of the code can be aware of the swap. Once all swapping is done,
# continue on with regular solving as usual, and swap back at the end of
# the routine, so that whatever was passed in symbols is what is returned.
symbols_new = []
symbol_swapped = False
symbols_passed = list(symbols)
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
elif s.is_Function:
symbol_swapped = True
s_new = Symbol('F%d' % i, dummy=True)
elif s.is_Derivative:
symbol_swapped = True
s_new = Symbol('D%d' % i, dummy=True)
else:
            raise TypeError('not a Symbol, Function or Derivative')
symbols_new.append(s_new)
if symbol_swapped:
swap_back_dict = dict(zip(symbols_new, symbols))
# End code for handling of Function and Derivative instances
if not isinstance(f, (tuple, list, set)):
# Create a swap dictionary for storing the passed symbols to be solved
# for, so that they may be swapped back.
if symbol_swapped:
swap_dict = zip(symbols, symbols_new)
f = f.subs(swap_dict)
symbols = symbols_new
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
f = piecewise_fold(f)
if len(symbols) != 1:
result = {}
for s in symbols:
result[s] = solve(f, s, **flags)
if flags.get('simplified', True):
for s, r in result.items():
result[s] = map(simplify, r)
return result
symbol = symbols[0]
strategy = guess_solve_strategy(f, symbol)
if strategy == GS_POLY:
poly = f.as_poly( symbol )
if poly is None:
raise NotImplementedError("Cannot solve equation " + str(f) + " for "
+ str(symbol))
            # for cubics and quartics, if the flag wasn't set, DON'T simplify
            # by default since the results are quite long. Perhaps one could
            # base this decision on a certain critical length of the roots.
if poly.degree > 2:
flags['simplified'] = flags.get('simplified', False)
result = roots(poly, cubics=True, quartics=True).keys()
elif strategy == GS_RATIONAL:
P, Q = f.as_numer_denom()
#TODO: check for Q != 0
result = solve(P, symbol, **flags)
elif strategy == GS_POLY_CV_1:
args = list(f.args)
if isinstance(f, Add):
# we must search for a suitable change of variable
# collect exponents
exponents_denom = list()
for arg in args:
if isinstance(arg, Pow):
exponents_denom.append(arg.exp.q)
elif isinstance(arg, Mul):
for mul_arg in arg.args:
if isinstance(mul_arg, Pow):
exponents_denom.append(mul_arg.exp.q)
assert len(exponents_denom) > 0
if len(exponents_denom) == 1:
m = exponents_denom[0]
else:
                    # get the LCM of the denominators
m = reduce(ilcm, exponents_denom)
                # substitute x -> t**m;
                # we assume t positive for simplification purposes
t = Symbol('t', positive=True, dummy=True)
f_ = f.subs(symbol, t**m)
if guess_solve_strategy(f_, t) != GS_POLY:
raise NotImplementedError("Could not convert to a polynomial equation: %s" % f_)
cv_sols = solve(f_, t)
for sol in cv_sols:
result.append(sol**m)
elif isinstance(f, Mul):
for mul_arg in args:
result.extend(solve(mul_arg, symbol))
elif strategy == GS_POLY_CV_2:
m = 0
args = list(f.args)
if isinstance(f, Add):
for arg in args:
if isinstance(arg, Pow):
m = min(m, arg.exp)
elif isinstance(arg, Mul):
for mul_arg in arg.args:
if isinstance(mul_arg, Pow):
m = min(m, mul_arg.exp)
elif isinstance(f, Mul):
for mul_arg in args:
if isinstance(mul_arg, Pow):
m = min(m, mul_arg.exp)
f1 = simplify(f*symbol**(-m))
result = solve(f1, symbol)
# TODO: we might have introduced unwanted solutions
# when multiplied by x**-m
elif strategy == GS_PIECEWISE:
result = set()
for expr, cond in f.args:
candidates = solve(expr, *symbols)
if isinstance(cond, bool) or cond.is_Number:
if not cond:
continue
# Only include solutions that do not match the condition
# of any of the other pieces.
for candidate in candidates:
matches_other_piece = False
for other_expr, other_cond in f.args:
if isinstance(other_cond, bool) \
or other_cond.is_Number:
continue
if bool(other_cond.subs(symbol, candidate)):
matches_other_piece = True
break
if not matches_other_piece:
result.add(candidate)
else:
for candidate in candidates:
if bool(cond.subs(symbol, candidate)):
result.add(candidate)
result = list(result)
elif strategy == GS_TRANSCENDENTAL:
#a, b = f.as_numer_denom()
# Let's throw away the denominator for now. When we have robust
# assumptions, it should be checked, that for the solution,
# b!=0.
result = tsolve(f, *symbols)
elif strategy == -1:
raise ValueError('Could not parse expression %s' % f)
else:
raise NotImplementedError("No algorithms are implemented to solve equation %s" % f)
        # This symbol swap should not be necessary for the single symbol case: if you've
        # solved for the symbol then it will not appear in the solution. Right now, however,
        # ODEs are getting solutions from solve (even though they shouldn't be -- see the
        # swap_back test in test_solvers).
if symbol_swapped:
result = [ri.subs(swap_back_dict) for ri in result]
if flags.get('simplified', True) and strategy != GS_RATIONAL:
return map(simplify, result)
else:
return result
else:
if not f:
return {}
else:
# Create a swap dictionary for storing the passed symbols to be
# solved for, so that they may be swapped back.
if symbol_swapped:
swap_dict = zip(symbols, symbols_new)
f = [fi.subs(swap_dict) for fi in f]
symbols = symbols_new
polys = []
for g in f:
poly = g.as_poly(*symbols)
if poly is not None:
polys.append(poly)
else:
raise NotImplementedError()
if all(p.is_linear for p in polys):
n, m = len(f), len(symbols)
matrix = zeros((n, m + 1))
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = list(monom).index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
soln = solve_linear_system(matrix, *symbols, **flags)
else:
soln = solve_poly_system(polys)
# Use swap_dict to ensure we return the same type as what was
# passed
if symbol_swapped:
if isinstance(soln, dict):
res = {}
for k in soln.keys():
res.update({swap_back_dict[k]: soln[k]})
return res
else:
return soln
else:
return soln
def solve_linear_system(system, *symbols, **flags):
"""Solve system of N linear equations with M variables, which means
both Cramer and over defined systems are supported. The possible
number of solutions is zero, one or infinite. Respectively this
procedure will return None or dictionary with solutions. In the
case of over defined system all arbitrary parameters are skipped.
This may cause situation in with empty dictionary is returned.
In this case it means all symbols can be assigned arbitrary values.
Input to this functions is a Nx(M+1) matrix, which means it has
to be in augmented form. If you are unhappy with such setting
use 'solve' method instead, where you can input equations
explicitly. And don't worry about the matrix, this function
is persistent and will make a local copy of it.
The algorithm used here is fraction free Gaussian elimination,
which results, after elimination, in upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system:
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
"""
matrix = system[:,:]
syms = list(symbols)
i, m = 0, matrix.cols-1 # don't count augmentation
while i < matrix.rows:
if i == m:
# an overdetermined system
if any(matrix[i:,m]):
return None # no solutions
else:
# remove trailing rows
matrix = matrix[:i,:]
break
if not matrix[i, i]:
# there is no pivot in current column
# so try to find one in other columns
for k in xrange(i+1, m):
if matrix[i, k]:
break
else:
if matrix[i, m]:
return None # no solutions
else:
# zero row or was a linear combination of
# other rows so now we can safely skip it
matrix.row_del(i)
continue
            # we want to change the order of columns so
            # the order of variables must also change
syms[i], syms[k] = syms[k], syms[i]
matrix.col_swap(i, k)
        pivot_inv = S.One / matrix[i, i]
# divide all elements in the current row by the pivot
matrix.row(i, lambda x, _: x * pivot_inv)
for k in xrange(i+1, matrix.rows):
if matrix[k, i]:
coeff = matrix[k, i]
# subtract from the current row the row containing
# pivot and multiplied by extracted coefficient
matrix.row(k, lambda x, j: simplify(x - matrix[i, j]*coeff))
i += 1
# if there weren't any problems, augmented matrix is now
# in row-echelon form so we can check how many solutions
# there are and extract them using back substitution
simplified = flags.get('simplified', True)
if len(syms) == matrix.rows:
# this system is Cramer equivalent so there is
# exactly one solution to this system of equations
k, solutions = i-1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in xrange(k+1, m):
content -= matrix[k, j]*solutions[syms[j]]
if simplified:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
elif len(syms) > matrix.rows:
        # this system will have an infinite number of solutions
# dependent on exactly len(syms) - i parameters
k, solutions = i-1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in xrange(k+1, i):
content -= matrix[k, j]*solutions[syms[j]]
# run back-substitution for parameters
for j in xrange(i, m):
content -= matrix[k, j]*syms[j]
if simplified:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
else:
return None # no solutions
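# Illustrative sketch (hypothetical helper, not part of the original module):
# for an underdetermined system the pivot variables are expressed in terms of
# the remaining free symbols, e.g. the single equation x + y + z == 3 should
# give {x: 3 - y - z}.
def _demo_solve_linear_system_underdetermined():
    from sympy.abc import x, y, z
    system = Matrix(((1, 1, 1, 3),))   # augmented row for x + y + z == 3
    return solve_linear_system(system, x, y, z)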
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
"""Solve equation of a type p(x; a_1, ..., a_k) == q(x) where both
p, q are univariate polynomials and f depends on k parameters.
The result of this functions is a dictionary with symbolic
values of those parameters with respect to coefficients in q.
This functions accepts both Equations class instances and ordinary
SymPy expressions. Specification of parameters and variable is
obligatory for efficiency and simplicity reason.
>>> from sympy import Eq
>>> from sympy.abc import a, b, c, x
>>> from sympy.solvers import solve_undetermined_coeffs
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Equality):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
system = collect(equ.expand(), sym, evaluate=False).values()
if not any([ equ.has(sym) for equ in system ]):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
""" LU function works for invertible only """
assert matrix.rows == matrix.cols-1
A = matrix[:matrix.rows,:matrix.rows]
b = matrix[:,matrix.cols-1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i,0]
return solutions
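# Illustrative sketch (hypothetical helper, not part of the original module):
# the LU-based helper takes the same augmented N x (N+1) matrix as
# solve_linear_system, but only handles square, invertible systems.
def _demo_solve_linear_system_LU():
    from sympy.abc import x, y
    system = Matrix(((1, 4, 2), (-2, 1, 14)))   # x + 4*y == 2, -2*x + y == 14
    return solve_linear_system_LU(system, [x, y])   # expected: {x: -6, y: 2}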
x = Symbol('x', dummy=True)
a,b,c,d,e,f,g,h = [Wild(t, exclude=[x]) for t in 'abcdefgh']
patterns = None
def _generate_patterns():
"""
Generates patterns for transcendental equations.
    This is called lazily from the tsolve() function and the result is stored
    in the global ``patterns`` variable.
"""
tmp1 = f ** (h-(c*g/b))
tmp2 = (-e*tmp1/a)**(1/d)
global patterns
patterns = [
(a*(b*x+c)**d + e , ((-(e/a))**(1/d)-c)/b),
( b+c*exp(d*x+e) , (log(-b/c)-e)/d),
(a*x+b+c*exp(d*x+e) , -b/a-LambertW(c*d*exp(e-b*d/a)/a)/d),
( b+c*f**(d*x+e) , (log(-b/c)-e*log(f))/d/log(f)),
(a*x+b+c*f**(d*x+e) , -b/a-LambertW(c*d*f**(e-b*d/a)*log(f)/a)/d/log(f)),
( b+c*log(d*x+e) , (exp(-b/c)-e)/d),
(a*x+b+c*log(d*x+e) , -e/d+c/a*LambertW(a/c/d*exp(-b/c+a*e/c/d))),
(a*(b*x+c)**d + e*f**(g*x+h) , -c/b-d*LambertW(-tmp2*g*log(f)/b/d)/g/log(f))
]
def tsolve(eq, sym):
"""
Solves a transcendental equation with respect to the given
symbol. Various equations containing mixed linear terms, powers,
and logarithms, can be solved.
Only a single solution is returned. This solution is generally
not unique. In some cases, a complex solution may be returned
even though a real solution exists.
>>> from sympy import tsolve, log
>>> from sympy.abc import x
>>> tsolve(3**(2*x+5)-4, x)
[(-5*log(3) + log(4))/(2*log(3))]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if patterns is None:
_generate_patterns()
eq = sympify(eq)
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
sym = sympify(sym)
eq2 = eq.subs(sym, x)
# First see if the equation has a linear factor
# In that case, the other factor can contain x in any way (as long as it
# is finite), and we have a direct solution to which we add others that
# may be found for the remaining portion.
r = Wild('r')
m = eq2.match((a*x+b)*r)
if m and m[a]:
return [(-b/a).subs(m).subs(x, sym)] + solve(m[r], x)
for p, sol in patterns:
m = eq2.match(p)
if m:
return [sol.subs(m).subs(x, sym)]
    # let's also try to invert the equation
lhs = eq
rhs = S.Zero
while True:
indep, dep = lhs.as_independent(sym)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep is S.Zero:
break
lhs = dep
            rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
            rhs /= indep
    #  f(x) = g  ->  x = f**(-1)(g)
    if lhs.is_Function and lhs.nargs == 1 and hasattr(lhs, 'inverse'):
        rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
sol = solve(lhs-rhs, sym)
return sol
elif lhs.is_Add:
# just a simple case - we do variable substitution for first function,
# and if it removes all functions - let's call solve.
        # use case:  exp(x) + exp(-x) = y   ->   t + 1/t = y
t = Symbol('t', dummy=True)
terms = lhs.args
# find first term which is Function
for f1 in lhs.args:
if f1.is_Function:
break
else:
raise NotImplementedError("Unable to solve the equation" + \
"(tsolve: at least one Function expected at this point")
# perform the substitution
lhs_ = lhs.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not (lhs_.is_Function or
any(term.is_Function for term in lhs_.args)):
cv_sols = solve(lhs_ - rhs, t)
for sol in cv_sols:
if sol.has(sym):
raise NotImplementedError("Unable to solve the equation")
            cv_inv = solve(t - f1, sym)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
return sols
raise NotImplementedError("Unable to solve the equation.")
def msolve(*args, **kwargs):
"""
Compatibility wrapper pointing to nsolve().
msolve() has been renamed to nsolve(), please use nsolve() directly."""
    warn('msolve() has been renamed, please use nsolve() instead',
DeprecationWarning)
    # args comes in as a tuple; copy it to a list so the first two entries can be swapped
    args = list(args)
    args[0], args[1] = args[1], args[0]
return nsolve(*args, **kwargs)
# TODO: option for calculating J numerically
def nsolve(*args, **kwargs):
"""
Solve a nonlinear equation system numerically.
nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)
f is a vector function of symbolic expressions representing the system.
args are the variables. If there is only one variable, this argument can be
omitted.
x0 is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to evaluate
the function and the Jacobian matrix. Make sure to use a module that
supports matrices. For more information on the syntax, please see the
docstring of lambdify.
Overdetermined systems are supported.
>>> from sympy import Symbol, nsolve
>>> import sympy
>>> sympy.mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print nsolve((f1, f2), (x1, x2), (-1, 1))
[-1.19287309935246]
[ 1.27844411169911]
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
    mpmath.findroot is used; see its documentation for more extensive
    information, especially concerning keyword parameters and available solvers.
"""
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if isinstance(f, (list, tuple)):
f = Matrix(f).T
if not isinstance(f, Matrix):
# assume it's a sympy expression
if isinstance(f, Equality):
f = f.lhs - f.rhs
f = f.evalf()
atoms = f.atoms(Symbol)
if fargs is None:
fargs = atoms.copy().pop()
if not (len(atoms) == 1 and (fargs in atoms or fargs[0] in atoms)):
raise ValueError('expected a one-dimensional and numerical function')
# the function is much better behaved if there is no denominator
f = f.as_numer_denom()[0]
f = lambdify(fargs, f, modules)
return findroot(f, x0, **kwargs)
if len(fargs) > f.cols:
raise NotImplementedError('need at least as many equations as variables')
verbose = kwargs.get('verbose', False)
if verbose:
print 'f(x):'
print f
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print 'J(x):'
print J
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
return x
|
tovrstra/sympy
|
sympy/solvers/solvers.py
|
Python
|
bsd-3-clause
| 28,803
|
[
"Gaussian"
] |
b6f057523a66675a52c98ff37ae9f0cb0e61e4f659f981a8a4c7259fb4f6ba6f
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
import MDAnalysis as mda
import numpy as np
from numpy.testing import (assert_equal, assert_,
assert_almost_equal, assert_raises)
from MDAnalysisTests.coordinates.reference import RefACHE, RefCappedAla
from MDAnalysisTests.datafiles import (PRM, TRJ, TRJ_bz2, PRMpbc, TRJpbc_bz2)
from MDAnalysisTests.coordinates.base import BaseTimestepTest
class _TRJReaderTest(object):
    # use as a base class (override setUp()) and mix in a reference class
def tearDown(self):
del self.universe
def test_load_prm(self):
U = self.universe
assert_equal(len(U.atoms), self.ref_n_atoms,
"load Universe from PRM and TRJ")
def test_n_atoms(self):
assert_equal(self.universe.trajectory.n_atoms, self.ref_n_atoms,
"wrong number of atoms")
def test_n_frames(self):
assert_equal(self.universe.trajectory.n_frames, self.ref_n_frames,
"wrong number of frames in xyz")
def test_periodic(self):
assert_equal(self.universe.trajectory.periodic, self.ref_periodic)
def test_amber_proteinselection(self):
protein = self.universe.select_atoms('protein')
assert_equal(protein.n_atoms, self.ref_proteinatoms,
"error in protein selection (HIS or termini?)")
def test_sum_centres_of_geometry(self):
protein = self.universe.select_atoms('protein')
total = np.sum([protein.center_of_geometry() for ts in
self.universe.trajectory])
assert_almost_equal(total, self.ref_sum_centre_of_geometry, self.prec,
err_msg="sum of centers of geometry over the "
"trajectory do not match")
def test_initial_frame_is_0(self):
assert_equal(self.universe.trajectory.ts.frame, 0,
"initial frame is not 0 but {0}".format(
self.universe.trajectory.ts.frame))
def test_starts_with_first_frame(self):
"""Test that coordinate arrays are filled as soon as the trajectory
has been opened."""
assert_(np.any(self.universe.atoms.positions > 0),
"Reader does not populate positions right away.")
def test_rewind(self):
trj = self.universe.trajectory
trj.next()
trj.next() # for readers that do not support indexing
assert_equal(trj.ts.frame, 2,
"failed to forward to frame 2 (frameindex 2)")
trj.rewind()
assert_equal(trj.ts.frame, 0, "failed to rewind to first frame")
assert_(np.any(self.universe.atoms.positions > 0),
"Reader does not populate positions after rewinding.")
def test_full_slice(self):
trj_iter = self.universe.trajectory[:]
frames = [ts.frame for ts in trj_iter]
assert_equal(frames, np.arange(self.universe.trajectory.n_frames))
def test_random_access(self):
u = self.universe
pos1 = u.atoms[0].position
u.trajectory.next()
u.trajectory.next()
pos3 = u.atoms[0].position
u.trajectory[0]
assert_equal(u.atoms[0].position, pos1)
u.trajectory[2]
assert_equal(u.atoms[0].position, pos3)
class TestTRJReader(_TRJReaderTest, RefACHE):
def setUp(self):
self.universe = mda.Universe(PRM, TRJ)
self.prec = 3
def test_read_frame_reopens(self):
# should automatically reopen
u = self.universe
u.trajectory.close()
u.trajectory[2]
assert_(u.trajectory.ts.frame == 2)
class TestBzippedTRJReader(TestTRJReader):
def setUp(self):
self.universe = mda.Universe(PRM, TRJ_bz2)
self.prec = 3
class TestBzippedTRJReaderPBC(_TRJReaderTest, RefCappedAla):
def setUp(self):
self.universe = mda.Universe(PRMpbc, TRJpbc_bz2)
self.prec = 3
class TestTRJTimestep(BaseTimestepTest):
Timestep = mda.coordinates.TRJ.Timestep
name = "TRJ"
has_box = True
set_box = True
unitcell = np.array([10., 11., 12., 90., 90., 90.])
uni_args = (PRM, TRJ)
def test_trj_no_natoms():
assert_raises(ValueError, mda.coordinates.TRJ.TRJReader, 'somefile.txt')
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_trj.py
|
Python
|
gpl-2.0
| 5,312
|
[
"MDAnalysis"
] |
230a9f7b4c7b67ecb56c8ab6cade47d895e79ee03e029ca6be6b6b552bd8d50d
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from os.path import join
from os.path import sep
from unittest.case import skipIf
from commoncode.system import on_windows
from commoncode.system import on_posix
from commoncode.testcase import FileBasedTesting
from commoncode.testcase import make_non_readable
from commoncode.testcase import make_non_writable
from commoncode.testcase import make_non_executable
from commoncode import filetype
from commoncode import fileutils
from commoncode.fileutils import as_posixpath
class TestPermissions(FileBasedTesting):
"""
    Several assertions or tests are skipped on non-POSIX OSes.
Windows handles permissions and special files differently.
"""
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_chmod_on_non_existing_file_throws_no_exception(self):
fileutils.chmod('some non existing dir', fileutils.RWX)
def test_chmod_read_write_recursively_on_dir(self):
test_dir = self.get_test_loc('fileutils/executable', copy=True)
test_file = join(test_dir, 'deep1', 'deep2', 'ctags')
test_dir2 = join(test_dir, 'deep1', 'deep2')
parent = join(test_dir, 'deep1')
try:
make_non_writable(test_file)
assert not filetype.is_writable(test_file)
if on_posix:
make_non_executable(test_file)
assert not filetype.is_executable(test_file)
if on_posix:
make_non_executable(test_dir2)
assert not filetype.is_executable(test_dir2)
make_non_writable(test_dir)
if on_posix:
assert not filetype.is_writable(test_dir2)
fileutils.chmod(parent, fileutils.RW, recurse=True)
assert filetype.is_readable(test_dir2) is True
assert filetype.is_writable(test_dir2)
if on_posix:
assert filetype.is_executable(test_dir2)
finally:
fileutils.chmod(test_dir, fileutils.RW, recurse=True)
def test_chmod_read_write_non_recursively_on_dir(self):
test_dir = self.get_test_loc('fileutils/executable', copy=True)
test_file = join(test_dir, 'deep1', 'deep2', 'ctags')
test_dir = join(test_dir, 'deep1', 'deep2')
parent = join(test_dir, 'deep1')
try:
# setup
make_non_writable(test_file)
assert not filetype.is_writable(test_file)
make_non_writable(test_dir)
if on_posix:
assert not filetype.is_writable(test_dir)
else:
# windows is different
assert filetype.is_writable(test_dir)
fileutils.chmod(parent, fileutils.RW, recurse=False)
# test: the perms should be the same
assert not filetype.is_writable(test_file)
if on_posix:
assert not filetype.is_writable(test_dir)
else:
# windows is different
assert filetype.is_writable(test_dir)
finally:
fileutils.chmod(test_dir, fileutils.RW, recurse=True)
def test_chmod_read_write_file(self):
test_dir = self.get_test_loc('fileutils/executable', copy=True)
test_file = join(test_dir, 'deep1', 'deep2', 'ctags')
try:
make_non_writable(test_file)
assert not filetype.is_writable(test_file)
fileutils.chmod(test_file, fileutils.RW)
assert filetype.is_readable(test_file)
assert filetype.is_writable(test_file)
finally:
fileutils.chmod(test_dir, fileutils.RW, recurse=True)
def test_chmod_read_write_exec_dir(self):
test_dir = self.get_test_loc('fileutils/executable', copy=True)
test_file = join(test_dir, 'deep1', 'deep2', 'ctags')
try:
if on_posix:
make_non_executable(test_dir)
assert not filetype.is_executable(test_file)
make_non_writable(test_dir)
fileutils.chmod(test_dir, fileutils.RWX, recurse=True)
assert filetype.is_readable(test_file)
assert filetype.is_writable(test_file)
if on_posix:
assert filetype.is_executable(test_file)
finally:
fileutils.chmod(test_dir, fileutils.RW, recurse=True)
def test_copyfile_does_not_keep_permissions(self):
src_file = self.get_temp_file()
dest = self.get_temp_dir()
with open(src_file, 'wb') as f:
f.write('')
try:
make_non_readable(src_file)
if on_posix:
assert not filetype.is_readable(src_file)
fileutils.copyfile(src_file, dest)
dest_file = join(dest, os.listdir(dest)[0])
assert filetype.is_readable(dest_file)
finally:
fileutils.chmod(src_file, fileutils.RW, recurse=True)
fileutils.chmod(dest, fileutils.RW, recurse=True)
def test_copytree_does_not_keep_non_writable_permissions(self):
src = self.get_test_loc('fileutils/exec', copy=True)
dst = self.get_temp_dir()
try:
src_file = join(src, 'subtxt/a.txt')
make_non_writable(src_file)
assert not filetype.is_writable(src_file)
src_dir = join(src, 'subtxt')
make_non_writable(src_dir)
if on_posix:
assert not filetype.is_writable(src_dir)
# copy proper
dest_dir = join(dst, 'dest')
fileutils.copytree(src, dest_dir)
dst_file = join(dest_dir, 'subtxt/a.txt')
assert os.path.exists(dst_file)
assert filetype.is_writable(dst_file)
dest_dir2 = join(dest_dir, 'subtxt')
assert os.path.exists(dest_dir2)
assert filetype.is_writable(dest_dir)
finally:
fileutils.chmod(src, fileutils.RW, recurse=True)
fileutils.chmod(dst, fileutils.RW, recurse=True)
def test_copytree_copies_unreadable_files(self):
src = self.get_test_loc('fileutils/exec', copy=True)
dst = self.get_temp_dir()
src_file1 = join(src, 'a.bat')
src_file2 = join(src, 'subtxt', 'a.txt')
try:
# make some unreadable source files
make_non_readable(src_file1)
if on_posix:
assert not filetype.is_readable(src_file1)
make_non_readable(src_file2)
if on_posix:
assert not filetype.is_readable(src_file2)
# copy proper
dest_dir = join(dst, 'dest')
fileutils.copytree(src, dest_dir)
dest_file1 = join(dest_dir, 'a.bat')
assert os.path.exists(dest_file1)
assert filetype.is_readable(dest_file1)
dest_file2 = join(dest_dir, 'subtxt', 'a.txt')
assert os.path.exists(dest_file2)
assert filetype.is_readable(dest_file2)
finally:
fileutils.chmod(src, fileutils.RW, recurse=True)
fileutils.chmod(dst, fileutils.RW, recurse=True)
def test_delete_unwritable_directory_and_files(self):
base_dir = self.get_test_loc('fileutils/readwrite', copy=True)
test_dir = join(base_dir, 'sub')
test_file = join(test_dir, 'file')
try:
            # note: there are no unreadable/unwritable dirs on Windows
make_non_readable(test_file)
make_non_executable(test_file)
make_non_writable(test_file)
make_non_readable(test_dir)
make_non_executable(test_dir)
make_non_writable(test_dir)
fileutils.delete(test_dir)
assert not os.path.exists(test_dir)
finally:
fileutils.chmod(base_dir, fileutils.RW, recurse=True)
class TestFileUtils(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
@skipIf(on_windows, 'Windows handles special files differently.')
def test_copytree_does_not_copy_fifo(self):
# Windows does not support pipes
src = self.get_test_loc('fileutils/filetype', copy=True)
dest = self.get_temp_dir()
src_file = join(src, 'myfifo')
os.mkfifo(src_file) # @UndefinedVariable
dest_dir = join(dest, 'dest')
fileutils.copytree(src, dest_dir)
assert not os.path.exists(join(dest_dir, 'myfifo'))
def test_copyfile_keeps_modified_date(self):
test_file = self.get_test_loc('fileutils/exec/subtxt/a.txt', copy=True)
dest = self.get_temp_file()
expected = 1289918700
os.utime(test_file, (expected, expected))
fileutils.copyfile(test_file, dest)
result = os.stat(dest).st_mtime
assert expected == result
def test_copyfile_can_copy_file_to_dir_keeping_full_file_name(self):
test_file = self.get_test_loc('fileutils/exec/subtxt/a.txt', copy=True)
dest = self.get_temp_dir()
expected = os.path.join(dest, 'a.txt')
fileutils.copyfile(test_file, dest)
assert os.path.exists(expected)
def test_read_text_file_with_posix_LF_line_endings(self):
test_file = self.get_test_loc('fileutils/textfiles/unix_newlines.txt')
result = fileutils.read_text_file(test_file)[:172]
expected = (
'/**************************************************************/\n'
'/* ADDR.C */\n/* Author: John Doe, 7/2000 */\n'
'/* Copyright 1999 Cornell University. All rights reserved. */\n')
assert expected == result
def test_read_text_file_with_dos_CRLF_line_endings(self):
test_file = self.get_test_loc('fileutils/textfiles/dos_newlines.txt')
result = fileutils.read_text_file(test_file)[:70]
expected = ('package com.somecompany.somepackage;\n'
'\n/**\n * Title: Some Title\n')
assert expected == result
def test_read_text_file_with_mac_CR_lines_endings(self):
test_file = self.get_test_loc('fileutils/textfiles/mac_newlines.txt')
result = fileutils.read_text_file(test_file)[:55]
expected = ('package com.mycompany.test.sort;\n\n/*\n'
' * MergeSort.java\n')
assert expected == result
def test_resource_name(self):
assert 'f' == fileutils.resource_name('/a/b/d/f/f')
assert 'f' == fileutils.resource_name('/a/b/d/f/f/')
assert 'f' == fileutils.resource_name('a/b/d/f/f/')
assert 'f.a' == fileutils.resource_name('/a/b/d/f/f.a')
assert 'f.a' == fileutils.resource_name('/a/b/d/f/f.a/')
assert 'f.a' == fileutils.resource_name('a/b/d/f/f.a')
assert 'f.a' == fileutils.resource_name('f.a')
def test_os_walk_with_unicode_path(self):
test_dir = self.extract_test_zip('fileutils/walk/unicode.zip')
test_dir = join(test_dir, 'unicode')
test_dir = unicode(test_dir)
result = list(os.walk(test_dir))
expected = [
(unicode(test_dir), ['a'], [u'2.csv']),
(unicode(test_dir) + sep + 'a', [], [u'gru\u0308n.png'])
]
assert expected == result
def test_fileutils_walk(self):
test_dir = self.get_test_loc('fileutils/walk')
base = self.get_test_loc('fileutils')
result = [(as_posixpath(t.replace(base, '')), d, f,) for t, d, f in fileutils.walk(test_dir)]
expected = [
('/walk', ['d1'], ['f', 'unicode.zip']),
('/walk/d1', ['d2'], ['f1']),
('/walk/d1/d2', ['d3'], ['f2']),
('/walk/d1/d2/d3', [], ['f3'])
]
assert expected == result
def test_fileutils_walk_with_unicode_path(self):
test_dir = self.extract_test_zip('fileutils/walk/unicode.zip')
test_dir = join(test_dir, 'unicode')
test_dir = unicode(test_dir)
result = list(fileutils.walk(test_dir))
expected = [
(unicode(test_dir), ['a'], [u'2.csv']),
(unicode(test_dir) + sep + 'a', [], [u'gru\u0308n.png'])
]
assert expected == result
def test_fileutils_walk_can_walk_a_single_file(self):
test_file = self.get_test_loc('fileutils/walk/f')
result = list(fileutils.walk(test_file))
expected = [
(fileutils.parent_directory(test_file), [], ['f'])
]
assert expected == result
def test_fileutils_walk_can_walk_an_empty_dir(self):
test_dir = self.get_temp_dir()
result = list(fileutils.walk(test_dir))
expected = [
(test_dir, [], [])
]
assert expected == result
def test_file_iter(self):
test_dir = self.get_test_loc('fileutils/walk')
base = self.get_test_loc('fileutils')
result = [as_posixpath(f.replace(base, '')) for f in fileutils.file_iter(test_dir)]
expected = [
'/walk/f',
'/walk/unicode.zip',
'/walk/d1/f1',
'/walk/d1/d2/f2',
'/walk/d1/d2/d3/f3'
]
assert expected == result
def test_file_iter_can_iterate_a_single_file(self):
test_file = self.get_test_loc('fileutils/walk/f')
result = [as_posixpath(f) for f in fileutils.file_iter(test_file)]
expected = [as_posixpath(test_file)]
assert expected == result
def test_file_iter_can_walk_an_empty_dir(self):
test_dir = self.get_temp_dir()
result = list(fileutils.file_iter(test_dir))
expected = []
assert expected == result
class TestName(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_file_base_name_on_path_and_location(self):
test_dir = self.get_test_loc('fileutils/basename', copy=True)
tests = [
('a/.a/file', 'file'),
('a/.a/', '.a'),
('a/b/.a.b', '.a'),
('a/b/a.tag.gz', 'a.tag'),
('a/b/', 'b'),
('a/f.a', 'f'),
('a/', 'a'),
('f.a/a.c', 'a'),
('f.a/', 'f.a'),
('tst', 'tst'),
]
for test_file, name in tests:
result = fileutils.file_base_name(test_file)
assert name == result
# also test on location
result = fileutils.file_base_name((os.path.join(test_dir, test_file)))
assert name == result
def test_file_name_on_path_and_location(self):
test_dir = self.get_test_loc('fileutils/basename', copy=True)
tests = [
('a/.a/file', 'file'),
('a/.a/', '.a'),
('a/b/.a.b', '.a.b'),
('a/b/a.tag.gz', 'a.tag.gz'),
('a/b/', 'b'),
('a/f.a', 'f.a'),
('a/', 'a'),
('f.a/a.c', 'a.c'),
('f.a/', 'f.a'),
('tst', 'tst'),
]
for test_file, name in tests:
result = fileutils.file_name(test_file)
assert name == result
# also test on location
result = fileutils.file_name((os.path.join(test_dir, test_file)))
assert name == result
def test_file_extension_on_path_and_location(self):
test_dir = self.get_test_loc('fileutils/basename', copy=True)
tests = [
('a/.a/file', ''),
('a/.a/', ''),
('a/b/.a.b', '.b'),
('a/b/a.tag.gz', '.gz'),
('a/b/', ''),
('a/f.a', '.a'),
('a/', ''),
('f.a/a.c', '.c'),
('f.a/', ''),
('tst', ''),
]
for test_file, name in tests:
result = fileutils.file_extension(test_file)
assert name == result
# also test on location
result = fileutils.file_extension((os.path.join(test_dir, test_file)))
assert name == result
def test_parent_directory_on_path_and_location(self):
test_dir = self.get_test_loc('fileutils/basename', copy=True)
tests = [
('a/.a/file', 'a/.a/'),
('a/.a/', 'a/'),
('a/b/.a.b', 'a/b/'),
('a/b/a.tag.gz', 'a/b/'),
('a/b/', 'a/'),
('a/f.a', 'a/'),
('a/', '/'),
('f.a/a.c', 'f.a/'),
('f.a/', '/'),
('tst', '/'),
]
for test_file, name in tests:
result = fileutils.parent_directory(test_file)
assert name == result
# also test on location
result = fileutils.parent_directory((os.path.join(test_dir, test_file)))
assert result.endswith(name)
|
retrography/scancode-toolkit
|
tests/commoncode/test_fileutils.py
|
Python
|
apache-2.0
| 17,989
|
[
"VisIt"
] |
77cb3c82d38b49e30e1ce75f8cc398a7cf3f77d34c880b7eeb3fe59705e67d81
|
# Copyright (C) 2017
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013,2017
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************************
espressopp.analysis.Temperature
*******************************
Calculate the temperature of the system (in :math:`k_B T` units).
.. function:: espressopp.analysis.Temperature(system)
:param std::shared_ptr system: system object
:returns: temperature
:rtype: real
Temperature of the system of :math:`N` particles is calculated as:
.. math::
T = \frac{1}{N_f} \sum^N_{i=1} m_i v_i^2,
where :math:`m_i` and :math:`v_i` are the mass and velocity of a
particle :math:`i`.
:math:`N_f = 3N` is the number of the system's degrees of freedom.
**Example:**
>>> # declare an object, e.g., T:
>>> T = espressopp.analysis.Temperature(system)
>>>
>>> # later in your script compute temperature and print it:
>>> print T.compute()
.. function:: espressopp.analysis.Temperature.add_type(type_id)
    :param type_id: The particle type id to observe
.. function:: espressopp.analysis.Temperature.remove_type(type_id)
    :param type_id: The particle type id to remove
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_Temperature
class TemperatureLocal(ObservableLocal, analysis_Temperature):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_Temperature, system)
def add_type(self, type_id):
if pmi.workerIsActive():
self.cxxclass.add_type(self, type_id)
def remove_type(self, type_id):
if pmi.workerIsActive():
ret_val = self.cxxclass.remove_type(self, type_id)
if not ret_val:
print(('Warning, type {} not found'.format(type_id)))
if pmi.isController:
class Temperature(Observable, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.analysis.TemperatureLocal',
pmicall = ['add_type', 'remove_type']
)
|
espressopp/espressopp
|
src/analysis/Temperature.py
|
Python
|
gpl-3.0
| 3,008
|
[
"ESPResSo"
] |
a5c3d984f1c6892e7395ea763730e9d4b82b5e9ea4573c501d375442d31504c4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Indexing.
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size (though still dependent on the feature set size)
* corpora that are streamed: documents are only accessed sequentially, no
random-access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock performance on the English Wikipedia (2G corpus positions, 3.2M
documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import itertools
import sys
import numpy
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
logger = logging.getLogger('gensim.models.lsimodel')
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
"""
Given eigenvalues `s`, return how many factors should be kept to avoid
    storing spurious (tiny, numerically unstable) values.
This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).
The returned value is clipped against `k` (= never return more than `k`).
"""
# compute relative contribution of eigenvalues towards the energy spectrum
rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
# ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
k = min(k, small) # clip against k
logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)" %
(k, 100 * rel_spectrum[k - 1]))
return k
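# Illustrative sketch (hypothetical helper, not part of the original module):
# a numerically negligible tail of the spectrum is dropped, e.g. the fourth
# factor below should be discarded.
def _demo_clip_spectrum():
    return clip_spectrum(numpy.array([10.0, 5.0, 1.0, 1e-9]), 4)   # expected: 3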
def asfarray(a, name=''):
if not a.flags.f_contiguous:
logger.debug("converting %s array %s to FORTRAN order" % (a.shape, name))
a = numpy.asfortranarray(a)
return a
def ascarray(a, name=''):
if not a.flags.contiguous:
logger.debug("converting %s array %s to C order" % (a.shape, name))
a = numpy.ascontiguousarray(a)
return a
class Projection(utils.SaveLoad):
def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
"""
Construct the (U, S) projection from a corpus `docs`. The projection can
be later updated by merging it with another Projection via `self.merge()`.
This is the class taking care of the 'core math'; interfacing with corpora,
splitting large corpora into chunks and merging them etc. is done through
the higher-level `LsiModel` class.
"""
self.m, self.k = m, k
self.power_iters = power_iters
self.extra_dims = extra_dims
if docs is not None:
# base case decomposition: given a job `docs`, compute its decomposition,
# *in-core*.
if not use_svdlibc:
u, s = stochastic_svd(docs, k, chunksize=sys.maxsize, num_terms=m,
power_iters=self.power_iters, extra_dims=self.extra_dims)
else:
try:
import sparsesvd
except ImportError:
raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
logger.info("computing sparse SVD of %s matrix" % str(docs.shape))
if not scipy.sparse.issparse(docs):
docs = matutils.corpus2csc(docs)
ut, s, vt = sparsesvd.sparsesvd(docs,
k + 30) # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
u = ut.T
del ut, vt
k = clip_spectrum(s ** 2, self.k)
self.u = u[:, :k].copy()
self.s = s[:k].copy()
else:
self.u, self.s = None, None
def empty_like(self):
return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)
def merge(self, other, decay=1.0):
"""
Merge this Projection with another.
The content of `other` is destroyed in the process, so pass this function a
copy of `other` if you need it further.
"""
if other.u is None:
# the other projection is empty => do nothing
return
if self.u is None:
# we are empty => result of merge is the other projection, whatever it is
self.u = other.u.copy()
self.s = other.s.copy()
return
if self.m != other.m:
raise ValueError("vector space mismatch: update is using %s features, expected %s" %
(other.m, self.m))
logger.info("merging projections: %s + %s" % (str(self.u.shape), str(other.u.shape)))
m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
# TODO Maybe keep the bases as elementary reflectors, without
# forming explicit matrices with ORGQR.
        # The only operations we ever need are basis^T*basis and basis*component.
# But how to do that in scipy? And is it fast(er)?
# find component of u2 orthogonal to u1
logger.debug("constructing orthogonal component")
self.u = asfarray(self.u, 'self.u')
c = numpy.dot(self.u.T, other.u)
self.u = ascarray(self.u, 'self.u')
other.u -= numpy.dot(self.u, c)
other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
assert not other.u
# find the rotation that diagonalizes r
k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
[matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
logger.debug("computing SVD of %s dense matrix" % str(k.shape))
try:
# in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
# for these early versions of numpy, catch the error and try to compute
# SVD again, but over k*k^T.
# see http://www.mail-archive.com/numpy-discussion@scipy.org/msg07224.html and
# bug ticket http://projects.scipy.org/numpy/ticket/706
u_k, s_k, _ = numpy.linalg.svd(k,
full_matrices=False) # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :(
except numpy.linalg.LinAlgError:
logger.error("SVD(A) failed; trying SVD(A * A^T)")
u_k, s_k, _ = numpy.linalg.svd(numpy.dot(k, k.T),
full_matrices=False) # if this fails too, give up with an exception
            s_k = numpy.sqrt(s_k)  # go back from eigenvalues to singular values
k = clip_spectrum(s_k ** 2, self.k)
u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]
# update & rotate current basis U = [U, U']*[U1_k, U2_k]
logger.debug("updating orthonormal basis U")
self.s = s_k
self.u = ascarray(self.u, 'self.u')
self.u = numpy.dot(self.u, u1_k)
q = ascarray(q, 'q')
q = numpy.dot(q, u2_k)
self.u += q
# make each column of U start with a non-negative number (to force canonical decomposition)
if self.u.shape[0] > 0:
for i in range(self.u.shape[1]):
if self.u[0, i] < 0.0:
self.u[:, i] *= -1.0
# diff = numpy.dot(self.u.T, self.u) - numpy.eye(self.u.shape[1])
# logger.info('orth error after=%f' % numpy.sum(diff * diff))
#endclass Projection
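# Illustrative sketch (hypothetical helper, not part of the original module):
# the core of Projection.merge() in plain NumPy, for two truncated
# decompositions (u1, s1) and (u2, s2) over the same term space, assuming
# more terms than factors (m >= n2).
def _merge_projections_sketch(u1, s1, u2, s2, k, decay=1.0):
    c = numpy.dot(u1.T, u2)                        # overlap of the two bases
    q, r = numpy.linalg.qr(u2 - numpy.dot(u1, c))  # orthogonal complement of u2
    n1, n2 = u1.shape[1], u2.shape[1]
    top = numpy.hstack([decay * numpy.diag(s1), c * s2])
    bottom = numpy.hstack([numpy.zeros((n2, n1)), r * s2])
    u_k, s_k, _ = numpy.linalg.svd(numpy.vstack([top, bottom]), full_matrices=False)
    return numpy.dot(numpy.hstack([u1, q]), u_k[:, :k]), s_k[:k]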
class LsiModel(interfaces.TransformationABC):
"""
Objects of this class allow building and maintaining a model for Latent
Semantic Indexing (also known as Latent Semantic Analysis).
The main methods are:
1. constructor, which initializes the projection into latent topics space,
2. the ``[]`` method, which returns representation of any input document in the
latent space,
3. `add_documents()` for incrementally updating the model with new documents.
The left singular vectors are stored in `lsi.projection.u`, singular values
in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
of `lsi[training_corpus]`, if needed.
Model persistency is achieved via its load/save methods.
"""
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, distributed=False, onepass=True,
power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
"""
`num_topics` is the number of requested factors (latent dimensions).
After the model has been trained, you can estimate topics for an
arbitrary, unseen document, using the ``topics = self[document]`` dictionary
notation. You can also add new training documents, with ``self.add_documents``,
so that training can be stopped and resumed at any time, and the
LSI transformation is available at any point.
If you specify a `corpus`, it will be used to train the model. See the
method `add_documents` for a description of the `chunksize` and `decay` parameters.
Turn `onepass` off to force a multi-pass stochastic algorithm.
`power_iters` and `extra_samples` affect the accuracy of the stochastic
multi-pass algorithm, which is used either internally (`onepass=True`) or
as the front-end algorithm (`onepass=False`). Increasing the number of
power iterations improves accuracy, but lowers performance. See [2]_ for
some hard numbers.
Turn on `distributed` to enable distributed computing.
Example:
>>> lsi = LsiModel(corpus, num_topics=10)
>>> print lsi[doc_tfidf] # project some document into LSI space
>>> lsi.add_documents(corpus2) # update LSI on additional documents
>>> print lsi[doc_tfidf]
.. [2] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
"""
self.id2word = id2word
self.num_topics = int(num_topics)
self.chunksize = int(chunksize)
self.decay = float(decay)
if distributed:
if not onepass:
logger.warning("forcing the one-pass algorithm for distributed LSA")
onepass = True
self.onepass = onepass
self.extra_samples, self.power_iters = extra_samples, power_iters
if corpus is None and self.id2word is None:
raise ValueError(
'at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + max([-1] + list(self.id2word.keys()))
self.docs_processed = 0
self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters,
extra_dims=self.extra_samples)
self.numworkers = 1
if not distributed:
logger.info("using serial LSI version on this node")
self.dispatcher = None
else:
if not onepass:
raise NotImplementedError("distributed stochastic LSA not implemented yet; "
"run either distributed one-pass, or serial randomized.")
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
dispatcher._pyroOneway.add("exit")
logger.debug("looking for dispatcher at %s" % str(dispatcher._pyroUri))
dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
chunksize=chunksize, decay=decay,
power_iters=self.power_iters, extra_samples=self.extra_samples,
distributed=False, onepass=onepass)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
# distributed version was specifically requested, so this is an error state
logger.error("failed to initialize distributed LSI (%s)" % err)
raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
if corpus is not None:
self.add_documents(corpus)
def add_documents(self, corpus, chunksize=None, decay=None):
"""
Update singular value decomposition to take into account a new
corpus of documents.
Training proceeds in chunks of `chunksize` documents at a time. The size of
`chunksize` is a tradeoff between increased speed (bigger `chunksize`)
vs. lower memory footprint (smaller `chunksize`). If the distributed mode
is on, each chunk is sent to a different worker/computer.
Setting `decay` < 1.0 causes re-orientation towards new data trends in the
input document stream, by giving less emphasis to old observations. This allows
LSA to gradually "forget" old observations (documents) and give more
preference to new ones.
"""
logger.info("updating model with new documents")
# get computation parameters; if not specified, use the ones from constructor
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if not scipy.sparse.issparse(corpus):
if not self.onepass:
# we are allowed multiple passes over the input => use a faster, randomized two-pass algo
update = Projection(self.num_terms, self.num_topics, None)
update.u, update.s = stochastic_svd(corpus, self.num_topics,
num_terms=self.num_terms, chunksize=chunksize,
extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
else:
# the one-pass algo
doc_no = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info("preparing a new chunk of documents")
nnz = sum(len(doc) for doc in chunk)
# construct the job as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense matrix!
logger.debug("converting corpus to csc format")
job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
del chunk
doc_no += job.shape[1]
if self.dispatcher:
# distributed version: add this job to the job queue, so workers can work on it
logger.debug("creating job #%i" % chunk_no)
self.dispatcher.putjob(
job) # put job into queue; this will eventually block, because the queue has a small finite size
del job
logger.info("dispatched documents up to #%s" % doc_no)
else:
# serial version, there is only one "worker" (myself) => process the job directly
update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples,
power_iters=self.power_iters)
del job
self.projection.merge(update, decay=decay)
del update
logger.info("processed documents up to #%s" % doc_no)
self.print_topics(5)
# wait for all workers to finish (distributed version only)
if self.dispatcher:
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
self.projection = self.dispatcher.getstate()
# logger.info("top topics after adding %i documents" % doc_no)
# self.print_debug(10)
else:
assert not self.dispatcher, "must be in serial mode to receive jobs"
assert self.onepass, "distributed two-pass algo not supported yet"
update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples,
power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
logger.info("processed sparse job of %i documents" % (corpus.shape[1]))
def __str__(self):
return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize)
def __getitem__(self, bow, scaled=False, chunksize=512):
"""
Return latent representation, as a list of (topic_id, topic_value) 2-tuples.
This is done by folding input document into the latent topic space.
"""
assert self.projection.u is not None, "decomposition not initialized yet"
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus and chunksize:
            # by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
            # this chunking is completely transparent to the user, but it speeds
            # up internal computations (one mat * mat multiplication, instead of
            # many smaller mat * vec multiplications).
return self._apply(bow, chunksize=chunksize)
if not is_corpus:
bow = [bow]
vec = matutils.corpus2csc(bow, num_terms=self.num_terms)
topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x
if scaled:
            topic_dist *= (1.0 / self.projection.s[:self.num_topics])  # s^-1 * u^-1 * x
# convert a numpy array to gensim sparse vector = tuples of (feature_id, feature_weight),
# with no zero weights.
if not is_corpus:
# lsi[single_document]
result = matutils.full2sparse(topic_dist.flat)
else:
# lsi[chunk of documents]
result = matutils.Dense2Corpus(topic_dist)
return result
def show_topic(self, topicno, topn=10):
"""
        Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
        as a list of (weight, word) 2-tuples.
        Return only the `topn` words which contribute the most to the direction
        of the topic (both negative and positive).
        >>> lsimodel.print_topic(10, topn=5)  # `print_topic` formats the same topic as a string
        '-0.340*"category" + 0.298*"$M$" + 0.183*"algebra" + -0.174*"functor" + -0.168*"operator"'
"""
# size of the projection matrix can actually be smaller than `self.num_topics`,
# if there were not enough factors (real rank of input matrix smaller than
# `self.num_topics`). in that case, return an empty string
if topicno >= len(self.projection.u.T):
return ''
c = numpy.asarray(self.projection.u.T[topicno, :]).flatten()
norm = numpy.sqrt(numpy.sum(numpy.dot(c, c)))
most = numpy.abs(c).argsort()[::-1][:topn]
return [(1.0 * c[val] / norm, self.id2word[val]) for val in most]
def print_topic(self, topicno, topn=10):
return ' + '.join(['%.3f*"%s"' % v for v in self.show_topic(topicno, topn)])
def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
"""
Show `num_topics` most significant topics (show all by default).
        For each topic, show `num_words` most significant words (10 words by default).
Return the shown topics as a list -- a list of strings if `formatted` is
True, or a list of (value, word) 2-tuples if it's False.
If `log` is True, also output this result to log.
"""
shown = []
if num_topics < 0:
num_topics = self.num_topics
for i in range(min(num_topics, self.num_topics)):
if i < len(self.projection.s):
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i(%.3f): %s" %
(i, self.projection.s[i],
topic))
return shown
def print_topics(self, num_topics=5, num_words=10):
"""Alias for `show_topics()` which prints the top 5 topics to log."""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def print_debug(self, num_topics=5, num_words=10):
"""
Print (to log) the most salient words of the first `num_topics` topics.
Unlike `print_topics()`, this looks for words that are significant for a
particular topic *and* not for others. This *should* result in a more
human-interpretable description of topics.
"""
# only wrap the module-level fnc
print_debug(self.id2word, self.projection.u, self.projection.s,
list(range(min(num_topics, len(self.projection.u.T)))),
num_words=num_words)
def save(self, fname):
"""
Override the default `save` (which uses cPickle), because that's
too inefficient and cPickle has bugs. Instead, single out the large transformation
matrix and store that separately in binary format (that can be directly
mmap'ed back in `load()`), under `fname.npy`.
"""
logger.info("storing %s object to %s and %s" % (self.__class__.__name__, fname, fname + '.npy'))
        if self.projection.u is None:
            # model not initialized: there is no projection to store separately
            utils.pickle(self, fname)
            return
# first, remove the projection from self.__dict__, so it doesn't get pickled
u, dispatcher = self.projection.u, self.dispatcher
del self.projection.u
self.dispatcher = None
try:
utils.pickle(self, fname) # store projection-less object
numpy.save(fname + '.npy', ascarray(u)) # store projection
finally:
self.projection.u, self.dispatcher = u, dispatcher
@classmethod
def load(cls, fname):
"""
Load a previously saved object from file (also see `save`).
"""
logger.debug("loading %s object from %s" % (cls.__name__, fname))
result = utils.unpickle(fname)
ufname = fname + '.npy'
try:
result.projection.u = numpy.load(ufname, mmap_mode='r') # load back as read-only
except:
logger.debug("failed to load mmap'ed projection from %s" % ufname)
result.dispatcher = None # TODO load back incl. distributed state? will require re-initialization of worker state
return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
if num_neg is None:
        # by default, print half as many salient negative words as positive
        num_neg = num_words // 2
logger.info('computing word-topic salience for %i topics' % len(topics))
topics, result = set(topics), {}
# TODO speed up by block computation
for uvecno, uvec in enumerate(u):
uvec = numpy.abs(numpy.asarray(uvec).flatten())
udiff = uvec / numpy.sqrt(numpy.sum(numpy.dot(uvec, uvec)))
for topic in topics:
result.setdefault(topic, []).append((udiff[topic], uvecno))
logger.debug("printing %i+%i salient words" % (num_words, num_neg))
for topic in sorted(result.keys()):
weights = sorted(result[topic], key=lambda x: -abs(x[0]))
_, most = weights[0]
if u[most, topic] < 0.0: # the most significant word has a negative sign => flip sign of u[most]
normalize = -1.0
else:
normalize = 1.0
# order features according to salience; ignore near-zero entries in u
pos, neg = [], []
for weight, uvecno in weights:
if normalize * u[uvecno, topic] > 0.0001:
pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(pos) >= num_words:
break
for weight, uvecno in weights:
if normalize * u[uvecno, topic] < -0.0001:
neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(neg) >= num_neg:
break
logger.info('topic #%s(%.3f): %s, ..., %s' % (topic, s[topic], ', '.join(pos), ', '.join(neg)))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
power_iters=0, dtype=numpy.float64, eps=1e-6):
"""
Return (U, S): the left singular vectors and the singular values of the streamed
input corpus `corpus` [3]_.
    This may actually return fewer than the requested number of top `rank` factors,
in case the input is of lower rank. The `extra_dims` (oversampling) and especially
`power_iters` (power iterations) parameters affect accuracy of the decomposition.
This algorithm uses `2+power_iters` passes over the data. In case you can only
afford a single pass over the input corpus, set `onepass=True` in :class:`LsiModel`
and avoid using this algorithm directly.
The decomposition algorithm is based on
**Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**
.. [3] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
corpus fits into core memory and a different (more efficient) code path is chosen.
"""
global num_docs
rank = int(rank)
if extra_dims is None:
samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
else:
samples = rank + int(extra_dims)
logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))
num_terms = int(num_terms)
# first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
# build Y in blocks of `chunksize` documents (much faster than going one-by-one
# and more memory friendly than processing all documents at once)
y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
logger.info("1st phase: constructing %s action matrix" % str(y.shape))
if scipy.sparse.issparse(corpus):
m, n = corpus.shape
assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
corpus.data, o.ravel(), y.ravel()) # y = corpus * o
del o
# unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
# so check for equal dtype explicitly, to avoid the extra memory footprint if possible
if y.dtype != dtype:
y = y.astype(dtype)
logger.info("orthonormalizing %s action matrix" % str(y.shape))
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
logger.debug("running %i power iterations" % power_iters)
for power_iter in range(power_iters):
q = corpus.T * q
q = [corpus * q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
else:
num_docs = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
# construct the chunk as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
s = sum(len(doc) for doc in chunk)
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
m, n = chunk.shape
assert m == num_terms
assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
num_docs += n
logger.debug("multiplying chunk * gauss")
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
chunk.data, o.ravel(), y.ravel())
del chunk, o
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
for power_iter in range(power_iters):
logger.info("running power iteration #%i" % (power_iter + 1))
yold = q.copy()
q[:] = 0.0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
tmp = chunk.T * yold
tmp = chunk * tmp
del chunk
q += tmp
del yold
q = [q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range
qt = q[:, :samples].T.copy()
del q
if scipy.sparse.issparse(corpus):
b = qt * corpus
logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
u, s, vt = numpy.linalg.svd(b, full_matrices=False)
del b, vt
else:
# second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
# again, construct X incrementally, in chunks of `chunksize` documents from the streaming
# input corpus A, to avoid using O(number of documents) memory
x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
b = qt * chunk # dense * sparse matrix multiply
del chunk
x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
del b
# now we're ready to compute decomposition of the small matrix X
logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
        u, s, vt = numpy.linalg.svd(x)  # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = numpy.sqrt(s)  # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
q = qt.T.copy()
del qt
logger.info("computing the final decomposition")
keep = clip_spectrum(s ** 2, rank, discard=eps)
u = u[:, :keep].copy()
s = s[:keep]
u = numpy.dot(q, u)
return u.astype(dtype), s.astype(dtype)
|
samantp/gensimPy3
|
gensim-develop/gensim/models/lsimodel.py
|
Python
|
gpl-3.0
| 33,665
|
[
"Gaussian"
] |
10517a412f1178f93d2b5c83661bfc1b00c8c06837ab7266e9291f4b6405b305
|
# paths.py
#
# Created by Brett H. Andrews on 12 Jun 2017.
import os
from os.path import join
from functools import reduce
import click
import numpy as np
@click.command()
@click.option('--stackname', default='dr7_M0.1e')
@click.option('--binnames', default=None, multiple=True)
@click.option('--path_mzr', default=None)
@click.option('--overwrite', '-o', default=False, is_flag=True)
def generate_filepaths(stackname, binnames, path_mzr, overwrite):
"""Build file paths to spectra on passport external hard drive.
Parameters:
stackname (str):
Name of the set of stacks. Default is ``dr7_M0.1e``.
binnames (str):
Name of bins to copy. Default is ``None``, which copies
all bins.
path_mzr (str):
Path to the parent directory containing the stack data,
scripts, etc. Default is ``None``.
overwrite (bool):
If ``True``, overwrite existing files. Default is ``False``.
"""
# data directory on passport external hard drive
path_data = join('//', 'Volumes', 'My Passport', 'osu', 'andrews', 'projects',
'mass-metallicity', 'data', 'raw_FITS_dr7')
assert os.path.isdir(path_data), f'``path_data`` does not exist: {path_data}'
# mass-metallicity relation project directory on MacBook Air
path_mzr_default = join(os.path.expanduser('~'), 'projects', 'mzr')
path_mzr = path_mzr if path_mzr is not None else path_mzr_default
assert os.path.isdir(path_mzr), f'``path_mzr`` does not exist: {path_mzr}'
# stacks directory on MacBook Air
path_stack = join(path_mzr, 'stacks', stackname)
assert os.path.isdir(path_stack), f'``path_stack`` does not exist: {path_stack}'
# read in bin masses (and SFRs) and number of galaxies per bin
if not binnames:
with open(join(path_stack, 'auxiliary', 'binnames.txt'), 'r') as fin:
binnames = [line.strip() for line in fin]
# create output directory
path_filelists = join(path_stack, 'filelists')
if not os.path.isdir(path_filelists):
os.mkdir(path_filelists)
click.echo(f'Created directory: {path_filelists}')
click.echo('Files written:')
for binname in binnames:
binpar = binname.split('_n')[0]
filelist_in = join(path_stack, binpar, 'filelist', binname + '_filenames.txt')
with open(filelist_in, 'r') as fin:
paths = [line.split('raw_FITS_dr7/')[1].strip() for line in fin]
# paths to files on passport external hard drive
files_passport = [join(path_data, it) for it in paths]
# write paths to files on passport to a local file
filelist_out = join(path_filelists, binpar + '.txt')
if not os.path.isfile(filelist_out) or overwrite:
with open(filelist_out, 'w') as fout:
for it in files_passport:
fout.write(it + '\n')
click.echo(f'{filelist_out}')
else:
click.echo(f'Not written (overwrite with --overwrite): {filelist_out}')
def get_table_index(table, filename):
"""Get index of table corresponding to spectrum.
Parameters:
table (DataFrame):
Table of galaxy properties.
filename (str):
Name of spectrum FITS file.
Returns:
int
"""
mjd, pid, fid = [int(it) for it in filename.split('spSpec-')[-1].strip('.fit').split('-')]
ind_mjd = np.where(table.mjd == mjd)
ind_pid = np.where(table.pid == pid)
ind_fid = np.where(table.fid == fid)
return reduce(np.intersect1d, (ind_mjd, ind_pid, ind_fid))[0]
|
bretthandrews/stacking
|
stacking/paths.py
|
Python
|
bsd-3-clause
| 3,624
|
[
"Galaxy"
] |
d61d2aa32c2272aef5866fe76e2665e2069a805360881a9a0b16a8f53218b21c
|
# eliteBonusGunshipExplosionVelocity2
#
# Used by:
# Ship: Jaguar
type = "passive"
def handler(fit, src, context):
fit.modules.filteredChargeBoost(lambda mod: mod.charge.requiresSkill("Missile Launcher Operation"), "aoeVelocity",
src.getModifiedItemAttr("eliteBonusGunship2"), stackingPenalties=True, skill="Assault Frigates")
|
bsmr-eve/Pyfa
|
eos/effects/elitebonusgunshipexplosionvelocity2.py
|
Python
|
gpl-3.0
| 369
|
[
"Jaguar"
] |
b20b30b58807c734d29edd17f6fab98fb3d8af24ae9ec1165ee74189f048502f
|
../../../../../../../share/pyshared/orca/scripts/apps/gnome-terminal/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/gnome-terminal/script.py
|
Python
|
gpl-3.0
| 78
|
[
"ORCA"
] |
94c5826a92e7d5d58ffb5b8ceea71544186540640eebf542d2835acea29f4cbd
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QGIS Web Processing Service Plugin
-------------------------------------------------------------------
Date : 09 November 2009
Copyright : (C) 2009 by Dr. Horst Duester
email : horst dot duester at kappasys dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from __future__ import absolute_import
from builtins import str
from builtins import range
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtNetwork import *
from qgis.PyQt.QtWidgets import QApplication, QMessageBox
from qgis.PyQt import QtXml
from qgis.core import QgsNetworkAccessManager
from .wpsserver import WpsServer
from collections import namedtuple
import os
from .. import apicompat
from .wpsservercookie import WpsServerCookie
# Process description example:
#
#<?xml version="1.0" encoding="utf-8"?>
#<wps:ProcessDescriptions xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd" service="WPS" version="1.0.0" xml:lang="eng">
# <ProcessDescription wps:processVersion="1.0" storeSupported="true" statusSupported="true">
# <ows:Identifier>returner</ows:Identifier>
# <ows:Title>Return process</ows:Title>
# <ows:Abstract>This is demonstration process of PyWPS, returns the same file, it gets on input, as the output.</ows:Abstract>
# <DataInputs>
# <Input minOccurs="1" maxOccurs="1">
# <ows:Identifier>text</ows:Identifier>
# <ows:Title>Some width</ows:Title>
# <LiteralData>
# <ows:DataType ows:reference="http://www.w3.org/TR/xmlschema-2/#string">string</ows:DataType>
# <ows:AnyValue />
# </LiteralData>
# </Input>
# <Input minOccurs="1" maxOccurs="1">
# <ows:Identifier>data</ows:Identifier>
# <ows:Title>Input vector data</ows:Title>
# <ComplexData>
# <Default>
# <Format>
# <ows:MimeType>text/xml</ows:MimeType>
# </Format>
# </Default>
# <Supported>
# <Format>
# <ows:MimeType>text/xml</ows:MimeType>
# </Format>
# </Supported>
# </ComplexData>
# </Input>
# </DataInputs>
# <ProcessOutputs>
# <Output>
# <ows:Identifier>output2</ows:Identifier>
# <ows:Title>Output vector data</ows:Title>
# <ComplexOutput>
# <Default>
# <Format>
# <ows:MimeType>text/xml</ows:MimeType>
# </Format>
# </Default>
# <Supported>
# <Format>
# <ows:MimeType>text/xml</ows:MimeType>
# </Format>
# </Supported>
# </ComplexOutput>
# </Output>
# <Output>
# <ows:Identifier>text</ows:Identifier>
# <ows:Title>Output literal data</ows:Title>
# <LiteralOutput>
# <ows:DataType ows:reference="http://www.w3.org/TR/xmlschema-2/#integer">integer</ows:DataType>
# </LiteralOutput>
# </Output>
# <Output>
# <ows:Identifier>output1</ows:Identifier>
# <ows:Title>Output vector data</ows:Title>
# <ComplexOutput>
# <Default>
# <Format>
# <ows:MimeType>text/xml</ows:MimeType>
# </Format>
# </Default>
# <Supported>
# <Format>
# <ows:MimeType>text/xml</ows:MimeType>
# </Format>
# </Supported>
# </ComplexOutput>
# </Output>
# </ProcessOutputs>
# </ProcessDescription>
#</wps:ProcessDescriptions>
# All supported import raster formats
RASTER_MIMETYPES = [{"MIMETYPE":"image/tiff", "GDALID":"GTiff", "EXTENSION":"tif"},
{"MIMETYPE":"image/png", "GDALID":"PNG", "EXTENSION":"png"}, \
{"MIMETYPE":"image/gif", "GDALID":"GIF", "EXTENSION":"gif"}, \
{"MIMETYPE":"image/jpeg", "GDALID":"JPEG", "EXTENSION":"jpg"}, \
{"MIMETYPE":"image/geotiff", "GDALID":"GTiff", "EXTENSION":"tif"}, \
{"MIMETYPE":"application/x-erdas-hfa", "GDALID":"HFA", "EXTENSION":""}, \
{"MIMETYPE":"application/netcdf", "GDALID":"netCDF", "EXTENSION":""}, \
{"MIMETYPE":"application/x-netcdf", "GDALID":"netCDF", "EXTENSION":""}, \
{"MIMETYPE":"application/geotiff", "GDALID":"GTiff", "EXTENSION":"tif"}, \
{"MIMETYPE":"application/x-geotiff", "GDALID":"GTiff", "EXTENSION":"tif"}, \
{"MIMETYPE":"application/x-esri-ascii-grid", "GDALID":"AAIGrid", "EXTENSION":"asc"}, \
{"MIMETYPE":"application/image-ascii-grass", "GDALID":"GRASSASCIIGrid", "EXTENSION":"asc"}]
# All supported input vector formats [mime type, schema]
VECTOR_MIMETYPES = [{"MIMETYPE":"application/x-zipped-shp", "SCHEMA":"", "GDALID":"ESRI Shapefile", "DATATYPE":"SHP", "EXTENSION":"zip"}, \
{"MIMETYPE":"application/vnd.google-earth.kml+xml", "SCHEMA":"KML", "GDALID":"KML", "DATATYPE":"KML", "EXTENSION":"kml"}, \
{"MIMETYPE":"text/xml", "SCHEMA":"GML", "GDALID":"GML", "DATATYPE":"GML", "EXTENSION":"gml"}, \
{"MIMETYPE":"text/xml; subtype=gml/2.", "SCHEMA":"GML2", "GDALID":"GML", "DATATYPE":"GML2", "EXTENSION":"gml"}, \
{"MIMETYPE":"text/xml; subtype=gml/3.", "SCHEMA":"GML3", "GDALID":"GML", "DATATYPE":"GML3", "EXTENSION":"gml"}, \
{"MIMETYPE":"application/json", "SCHEMA":"JSON", "GDALID":"GEOJSON", "DATATYPE":"JSON", "EXTENSION":"json"}, \
{"MIMETYPE":"application/geojson", "SCHEMA":"GEOJSON", "GDALID":"GEOJSON", "DATATYPE":"GEOJSON", "EXTENSION":"geojson"}]
# mimeTypes for streaming
PLAYLIST_MIMETYPES = [{"MIMETYPE":"application/x-ogc-playlist+", "SCHEMA":"", "GDALID":"", "DATATYPE":"PLAYLIST", "EXTENSION":"txt"}]
FILE_MIMETYPES = [{"MIMETYPE":"application/octet-stream"}]
# Helper methods for reading WPS XML
def getOwsElement(element, name):
return element.elementsByTagNameNS("http://www.opengis.net/ows/1.1", name)
def getIdentifierTitleAbstractFromElement(element):
identifier = pystring(getOwsElement(element, "Identifier").at(0).toElement().text()).strip()
title = pystring(getOwsElement(element, "Title").at(0).toElement().text()).strip()
abstract = pystring(getOwsElement(element, "Abstract").at(0).toElement().text()).strip()
return identifier, title, abstract
def getDefaultMimeType(inElement):
myElement = inElement.elementsByTagName("Default").at(0).toElement()
return getMimeTypeSchemaEncoding(myElement)
def getSupportedMimeTypes(inElement):
mimeTypes = []
myElements = inElement.elementsByTagName("Supported").at(0).toElement()
myFormats = myElements.elementsByTagName('Format')
for i in range(myFormats.size()):
myElement = myFormats.at(i).toElement()
mimeTypes.append(getMimeTypeSchemaEncoding(myElement))
return mimeTypes
def getMimeTypeSchemaEncoding(element):
mimeType = ""
schema = ""
encoding = ""
# try:
mimeType = pystring(element.elementsByTagName("MimeType").at(0).toElement().text()).strip().lower()
schema = pystring(element.elementsByTagName("Schema").at(0).toElement().text()).strip().lower()
encoding = pystring(element.elementsByTagName("Encoding").at(0).toElement().text()).strip().lower()
# except:
# pass
return {"MimeType":mimeType, "Schema":schema, "Encoding":encoding}
def isMimeTypeRaster(mimeType, ignorePlaylist = False):
"""Check for raster input"""
if not ignorePlaylist:
if isMimeTypePlaylist(mimeType) != None:
return None
for rasterType in RASTER_MIMETYPES:
if rasterType["MIMETYPE"] in mimeType.lower():
return rasterType["GDALID"]
return None
def isMimeTypeVector(mimeType, ignorePlaylist = False):
"""Check for vector input. Zipped shapefiles must be extracted"""
if not ignorePlaylist:
if isMimeTypePlaylist(mimeType) != None:
return None
for vectorType in VECTOR_MIMETYPES:
if vectorType["MIMETYPE"] in mimeType.lower():
return vectorType["GDALID"]
return None
def isMimeTypeText(mimeType):
"""Check for text file input"""
if mimeType.upper() == "TEXT/PLAIN":
return "TXT"
else:
return None
def isMimeTypeFile(mimeType):
"""Check for file output"""
for fileType in FILE_MIMETYPES:
if fileType["MIMETYPE"] in mimeType.lower():
return "ZIP"
return None
def isMimeTypePlaylist(mimeType):
"""Check for playlists"""
for playlistType in PLAYLIST_MIMETYPES:
if playlistType["MIMETYPE"] in mimeType.lower():
return playlistType["DATATYPE"]
return None
def getBaseMimeType(dataType):
    # Return a base mimeType (might not be complete) from a data type (e.g. GML2)
for vectorType in VECTOR_MIMETYPES:
if vectorType["DATATYPE"] == dataType.upper():
return vectorType["MIMETYPE"]
return None
def getFileExtension(mimeType):
# Return the extension associated to the mime type (e.g. tif)
if isMimeTypeVector(mimeType):
for vectorType in VECTOR_MIMETYPES:
if vectorType["MIMETYPE"] in mimeType.lower():
return "." + vectorType["EXTENSION"]
elif isMimeTypeRaster(mimeType):
for rasterType in RASTER_MIMETYPES:
if rasterType["MIMETYPE"] in mimeType.lower():
return "." + rasterType["EXTENSION"]
return ""
def getOGRVersion():
# Data conversion options might vary according to the OGR version
try:
import osgeo.gdal
return int(osgeo.gdal.VersionInfo())
except:
return 0 # If not accessible, assume it is 0
def isGML3SupportedByOGR():
# GDAL/OGR versions <= 1800 don't support the FORMAT=GML3 option
version = getOGRVersion()
if version < 1800: # OGR < 1.8.0
return False
else:
return True
def allowedValues(aValues):
valList = []
# Manage a value list defined by a range
value_element = aValues.at(0).toElement()
v_range_element = getOwsElement(value_element, "Range")
if v_range_element.size() > 0:
min_val = getOwsElement(value_element, "MinimumValue").at(0).toElement().text()
max_val = getOwsElement(value_element, "MaximumValue").at(0).toElement().text()
try:
for n in range(int(min_val), int(max_val) + 1):
myVal = pystring(str(n))
#myVal.append(str(n))
valList.append(myVal)
except:
QMessageBox.critical(None, QApplication.translate("QgsWps", "Error"), QApplication.translate("QgsWps", "Maximum allowed Value is too large"))
# Manage a value list defined by single values
v_element = getOwsElement(value_element, "Value")
if v_element.size() > 0:
for n in range(v_element.size()):
mv_element = v_element.at(n).toElement()
valList.append(pystring(mv_element.text()).strip())
return valList
StringInput = namedtuple('StringInput', 'identifier title minOccurs defaultValue')
TextInput = namedtuple('TextInput', 'identifier title minOccurs dataFormat')
SelectionInput = namedtuple('SelectionInput', 'identifier title, minOccurs valList')
VectorInput = namedtuple('VectorInput', 'identifier title minOccurs dataFormat')
MultipleVectorInput = namedtuple('MultipleVectorInput', 'identifier title minOccurs dataFormat')
RasterInput = namedtuple('RasterInput', 'identifier title minOccurs dataFormat')
MultipleRasterInput = namedtuple('MultipleRasterInput', 'identifier title minOccurs dataFormat')
FileInput = namedtuple('FileInput', 'identifier title minOccurs dataFormat')
MultipleFileInput = namedtuple('MultipleFileInput', 'identifier title minOccurs dataFormat')
ExtentInput = namedtuple('ExtentInput', 'identifier title minOccurs')
CrsInput = namedtuple('CrsInput', 'identifier title minOccurs crsList')
VectorOutput = namedtuple('VectorOutput', 'identifier title dataFormat')
RasterOutput = namedtuple('RasterOutput', 'identifier title dataFormat')
StringOutput = namedtuple('StringOutput', 'identifier title')
class ProcessDescription(QObject):
"""
Request and parse a WPS process description
"""
describeProcessFinished = pyqtSignal()
def __init__(self, server, identifier):
QObject.__init__(self)
self.server = server
self.version = server.version
self.identifier = identifier
self._requestExecuted = False
self.doc = None
self.inputs = []
self.outputs = []
@staticmethod
def getBookmarks():
settingsgrp = QSettings()
settingsgrp.beginGroup("WPS-Bookmarks")
bookmarks = settingsgrp.childGroups()
processList = []
for myBookmark in bookmarks:
settings = QSettings()
mySettings = "/WPS-Bookmarks/"+myBookmark
#old redundant server properties:
#scheme = settings.value(mySettings+"/scheme").toString()
#server = settings.value(mySettings+"/server").toString()
#path = settings.value(mySettings+"/path").toString()
#port = settings.value(mySettings+"/port").toString()
#version = settings.value(mySettings+"/version").toString()
myBookmarkArray = myBookmark.split("@@")
connectionName = myBookmarkArray[0]
identifier = pystring(settings.value(mySettings+"/identifier"))
server = WpsServer.getServer(connectionName)
process = ProcessDescription(server, identifier)
processList.append(process)
settingsgrp.endGroup()
return processList
def key(self):
return self.server.connectionName+"@@"+self.identifier
def saveBookmark(self):
settings = QSettings()
mySettings = "/WPS-Bookmarks/"+self.key()
#old redundant server properties:
#settings.setValue(mySettings+"/scheme", processUrl.scheme())
#settings.setValue(mySettings+"/server", processUrl.host())
#settings.setValue(mySettings+"/path", processUrl.path())
#settings.setValue(mySettings+"/port", processUrl.port())
#settings.setValue(mySettings+"/version", processUrl.queryItemValue('version'))
settings.setValue(mySettings+"/identifier", self.identifier)
def removeBookmark(self):
settings = QSettings()
settings.beginGroup("WPS-Bookmarks")
settings.remove(self.key())
settings.endGroup()
def requestUrl(self):
url = QUrl(self.server.baseUrl)
query = QUrlQuery()
query.addQueryItem('Request', 'DescribeProcess')
query.addQueryItem('Service', 'WPS')
query.addQueryItem('Version', self.version)
query.addQueryItem('identifier', self.identifier)
url.setQuery(query)
return url
def requestDescribeProcess(self):
"""
Request process description
"""
self._requestExecuted = False
self.doc = None
self.inputs = []
self.outputs = []
url = self.requestUrl()
myHttp = QgsNetworkAccessManager.instance()
request = QNetworkRequest(url)
# add cookies in header
serverCookie = WpsServerCookie(url)
if serverCookie.checkServerCookies():
request.setRawHeader("Cookie", serverCookie.getServerCookies())
self._theReply = myHttp.get(request)
self._theReply.finished.connect(self._describeProcessFinished)
@pyqtSlot()
def _describeProcessFinished(self):
# Receive the XML process description
self.processUrl = self._theReply.url()
self.processXML = self._theReply.readAll().data()
# get the cookie information from http header
cookies = self._theReply.header(QNetworkRequest.SetCookieHeader)
serverCookie = WpsServerCookie(self.processUrl)
if cookies is not None:
            QMessageBox.information(None, '', "This is the first time this server is being used.")
serverCookie.setServerCookies(cookies)
self._theReply.deleteLater()
qDebug(self.processXML)
self._parseProcessXML()
self._requestExecuted = True
self.describeProcessFinished.emit()
def processDescriptionFile(self, basePath):
return self.server.processDescriptionFolder(basePath) + "/" + self.identifier
def loadDescription(self, path):
self.processUrl = self.requestUrl()
self.processXML = open(self.processDescriptionFile(path)).read()
self._parseProcessXML()
def _parseProcessXML(self):
self.doc = QtXml.QDomDocument()
self.doc.setContent(self.processXML, True)
processDescription = self.doc.elementsByTagName("ProcessDescription")
self.processIdentifier = pystring(processDescription.at(0).toElement().elementsByTagNameNS("http://www.opengis.net/ows/1.1","Identifier").at(0).toElement().text()).strip()
self.processName = pystring(processDescription.at(0).toElement().elementsByTagNameNS("http://www.opengis.net/ows/1.1","Title").at(0).toElement().text()).strip()
self.identifier, self.title, self.abstract = getIdentifierTitleAbstractFromElement(self.doc)
self.inputs = []
self.outputs = []
self._parseProcessInputs()
self._parseProcessOutputs()
def loaded(self):
return self._requestExecuted
def saveDescription(self, basePath):
dir = self.server.processDescriptionFolder(basePath)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(self.processDescriptionFile(basePath), "wb")
f.write(self.processXML)
f.close()
def _parseProcessInputs(self):
"""
Populate self.inputs and self.outputs arrays from process description
"""
self._inputsMetaInfo = {} # dictionary for input metainfo, key is the input identifier
dataInputs = self.doc.elementsByTagName("Input")
# Create the complex inputs at first
for i in range(dataInputs.size()):
f_element = dataInputs.at(i).toElement()
inputIdentifier, title, abstract = getIdentifierTitleAbstractFromElement(f_element)
minOccurs = int(f_element.attribute("minOccurs", "1"))
maxOccurs = int(f_element.attribute("maxOccurs", "1"))
# Iterate over all complex inputs and add combo boxes, text boxes or list widgets
complexData = f_element.elementsByTagName("ComplexData")
if complexData.size() > 0:
                # Evaluate the i-th ComplexData object
complexDataTypeElement = complexData.at(0).toElement()
supportedComplexDataFormat = getSupportedMimeTypes(complexDataTypeElement)
complexDataFormat = getDefaultMimeType(complexDataTypeElement)
# Store the input formats
self._inputsMetaInfo[inputIdentifier] = supportedComplexDataFormat
# Attach the selected vector or raster maps
if isMimeTypeVector(complexDataFormat["MimeType"]) != None:
# Since it is a vector, choose an appropriate GML version
complexDataFormat = self.getSupportedGMLDataFormat(inputIdentifier)
if complexDataFormat == None :
QMessageBox.warning(None, QApplication.translate("QgsWps", "Error"),
QApplication.translate("QgsWps", "The process '{0}' does not seem to support GML for the parameter '{1}', which is required by the QGIS WPS client.").format(self.processIdentifier, inputIdentifier))
return 0
# Vector inputs
if maxOccurs == 1:
self.inputs.append(VectorInput(inputIdentifier, title, minOccurs, complexDataFormat))
else:
self.inputs.append(MultipleVectorInput(inputIdentifier, title, minOccurs, complexDataFormat))
elif isMimeTypeText(complexDataFormat["MimeType"]) != None:
# Text inputs
self.inputs.append(TextInput(inputIdentifier, title, minOccurs, complexDataFormat))
elif isMimeTypeRaster(complexDataFormat["MimeType"]) != None:
# Raster inputs
if maxOccurs == 1:
self.inputs.append(RasterInput(inputIdentifier, title, minOccurs, complexDataFormat))
else:
self.inputs.append(MultipleRasterInput(inputIdentifier, title, minOccurs, complexDataFormat))
elif isMimeTypePlaylist(complexDataFormat["MimeType"]) != None:
# Playlist (text) inputs
self.inputs.append(TextInput(inputIdentifier, title, minOccurs, complexDataFormat))
elif isMimeTypeFile(complexDataFormat["MimeType"]) != None:
if maxOccurs == 1:
self.inputs.append(FileInput(inputIdentifier, title, minOccurs, complexDataFormat))
else:
self.inputs.append(MultipleFileInput(inputIdentifier, title, minOccurs, complexDataFormat))
else:
# We assume text inputs in case of an unknown mime type
self.inputs.append(TextInput(inputIdentifier, title, minOccurs, complexDataFormat))
# Create the literal inputs as second
for i in range(dataInputs.size()):
f_element = dataInputs.at(i).toElement()
inputIdentifier, title, abstract = getIdentifierTitleAbstractFromElement(f_element)
minOccurs = int(f_element.attribute("minOccurs", "1"))
maxOccurs = int(f_element.attribute("maxOccurs", "1"))
literalData = f_element.elementsByTagName("LiteralData")
if literalData.size() > 0:
allowedValuesElement = literalData.at(0).toElement()
aValues = getOwsElement(allowedValuesElement, "AllowedValues")
dValue = str(allowedValuesElement.elementsByTagName("DefaultValue").at(0).toElement().text())
if aValues.size() > 0:
valList = allowedValues(aValues)
if len(valList) > 0:
if len(valList[0]) > 0:
self.inputs.append(SelectionInput(inputIdentifier, title, minOccurs, valList))
else:
self.inputs.append(StringInput(inputIdentifier, title, minOccurs, str(valList)))
else:
self.inputs.append(StringInput(inputIdentifier, title, minOccurs, dValue))
# At last, create the bounding box inputs
for i in range(dataInputs.size()):
f_element = dataInputs.at(i).toElement()
inputIdentifier, title, abstract = getIdentifierTitleAbstractFromElement(f_element)
minOccurs = int(f_element.attribute("minOccurs", "1"))
maxOccurs = int(f_element.attribute("maxOccurs", "1"))
bBoxData = f_element.elementsByTagName("BoundingBoxData")
if bBoxData.size() > 0:
crsListe = []
bBoxElement = bBoxData.at(0).toElement()
defaultCrsElement = bBoxElement.elementsByTagName("Default").at(0).toElement()
defaultCrs = defaultCrsElement.elementsByTagName("CRS").at(0).toElement().attributeNS("http://www.w3.org/1999/xlink", "href")
crsListe.append(defaultCrs)
self.inputs.append(ExtentInput(inputIdentifier, title, minOccurs))
supportedCrsElements = bBoxElement.elementsByTagName("Supported")
for i in range(supportedCrsElements.size()):
crsListe.append(supportedCrsElements.at(i).toElement().elementsByTagName("CRS").at(0).toElement().attributeNS("http://www.w3.org/1999/xlink", "href"))
self.inputs.append(CrsInput(inputIdentifier, title, minOccurs, crsListe))
def _parseProcessOutputs(self):
dataOutputs = self.doc.elementsByTagName("Output")
if dataOutputs.size() < 1:
return
# Add all complex outputs
for i in range(dataOutputs.size()):
f_element = dataOutputs.at(i).toElement()
outputIdentifier, title, abstract = getIdentifierTitleAbstractFromElement(f_element)
literalOutputType = f_element.elementsByTagName("LiteralOutput")
if literalOutputType.size() != 0:
self.outputs.append(StringOutput(outputIdentifier, title))
complexOutput = f_element.elementsByTagName("ComplexOutput")
if complexOutput.size() > 0:
complexOutputTypeElement = complexOutput.at(0).toElement()
complexOutputFormat = getDefaultMimeType(complexOutputTypeElement)
supportedcomplexOutputFormat = getSupportedMimeTypes(complexOutputTypeElement)
if isMimeTypeVector(complexOutputFormat["MimeType"]) != None:
self.outputs.append(VectorOutput(outputIdentifier, title, complexOutputFormat))
else:
self.outputs.append(RasterOutput(outputIdentifier, title, complexOutputFormat))
def getServiceVersion(self):
root = self.doc.documentElement()
version = root.attribute("version")
return version
def isDataTypeSupportedByServer(self, baseMimeType, name):
# Return if the given data type is supported by the WPS server
for dataType in self._inputsMetaInfo[pystring(name)]:
if baseMimeType in dataType['MimeType']:
return True
return False
def getDataTypeInfo(self, mimeType, name):
# Return a dict with mimeType, schema and encoding for the given mimeType
for dataType in self._inputsMetaInfo[name]:
if mimeType in dataType['MimeType']:
return dataType
return None
def getSupportedGMLVersion(self, dataIdentifier):
# Return GML version, e.g., GML, GML2, GML3
if isGML3SupportedByOGR() and self.isDataTypeSupportedByServer(getBaseMimeType("GML3"), dataIdentifier):
return "GML3"
elif self.isDataTypeSupportedByServer(getBaseMimeType("GML2"), dataIdentifier):
return "GML2"
elif self.isDataTypeSupportedByServer(getBaseMimeType("GML"), dataIdentifier):
return "GML"
else:
return ""
def getSupportedGMLDataFormat(self, dataIdentifier):
# Return mimeType, schema and encoding for the supported GML version
supportedGML = self.getSupportedGMLVersion(dataIdentifier)
if supportedGML != "":
return self.getDataTypeInfo(getBaseMimeType(supportedGML), dataIdentifier)
else:
return None
|
sourcepole/qgis-wps-client
|
wpslib/processdescription.py
|
Python
|
gpl-2.0
| 27,991
|
[
"NetCDF"
] |
fc47d98d39e70d5329e6c320e61bbdb78eaf3513881d1c238b93d3c1afe4df48
|
# -*- coding: utf-8 -*-
################################################################################
###
### ReadLAMMPSDump - v0.1.8 - May 03, 2018
###
################################################################################
###
### a package to read LAMMPS Dump files
### (it assumes that the data column names and the number of atoms do not change)
###
################################################################################
## example:
## import read_lammps_dump as rd
## data = rd.LAMMPS_Dump(filename)
##
import numpy as np
from time import time
from sportran.utils import log
def is_string(string):
try:
float(string)
except ValueError:
return True
return False
def is_vector_variable(string):
bracket = string.rfind('[')
if (bracket == -1):
bracket = 0
return bracket
def file_length(filename):
i = -1
with open(filename) as f:
for i, l in enumerate(f, 1):
pass
return i
def get_volume(filename):
f = open(filename, 'r')
line = f.readline()
while (line):
if 'BOX BOUNDS' in line:
xlo, xhi = list(map(float, f.readline().split()))
ylo, yhi = list(map(float, f.readline().split()))
zlo, zhi = list(map(float, f.readline().split()))
break
line = f.readline()
f.close()
volume = (xhi - xlo) * (yhi - ylo) * (zhi - zlo)
return volume
def get_natoms(filename):
f = open(filename, 'r')
line = f.readline()
while (line):
if 'NUMBER OF ATOMS' in line:
natoms = int(f.readline())
break
line = f.readline()
f.close()
return natoms
class LAMMPS_Dump(object):
"""
A LAMMPS_Dump file that can be read in blocks.
example:
traj = LAMMPS_Dump(filename, preload=False) -->> do not preload list of steps (suggested if the file is big)
    traj.read_timesteps(10, start_step=0, select_ckeys=['id', 'xu', 'yu', 'vu']) -->> Read first 10 timesteps, only the specified columns
    traj.read_timesteps(10, select_ckeys=['id', 'xu', 'yu', 'vu']) -->> Read the next 10 timesteps, only the specified columns (DELTA_TIMESTEP is assumed)
traj.read_timesteps((10,30)) -->> Read from TIMESTEP 10 to 30
traj.read_timesteps((10,30,2)) -->> Read every 2 steps from TIMESTEP 10 to 30
print(traj.data)
"""
def __init__(self, *args, **kwargs):
#*******
if (len(args) > 0):
self.filename = args[0]
if (len(args) == 2):
self.select_ckeys = args[1]
else:
self.select_ckeys = None
else:
raise ValueError('No file given.')
group_vectors = kwargs.get('group_vectors', True)
preload_timesteps = kwargs.get('preload', True)
self._quiet = kwargs.get('quiet', False)
self._GUI = kwargs.get('GUI', False)
if self._GUI:
from ipywidgets import FloatProgress
from IPython.display import display
global FloatProgress, display
self._open_file()
self._read_ckeys(group_vectors, preload_timesteps)
self.ckey = None
#self.MAX_NSTEPS = data_length(self.filename)
#log.write_log("Data length = ", self.MAX_NSTEPS)
return
def __repr__(self):
msg = 'LAMMPS_Dump:\n' + \
' filename: {}\n'.format(self.filename) + \
' all_ckeys: {}\n'.format(self.all_ckeys) + \
' select_ckeys: {}\n'.format(self.select_ckeys) + \
' used ckey: {}\n'.format(self.ckey) + \
' all_timesteps: {}\n'.format(self.all_timesteps) + \
' select_timesteps: {}\n'.format(self.select_timesteps) + \
' used timesteps: {}\n'.format(self.timestep) + \
' start pos: {}\n'.format(self._start_byte) + \
' current pos: {}\n'.format(self.file.tell()) + \
' FIRST TIMESTEP: {}\n'.format(self.FIRST_TIMESTEP) + \
' LAST TIMESTEP: {}\n'.format(self.LAST_TIMESTEP) + \
' DELTA TIMESTEP: {}\n'.format(self.DELTA_TIMESTEP) + \
' current step: {}\n'.format(self.current_timestep)
return msg
def _open_file(self):
"""Open the file."""
try:
self.file = open(self.filename, 'r')
except:
raise ValueError('File does not exist.')
return
def _read_ckeys(self, group_vectors=True, preload_timesteps=True):
"""Read the column keys. If group_vectors=True the vector ckeys are grouped togheter"""
self._start_byte = self.file.tell()
self.all_ckeys = {}
self.all_timesteps = []
self.preload_timesteps = preload_timesteps
while True:
line = self.file.readline()
if len(line) == 0: # EOF
raise RuntimeError('Reached EOF, no ckeys found.')
values = np.array(line.split())
if (values[0] == 'ITEM:'):
if (values[1] == 'TIMESTEP'):
self.current_timestep = int(self.file.readline())
self.FIRST_TIMESTEP = self.current_timestep
self.all_timesteps.append(self.current_timestep)
                # optional:
elif ((values[1] == 'NUMBER') and values[2] == 'OF' and values[3] == 'ATOMS'):
self.NATOMS = int(self.file.readline())
elif ((values[1] == 'BOX') and values[2] == 'BOUNDS'):
self.BOX_BOUNDS_TYPE = values[3:6]
xbox = self.file.readline().split()
ybox = self.file.readline().split()
zbox = self.file.readline().split()
self.BOX_BOUNDS = np.array([xbox, ybox, zbox], dtype='float')
elif (values[1] == 'ATOMS'):
for i in range(2, len(values)):
if group_vectors:
bracket = is_vector_variable(values[i]) # get position of left square bracket
else:
bracket = 0
if (bracket == 0): # the variable is a scalar
key = values[i]
if (key[:2] == 'c_'): # remove 'c_' if present
key = key[2:]
self.all_ckeys[key] = [i - 2] # -2 offset
else: # the variable is a vector
key = values[i][:bracket] # name of vector
if (key[:2] == 'c_'): # remove 'c_' if present
key = key[2:]
vecidx = int(values[i][bracket + 1:-1]) # current index
if key in self.all_ckeys: # if this vector is already defined, add this component
if (vecidx > self.all_ckeys[key].size):
                                    self.all_ckeys[key] = np.resize(self.all_ckeys[key], vecidx)
self.all_ckeys[key][vecidx - 1] = i - 2 # -2 offset!
else: # if it is not, define a vector
self.all_ckeys[key] = np.array([0] * vecidx)
self.all_ckeys[key][-1] = i - 2 # -2 offset!
#self._start_byte = self.file.tell()
break
#else:
# self.header += line
if self.preload_timesteps:
# get the list of time steps
while True:
line = self.file.readline()
if len(line) == 0: # EOF
break
if (line == 'ITEM: TIMESTEP\n'):
self.current_timestep = int(self.file.readline())
self.all_timesteps.append(self.current_timestep)
self.LAST_TIMESTEP = self.all_timesteps[-1]
self.DELTA_TIMESTEP = self.all_timesteps[1] - self.FIRST_TIMESTEP
self.TOT_TIMESTEPS = len(self.all_timesteps)
self.all_timesteps = np.array(self.all_timesteps)
else:
log.write_log(' ** No timesteps pre-loaded. Be careful in the selection. **')
# get the first 2 timesteps
while (len(self.all_timesteps) < 2):
line = self.file.readline()
if len(line) == 0: # EOF
break
if (line == 'ITEM: TIMESTEP\n'):
self.current_timestep = int(self.file.readline())
self.all_timesteps.append(self.current_timestep)
self.LAST_TIMESTEP = None
self.DELTA_TIMESTEP = self.all_timesteps[1] - self.FIRST_TIMESTEP
self.TOT_TIMESTEPS = None
self.all_timesteps = None
# go back to the first timestep
self.gototimestep(0) # compute_first = True
self._start_byte = 0
log.write_log(' all_ckeys = ', self.all_ckeys)
log.write_log(' TOT_TIMESTEPS = ', self.TOT_TIMESTEPS)
log.write_log(' FIRST_TIMESTEP = ', self.FIRST_TIMESTEP)
log.write_log(' DELTA_TIMESTEP = ', self.DELTA_TIMESTEP)
log.write_log(' LAST_TIMESTEP = ', self.LAST_TIMESTEP)
log.write_log(' all_timesteps = ', self.all_timesteps)
return
def _set_ckey(self, select_ckeys=None):
"""
Set the ckeys to read from the selected, checking the available ones.
If select_ckeys is not passed, then use the already selected ones, or all the available ones if no selection
was previously made.
"""
if select_ckeys is not None:
self.select_ckeys = select_ckeys
self.ckey = {}
if self.select_ckeys is None: # take all ckeys
self.ckey = self.all_ckeys
else:
for key in self.select_ckeys: # take only the selected ckeys
value = self.all_ckeys.get(key, None)
if value is not None:
self.ckey[key] = value[:] # copy all indexes (up to max dimension for vectors)
else:
log.write_log('Warning: ', key, 'key not found.')
if (len(self.ckey) == 0):
raise KeyError('No ckey set. Check selected keys.')
else:
if not self._quiet:
log.write_log(' ckey = ', self.ckey)
return
def _set_timesteps(self, selection, start_step=-1):
"""Set the timesteps to read from the selected, checking the available ones.
INPUT: N --> Read the next N steps (DELTA_TIMESTEP is assumed)
N, start_step=30 --> Read N steps from the TIMESTEP 30
if compute_first=True, read the current step as well
(10,30) --> Read from TIMESTEP 10 to 30
(10,30,2) --> Read every 2 steps from TIMESTEP 10 to 30"""
if (start_step == -1):
if self._compute_current_step:
start_step = self.current_timestep
else:
start_step = self.current_timestep + self.DELTA_TIMESTEP
elif (start_step == 0):
start_step = self.FIRST_TIMESTEP
if np.isscalar(selection) or (len(selection) == 1): # select N steps from start one
first = start_step
last = self.DELTA_TIMESTEP * selection + start_step
step = None
elif (len(selection) == 2):
first = selection[0]
last = selection[1]
step = None
elif (len(selection) == 3):
first = selection[0]
last = selection[1]
step = selection[2]
if step is None:
step = self.DELTA_TIMESTEP
elif (step % self.DELTA_TIMESTEP != 0):
log.write_log('Warning: step is not a multiple of the detected DELTA_TIMESTEP. You may get errors.')
if (first % step != 0):
first += step - first % step # round first step to the next in the list
self.timestep = []
self.select_timesteps = np.arange(first, last, step) # selected timesteps
if self.preload_timesteps:
for step in self.select_timesteps:
if step in self.all_timesteps:
self.timestep.append(step) # make list of available selected-timesteps
else:
log.write_log('Warning: timestep # {:d} not found.'.format(step))
else:
self.timestep = self.select_timesteps # use all the selected (be careful)
self.nsteps = len(self.timestep) # number of available steps
if (self.nsteps == 0):
raise ValueError('No timestep set. Check selected timesteps.')
else:
if not self._quiet:
log.write_log(' nsteps = ', self.nsteps)
log.write_log(' timestep = ', self.timestep)
return
def _initialize_dic(self):
"""Initialize the data dictionary once the ckeys and timesteps have been set."""
if self.ckey is None:
raise ValueError('ckey not set.')
if self.timestep is None:
raise ValueError('timestep not set.')
self.data = [dict() for i in range(self.nsteps)]
for istep in range(self.nsteps):
for key, idx in self.ckey.items():
if (key == 'element'): # this should be improved
self.data[istep][key] = np.zeros((self.NATOMS, len(idx)), dtype='S8')
else:
self.data[istep][key] = np.zeros((self.NATOMS, len(idx)), dtype='float64')
return
def _gototimestep(self, start_step, fast_check=True):
"""
Go to the start_step-th line in the time series (assumes step=1).
start_step = -1 --> ignore, continue from current step
0 --> go to FIRST timestep
N --> go to N-th timestep
        fast_check = True --> assumes the TIMESTEPs are monotonically increasing.
                     If start_step is passed over and not found, then stop.
"""
if (start_step >= 0):
if (start_step <= self.current_timestep):
# or (self.current_timestep == -1): # if start_step is before/equal the current step
self.file.seek(self._start_byte) # --> start over
if (start_step == 0): # or (self.current_timestep == -1):
goto_step = self.FIRST_TIMESTEP
else:
goto_step = start_step
# search until start_step is found ***** MAY BE IMPROVED KNOWING THE N OF LINES TO SKIP ******
while True:
line = self.file.readline()
if len(line) == 0: # EOF
raise EOFError('Warning (gototimestep): reached EOF. Timestep {} NOT FOUND.'.format(goto_step))
if (line == 'ITEM: TIMESTEP\n'):
self.current_timestep = int(self.file.readline())
if (self.current_timestep == goto_step):
while (self.file.readline().find('ITEM: ATOMS') < 0): # jump to the data part
pass
break
if (fast_check) and (self.current_timestep > goto_step):
raise Warning(
'Warning (gototimestep): Timestep {} NOT FOUND up to current_step = {}. (To force check the whole trajectory set fast_check=False)'
.format(goto_step, self.current_timestep))
else:
pass
return
def gototimestep(self, start_step, fast_check=True):
"""
Go to the start_step-th line in the time series (assumes step=1).
start_step = -1 --> ignore, continue from current step
0 --> go to FIRST timestep
N --> go to N-th timestep
        fast_check = True --> assumes the TIMESTEPs are monotonically increasing.
                     If start_step is passed over and not found, then stop.
"""
## user-called function
self._compute_current_step = True
self._gototimestep(start_step, fast_check)
return
def read_timesteps(self, selection, start_step=-1, select_ckeys=None, fast_check=True):
"""
Read selected keys of file, within the provided range.
Examples:
        read_timesteps(10, start_step=0, select_ckeys=['id', 'xu', 'yu', 'vu']) -->> Read first 10 timesteps, only the specified columns
        read_timesteps(10, select_ckeys=['id', 'xu', 'yu', 'vu']) -->> Read the next 10 timesteps, only the specified columns (DELTA_TIMESTEP is assumed)
read_timesteps((10,30)) -->> Read from TIMESTEP 10 to 30
read_timesteps((10,30,2)) -->> Read every 2 steps from TIMESTEP 10 to 30
"""
if self._GUI:
progbar = FloatProgress(min=0, max=100)
display(progbar)
start_time = time()
self._set_ckey(select_ckeys) # set the ckeys to read --> ckey
self._set_timesteps(selection, start_step) # set the timesteps to read --> timestep
self._initialize_dic() # allocate dictionary --> data
# extract the steps from the file
progbar_step = max(1000, int(0.005 * self.nsteps))
atomid_col = self.all_ckeys['id'][0]
for istep, step in enumerate(self.timestep):
self._gototimestep(step, fast_check) # jump to the desired step,
self.data[istep]['TIMESTEP'] = step
for nat in range(self.NATOMS): # read data (may be unsorted)
line = self.file.readline()
if len(line) == 0: # EOF
raise EOFError('Warning: reached EOF.')
values = np.array(line.split())
for key, idx in self.ckey.items(): # save the selected columns
atomid = int(values[atomid_col]) - 1 # current atom index (in LAMMPS it starts from 1)
if (key == 'element'): # this should be improved
self.data[istep][key][atomid, :] = np.array(list(map(str, values[idx])))
else:
self.data[istep][key][atomid, :] = np.array(list(map(float, values[idx])))
if ((istep + 1) % progbar_step == 0):
if self._GUI:
progbar.value = float(istep + 1) / self.nsteps * 100.
progbar.description = '%g %%' % progbar.value
else:
log.write_log(' step = {:9d} - {:6.2f}% completed'.format(istep + 1,
float(istep + 1) / self.nsteps * 100.))
if self._GUI:
progbar.close()
# check number of steps read, keep an even number of steps
if (istep + 1 < self.nsteps): # (should never happen)
if (istep == 0):
log.write_log('WARNING: no step read.')
return
else:
log.write_log('Warning: less steps read.')
self.nsteps = istep + 1
if not self._quiet:
log.write_log(' ( %d ) steps read.' % (self.nsteps))
log.write_log('DONE. Elapsed time: ', time() - start_time, 'seconds')
self._compute_current_step = False # next time do not compute the current_step
return self.data
|
lorisercole/thermocepstrum
|
sportran/i_o/read_lammps_dump.py
|
Python
|
gpl-3.0
| 19,676
|
[
"LAMMPS"
] |
c4c61551e68fece25b1ac6e1bb9b95ca8174b5b7d0b52646751850b1b890e66f
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "" # (translatable)
BLOG_TITLE = "Remy DeCausemaker at Opensource.com" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://opensource.com/user_articles/10833"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://opensource.com/user_articles/10833"
BLOG_EMAIL = ""
BLOG_DESCRIPTION = "" # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# bg Bulgarian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# nb Norwegian Bokmål
# nl Dutch
# pl Polish
# pt_br Portuguese (Brasil)
# ru Russian
# sk Slovak
# sl Slovene
# sr Serbian (Cyrillic)
# sv Swedish
# tr Turkish [NOT tr_TR]
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('http://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('http://apple.com/', 'Apple'),
# ('http://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 3 theme,
# may present issues if the menu is too large.
# (in bootstrap3, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archives"),
("/categories/index.html", "Tags"),
("/rss.xml", "RSS feed"),
),
}
# Name of the theme to use.
THEME = "bootstrap3"
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "UTC"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used.
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, bootstrap and bootstrap3 already do.
# DATE_FANCINESS = 0
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.html", "stories", "story.tmpl"),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing listings to be processed and stored into
# the output. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ('.txt', '.rst'),
"markdown": ('.md', '.mdown', '.markdown', '.wp'),
"html": ('.html', '.htm')
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
# Warning: this option will change its default value to False in v8!
WRITE_TAG_CLOUD = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.xml (RSS feed for a category)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
# FEED_LENGTH = 10
# Slugify the tag URL to make it easier for users to type; special characters
# are often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
DEPLOY_COMMANDS = {
'default': [
"rsync -rav output/ ../../blog/",
]
}
# For user.github.io OR organization.github.io pages, the DEPLOY branch
# MUST be 'master', and 'gh-pages' for other repositories.
# GITHUB_SOURCE_BRANCH = 'master'
# GITHUB_DEPLOY_BRANCH = 'gh-pages'
# The name of the remote where you wish to push to, using github_deploy.
# GITHUB_REMOTE_NAME = 'origin'
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a single file extension, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <http://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return an "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
#
# Folders containing images to be used in normal posts or
# pages. Images will be scaled down according to IMAGE_THUMBNAIL_SIZE
# and MAX_IMAGE_SIZE options, but will have to be referenced manually
# to be visible on the site. The format is a dictionary of {source:
# relative destination}.
#
# IMAGE_FOLDERS = {'images': ''}
# IMAGE_THUMBNAIL_SIZE = 400
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# more pretty URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this settings are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# FAVICONS contains (name, file, size) tuples.
# Used to create a favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the RSS_FEED, if RSS_TEASERS is True (translatable)
RSS_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the RSS_READ_MORE_LINK and the //rss/item/link in
# RSS feeds. Minimum example for Piwik "pk_campaign=rss" and Google Analytics
# "utm_source=rss&utm_medium=rss&utm_campaign=rss". Advanced option used for
# traffic source tracking.
RSS_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="http://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello',), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
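# A hedged illustration (plain Python, not a Nikola setting) of how the
# positional/keyword pair above would be applied to CONTENT_FOOTER:
#
#   args, kwargs = CONTENT_FOOTER_FORMATS[DEFAULT_LANG]
#   footer_html = CONTENT_FOOTER.format(*args, **kwargs)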
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, googleplus, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "decauseblog"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for page (story) folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the STORY_INDEX
# will not be generated for that directory.
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead strip /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
# SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Default to True
# RSS_TEASERS = True
# Strip HTML in the RSS feed? Default to False
# RSS_PLAIN = False
# A search form to search this site, for the sidebar. You can use a Google
# custom search (http://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- Custom search -->
# <form method="get" id="search" action="//duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s"/>
# <input type="hidden" name="k8" value="#444444"/>
# <input type="hidden" name="k9" value="#D51920"/>
# <input type="hidden" name="kt" value="h"/>
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Custom search with Google-->
# <form id="search" action="//www.google.com/search" method="get" class="navbar-form pull-left">
# <input type="hidden" name="q" value="site:%s" />
# <input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
# </form>
# <!-- End of custom search -->
#""" % SITE_URL
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
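# A hedged illustration (plain Python, not a Nikola setting) of how the named
# groups in the example pattern above would be extracted; the filename is made up:
#
#   import re
#   m = re.match(r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md',
#                '2015-01-31-my-post-My Post.md')
#   # m.group('date')  == '2015-01-31'
#   # m.group('slug')  == 'my-post'
#   # m.group('title') == 'My Post'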
# If you hate "Filenames with Capital Letters and Spaces.md", you should
# set this to true.
UNSLUGIFY_TITLES = True
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify the following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If you don’t like slugified file names ([a-z0-9] and a literal dash),
# and would prefer to use all the characters your file system allows.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# You can configure the logging handlers installed as plugins or change the
# log level of the default stderr handler.
# WARNING: The stderr handler allows only the loglevels of 'INFO' and 'DEBUG'.
# This is done for safety reasons, as blocking out anything other
# than 'DEBUG' may hide important information and break the user
# experience!
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'INFO', 'bubble': True},
# 'smtp': {
# 'from_addr': 'test-errors@example.com',
    # 'recipients': ('test@example.com',),
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
# }
}
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
|
decause/decauseblog
|
new_site/conf.py
|
Python
|
agpl-3.0
| 36,539
|
[
"VisIt"
] |
995c58f697cec9db7726e56df4d9825e6358f045e8143c121dbf69eb37e77ff9
|
#!/usr/bin/env python
"""
vm-manager.py manages the OpenStack VMs
Usage: vm-manager.py [options]
Options:
  -n, --node=NODE_NAME     one of the node names: gitserver
  -c, --config=CONFIG_FILE config file with the needed information (e.g. OpenStack settings)
-i, --id=BUILD_ID unique id for the VM name, mostly CI build ID
-t, --task=VM_TASK create (default), delete
-h this help
Examples:
vm-manager.py --task delete --id=24
$ create vm for control node
 # apt-get install python-novaclient apache-libcloud
# (optional) python-dev, git, python-pip, python-pexpect
$ source trystack-openrc.sh
$ nova list # testing
$ ./cloud_manager.py -t create -c costa.conf -n gitserver
$ ./cloud_manager.py -t delete -c costa.conf -i 34ac
Mail bug reports and suggestions to: Larry Cai
"""
import getopt, sys, os, errno, time, re
import urllib2
import shutil
import sys
import time
import subprocess
import uuid
import random
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
from distutils.version import LooseVersion
import pkg_resources
TIMEOUT_CREATE = 10 * 60  # timeout for VM creation: 10 minutes, in seconds
OS_USERNAME="demo"
OS_PASSWORD="demo"
OS_TENANT_NAME="demo"
OS_AUTH_URL="http://localhost:5000/v2.0/"
flavor_list = {}
image_list = {}
#http://libcloud.apache.org/getting-started.html
# This assumes you don't have SSL set up.
# Note: Code like this poses a security risk (MITM attack) and
# that's the reason why you should never use it for anything else
# besides testing. You have been warned.
libcloud.security.VERIFY_SSL_CERT = False
OpenStack = get_driver(Provider.OPENSTACK)
def read_config(config):
prop = {}
with open(config, 'rb') as propfile:
for line in propfile:
if line.startswith('#'): continue
if line.startswith(" "): continue
if line.find("=") == -1: continue
#print "line=",line
(name,value) = line.split("=")
value = value.strip() # remove space and endline
value = value.strip('"') # remove quote around value "http://"
prop[name]=value
return prop
# print prop["OS_USERNAME"]
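# A hedged sketch of the kind of key=value file read_config() expects. The key
# names below match what the rest of this script looks up (data_home,
# <node>_node_list, <node>_image_name, <node>_flavor_name); the values are
# illustrative assumptions only:
#
#   # costa.conf (hypothetical)
#   data_home="/var/lib/coco/data"
#   gitserver_node_list="gitserver"
#   gitserver_image_name="ubuntu-14.04"
#   gitserver_flavor_name="m1.small"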
def set_global_data(prop):
global OS_USERNAME,OS_PASSWORD,OS_TENANT_NAME,OS_AUTH_URL
OS_USERNAME=os.environ["OS_USERNAME"]
OS_PASSWORD=os.environ["OS_PASSWORD"]
OS_TENANT_NAME=os.environ["OS_TENANT_NAME"]
OS_AUTH_URL=os.environ["OS_AUTH_URL"]
def is_folsom():
nova_ver = LooseVersion(pkg_resources.get_distribution('python-novaclient').version)
return nova_ver >= LooseVersion("2.9.0") and nova_ver < LooseVersion("2012.1")
def get_openstack_client():
"""
ref:
https://github.com/openstack/python-novaclient
https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/OpenStack/page/OpenStack+API+tutorial
http://www.rackspace.com/knowledge_center/article/installing-python-novaclient-on-windows
"""
print "global data:", OS_USERNAME,OS_PASSWORD,OS_TENANT_NAME,OS_AUTH_URL
nt = OpenStack(OS_USERNAME, OS_PASSWORD,ex_force_auth_url=OS_AUTH_URL,ex_force_auth_version='2.0_password',ex_tenant_name=OS_TENANT_NAME)
#nt = OpenStack(username,pwd,ex_force_auth_url=auth_url,ex_force_auth_version='2.0_password',ex_tenant_name="trystack")
return nt
def ping_host(hostname):
returncode = 0
# if windows, it is ping -n 1
returncode=subprocess.call(["ping","-c","1",hostname],stdout=subprocess.PIPE
,stderr=subprocess.PIPE)
if returncode == 0:
pass
elif returncode == 1 or returncode == 2:
sys.stdout.write('.')
sys.stdout.flush()
else:
print "something wrong, error code is %s, please debug ...", returncode
exit(4)
return returncode
def nova_cli_options():
options = ["--no-cache"] if is_folsom() else []
options += ["--os_username",OS_USERNAME,"--os_password",OS_PASSWORD,
"--os_tenant_name",OS_TENANT_NAME,"--os_auth_url",OS_AUTH_URL]
return options
def nova_delete(instance_name):
output=subprocess.check_output(["nova"] + nova_cli_options() +
["delete",instance_name],stderr=subprocess.PIPE)
print "%s is deleted" % instance_name
return output
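# A hedged example of the command line the two helpers above assemble
# (credentials shown are the module defaults, not real ones; the instance
# name is made up to match the NODENAME_buildid pattern used below):
#   nova --os_username demo --os_password demo --os_tenant_name demo \
#        --os_auth_url http://localhost:5000/v2.0/ delete GITSERVER_1a2b3c4d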
def get_instance_id(driver,instance_name):
nodes = driver.list_nodes()
t = [n for n in nodes if n.name == instance_name][0]
#print output
print "==> Get VM instance id:" , t.id
sys.stdout.flush()
return t.id
def create_vm_node(driver,node_name,prop,build_id):
nodename_l = node_name.lower() # lower
nodename_u = node_name.upper() # upper
try:
print "\n= Create_instance for %s" % nodename_u
instance_name = '%s_%s' % (nodename_u, build_id)
vm_image_name = "%s_image_name" % nodename_l
ipv4 = ipv6 = instance_id = ""
#print prop
(ipv4,ipv6, instance_id) = create_instance_with_name(driver, prop["%s_image_name" % nodename_l ], prop["%s_flavor_name" % nodename_l], instance_name)
instance = {}
instance["%s_IPV4" % nodename_u ] = ipv4
instance["%s_IPV6" % nodename_u ] = ipv6
instance["%s_INSTANCE_ID" % nodename_u] = instance_id
return instance
except Exception, e:
print "Exception happens:", e
print prop
print flavor_list
print image_list
exit(1)
def create_vm_bundle(nc,node,build_id, prop):
nodelist_name=node + "_node_list"
if nodelist_name not in prop:
print "can't find ", nodelist_name ,"in config file"
exit(5)
NODE_LIST=prop[nodelist_name].split(",")
# if node not in NODE_LIST:
# print node, "is not supported"
# exit(1)
instance = {}
#print nc.servers.list()
for node_name in NODE_LIST:
# for node_name in ["cdn"]:
vm_data = create_vm_node(nc,node_name,prop,build_id)
instance.update(vm_data)
return instance
def create_instance_with_name(nc, vm_image_name, vm_flavor_name, instance_name):
vm_image = image_list[vm_image_name]
vm_flavor = flavor_list[vm_flavor_name]
print "vm image name:",vm_image_name," flavor name =", vm_flavor_name
return create_instance(nc, vm_image_name, vm_flavor_name, instance_name)
def create_instance(driver, vm_image, vm_flavor, instance_name):
print "start to create instance with information "
print "================="
print " image =", vm_image
print " flavor=", vm_flavor
print " name =", instance_name
print "==================="
sys.stdout.flush()
images = driver.list_images()
sizes = driver.list_sizes()
size = [s for s in sizes if s.name == vm_flavor][0]
print size
image = [i for i in images if i.name == vm_image ][0]
t=driver.create_node(name=instance_name, image=image, size=size)
print "create node finished"
print t
# wait for active
    start_time = time.time()  # reference time for the TIMEOUT_CREATE checks below
status = ""
while t.state != 0:
if time.time() - start_time > TIMEOUT_CREATE:
break
#sys.stdout.write('.')
nodes = driver.list_nodes()
t = [n for n in nodes if n.name == instance_name][0]
#print nodes, "state:"
#print "new node:", t.state, t.private_ips
#t.get() # retrieve the status
if status == 4: # error
print "state is error"
exit(4)
if status != t.state:
status = t.state
print " = status :" , status
sys.stdout.flush()
else:
time.sleep(5)
#sys.stdout.write('.')
if t.state != 0:
print "creating vm is timeout"
exit (3)
stop_time = time.time()
minutes, seconds = divmod(stop_time-start_time, 60)
users = random.randint(1,80)
[ipv4,ipv6] = t.private_ips # here it is internal Ip and external IP
# check IP address to make sure VM is ok ??
print " wait for vm's startup, ping vm %s (ipv6: %s)" % (ipv4,ipv6)
ret = 1
while ret != 0:
if time.time() - start_time > TIMEOUT_CREATE:
break
ret = ping_host(ipv4)
if ret != 0:
print "ping vm %s timeout" % ipv4
exit (3)
instance_id = get_instance_id(driver,instance_name)
return ipv4,ipv6,instance_id
def create_vm(node,build_id,prop):
global flavor_list, image_list
driver = get_openstack_client()
nodes = driver.list_nodes()
images = driver.list_images()
sizes = driver.list_sizes()
#print sizes, images
#return
for flavor in sizes:
#print type(flavor.name), type(flavor.id),flavor.name,flavor.id
flavor_list[flavor.name] = flavor.id
for image in images:
image_list[image.name] = image.id
print "==> Start to create VM for node",node
sys.stdout.flush()
vm_data = create_vm_bundle(driver,node,build_id,prop)
output_dir=prop["data_home"]
generate_output(node,output_dir,build_id, vm_data)
def generate_output(node,output,build_id,instance):
"""
# bundle_data_path=$data_home/$node_$build_id
# filename = bundle.data
"""
dest_dir = "%s/%s_%s" % (output,node,build_id)
filename = "bundle.data"
if not os.path.exists(dest_dir):
print "create dir: ", dest_dir
os.makedirs(dest_dir)
output_file = os.path.join(dest_dir, filename)
print "create output file ", output_file
# loop for all variables to print out
with open(output_file,"wb") as fp:
#fp.write("# this is generated config files")
for key in sorted(instance.iterkeys()):
#print "%s=%s" % (key, instance[key])
fp.write("%s=%s\n" % (key, instance[key]))
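# A hedged example of what the generated bundle.data could look like for a
# node named GITSERVER (values are illustrative only, reusing the sample
# addresses from the list_vm() docstring below):
#   GITSERVER_INSTANCE_ID=4c9e6896-55f0-4e2f-98f0-17ca48d259e7
#   GITSERVER_IPV4=10.0.1.4
#   GITSERVER_IPV6=fec0::f816:3eff:fe62:d36a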
def delete_vm(build_id):
nc = get_openstack_client()
nodes = nc.list_nodes()
t = [n for n in nodes if n.name.endswith(build_id)]
for node in t:
nc.destroy_node(node)
def list_vm():
"""
+--------------------------------------+------------------+--------+---------------------------------------------+
| ID | Name | Status | Networks |
+--------------------------------------+------------------+--------+---------------------------------------------+
| 4c9e6896-55f0-4e2f-98f0-17ca48d259e7 | TESTING_8553f97a | ACTIVE | private=10.0.1.4, fec0::f816:3eff:fe62:d36a |
+--------------------------------------+------------------+--------+---------------------------------------------+
"""
nc = get_openstack_client()
nodes = nc.list_nodes()
head = "+--------------------------------------+------------------+--------+---------------------------------------------+"
fmt = "| %-40s | %-20s | %6s | %40s |"
foot = head
print head
print fmt % ("ID","Name","Status","Networks")
for node in nodes:
print fmt % (node.id,node.name, node.state, node.private_ips)
print foot
def vm_manager(node,task,build_id,config):
prop=read_config(config)
# set openstack variable
set_global_data(prop)
if task == "create":
create_vm(node,build_id,prop)
elif task == "delete":
delete_vm(build_id)
elif task == "list":
list_vm()
def main():
node = "ciserver"
task ="create"
# bundle_data_path=$data_home/ADF-DB_$build_id
# data_home=/var/lib/coco/data
build_id=str(uuid.uuid1())[:8]
config = "costa.conf"
try:
cmdlineOptions, args= getopt.getopt(sys.argv[1:],'hn:c:i:t:o:',
["help","node=","task=","id=","output=","config="])
except getopt.GetoptError, e:
print "Error in a command-line option:\n\t" ,e
sys.exit(1)
for (optName,optValue) in cmdlineOptions:
if optName in ("-h","--help"):
print __doc__
sys.exit(1)
elif optName in ("-n","--node"):
node = optValue
elif optName in ("-c","--config"):
config = optValue
elif optName in ("-t","--task"):
task = optValue
elif optName in ("-i","--id"):
build_id = optValue
else:
print ('Option %s not recognized' % optName)
vm_manager(node,task,build_id,config)
if __name__ == "__main__":
main()
|
larrycai/costa-ci
|
cloud_manager.py
|
Python
|
mit
| 12,356
|
[
"ADF"
] |
c4119d3b90ad13fde94d8ffcb1d9fe5ab9555da44a7d9a77b98140c1caa1cc88
|
../../../../../../../share/pyshared/orca/scripts/apps/xfwm4/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/xfwm4/__init__.py
|
Python
|
gpl-3.0
| 71
|
[
"ORCA"
] |
4b87523a4a20755ba64e50a32187712be5734dc3cad5690d075bc844e4fb4a85
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Test the simulation module."""
import gc
import os
import subprocess
from pathlib import Path
import gsd.hoomd
import hoomd
import pytest
from hypothesis import given, settings
from hypothesis.strategies import integers, tuples
from statdyn import crystals
from statdyn.simulation import equilibrate, initialise, simrun
from statdyn.simulation.params import SimulationParams, paramsContext
OUTDIR = Path('test/tmp')
OUTDIR.mkdir(exist_ok=True)
HOOMD_ARGS="--mode=cpu"
PARAMETERS = SimulationParams(
temperature=0.4,
num_steps=100,
crystal=crystals.TrimerP2(),
outfile_path=OUTDIR,
outfile=OUTDIR / 'testout',
dynamics=False,
hoomd_args=HOOMD_ARGS
)
@pytest.mark.simulation
def test_run_npt():
"""Test an npt run."""
snapshot = initialise.init_from_none(hoomd_args=HOOMD_ARGS)
simrun.run_npt(
snapshot=snapshot,
context=hoomd.context.initialize(''),
sim_params=PARAMETERS,
)
assert True
@given(integers(max_value=10, min_value=1))
@settings(max_examples=5, deadline=None)
@pytest.mark.hypothesis
def test_run_multiple_concurrent(max_initial):
"""Test running multiple concurrent."""
snapshot = initialise.init_from_file(
Path('test/data/Trimer-13.50-3.00.gsd'),
hoomd_args=HOOMD_ARGS,
)
with paramsContext(PARAMETERS, max_initial=max_initial):
simrun.run_npt(snapshot,
context=hoomd.context.initialize(''),
sim_params=PARAMETERS
)
assert True
gc.collect()
def test_thermo():
"""Test the _set_thermo function works.
There are many thermodynamic values set in the function and ensuring that
they can all be initialised is crucial to a successful simulation.
"""
output = Path('test/tmp')
output.mkdir(exist_ok=True)
snapshot = initialise.init_from_none(hoomd_args=HOOMD_ARGS)
simrun.run_npt(
snapshot,
context=hoomd.context.initialize(''),
sim_params=PARAMETERS,
)
assert True
@given(tuples(integers(max_value=30, min_value=5),
integers(max_value=5, min_value=1)))
@settings(max_examples=10, deadline=None)
def test_orthorhombic_sims(cell_dimensions):
"""Test the initialisation from a crystal unit cell.
This also ensures there is no unusual things going on with the calculation
of the orthorhombic unit cell.
"""
cell_dimensions = cell_dimensions[0], cell_dimensions[1]*6
output = Path('test/tmp')
output.mkdir(exist_ok=True)
with paramsContext(PARAMETERS, cell_dimensions=cell_dimensions):
snap = initialise.init_from_crystal(PARAMETERS)
snap = equilibrate.equil_crystal(snap, sim_params=PARAMETERS)
simrun.run_npt(snap,
context=hoomd.context.initialize(''),
sim_params=PARAMETERS,
)
assert True
gc.collect()
def test_equil_file_placement():
outdir = Path('test/output')
outfile = outdir / 'test_equil'
current = list(Path.cwd().glob('*'))
for i in outdir.glob('*'):
os.remove(str(i))
with paramsContext(PARAMETERS, outfile_path=outdir, outfile=outfile, temperature=4.00):
snapshot = initialise.init_from_none(hoomd_args=HOOMD_ARGS)
equilibrate.equil_liquid(snapshot, PARAMETERS)
assert current == list(Path.cwd().glob('*'))
assert Path(outfile).is_file()
for i in outdir.glob('*'):
os.remove(str(i))
def test_file_placement():
"""Ensure files are located in the correct directory when created."""
outdir = Path('test/output')
current = list(Path.cwd().glob('*'))
for i in outdir.glob('*'):
os.remove(str(i))
with paramsContext(PARAMETERS, outfile_path=outdir, dynamics=True, temperature=3.00):
snapshot = initialise.init_from_none(hoomd_args=HOOMD_ARGS)
simrun.run_npt(snapshot, hoomd.context.initialize(''), sim_params=PARAMETERS)
assert current == list(Path.cwd().glob('*'))
assert (outdir / 'Trimer-P13.50-T3.00.gsd').is_file()
assert (outdir / 'dump-Trimer-P13.50-T3.00.gsd').is_file()
assert (outdir / 'thermo-Trimer-P13.50-T3.00.log').is_file()
assert (outdir / 'trajectory-Trimer-P13.50-T3.00.gsd').is_file()
for i in outdir.glob('*'):
os.remove(str(i))
@pytest.mark.parametrize('pressure, temperature', [(1.0, 1.8), (13.5, 3.00)])
def test_interface(pressure, temperature):
init_temp = 0.4
create_command = [
'sdrun', 'create',
'--pressure', '{}'.format(pressure),
'--space-group', 'p2',
'--lattice-lengths', '48', '42',
'--temperature', '{}'.format(init_temp),
'--steps', '1000',
'--output', OUTDIR,
'-vvv',
'--hoomd-args', '"--mode=cpu"',
str(OUTDIR / 'create_interface-P{:.2f}-T{:.2f}.gsd'.format(pressure, init_temp)),
]
melt_command = [
'sdrun', 'equil',
'--equil-type', 'interface',
'--pressure', '{}'.format(pressure),
'--space-group', 'p2',
'--temperature', '{}'.format(temperature),
'--output', OUTDIR,
'--steps', '1000',
'-vvv',
'--hoomd-args', '"--mode=cpu"',
str(OUTDIR / 'create_interface-P{:.2f}-T{:.2f}.gsd'.format(pressure, init_temp)),
str(OUTDIR / 'melt_interface-P{:.2f}-T{:.2f}.gsd'.format(pressure, temperature)),
]
create = subprocess.run(create_command)
assert create.returncode == 0
melt = subprocess.run(melt_command)
assert melt.returncode == 0
def test_dynamics_output():
"""Ensure files are located in the correct directory when created."""
outdir = Path('test/output')
for i in outdir.glob('*'):
os.remove(str(i))
with paramsContext(PARAMETERS, outfile_path=outdir, dynamics=True, temperature=3.00):
snapshot = initialise.init_from_none(hoomd_args=HOOMD_ARGS)
simrun.run_npt(snapshot, hoomd.context.initialize(''), sim_params=PARAMETERS)
assert (outdir / 'trajectory-Trimer-P13.50-T3.00.gsd').is_file()
with gsd.hoomd.open(str(outdir / 'trajectory-Trimer-P13.50-T3.00.gsd')) as trj:
assert [f.configuration.step for f in trj] == list(range(101))
for i in outdir.glob('*'):
os.remove(str(i))
|
malramsay64/MD-Molecules-Hoomd
|
test/simulation_test.py
|
Python
|
mit
| 6,460
|
[
"CRYSTAL"
] |
7ee8c49b115da8f393c7ba0fb5b4010d79e02ea4b8ca5d5b6d404a7695dcd531
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import numpy as np
import matplotlib.pyplot as plt
import abel
# This example demonstrates a BASEX transform of an image obtained using a
# velocity map imaging (VMI) photoelectron spectrometer to record the
# photoelectron angular distribution resulting from above threshold ionization (ATI)
# in xenon gas using a ~40 femtosecond, 800 nm laser pulse.
# This spectrum was recorded in 2012 in the Kapteyn-Murnane research group at
# JILA / The University of Colorado at Boulder
# by Dan Hickstein and co-workers (contact DanHickstein@gmail.com)
# http://journals.aps.org/prl/abstract/10.1103/PhysRevLett.109.073004
#
# Before you start your own transform, identify the central pixel of the image.
# It's nice to use a program like ImageJ for this.
# http://imagej.nih.gov/ij/
# Specify the path to the file
filename = os.path.join('data', 'Xenon_ATI_VMI_800_nm_649x519.tif')
# Name the output files
output_image = filename[:-4] + '_Abel_transform.png'
output_text = filename[:-4] + '_speeds.txt'
output_plot = filename[:-4] + '_comparison.pdf'
# Step 1: Load an image file as a numpy array
print('Loading ' + filename)
raw_data = plt.imread(filename).astype('float64')
# Step 2: Specify the center in y,x (vert,horiz) format
center = (245,340)
# or, use automatic centering
# center = 'com'
# center = 'gaussian'
# Step 3: perform the BASEX transform!
print('Performing the inverse Abel transform:')
recon = abel.Transform(raw_data, direction='inverse', method='basex',
center=center, transform_options={'basis_dir':'./'},
verbose=True).transform
speeds = abel.tools.vmi.angular_integration(recon)
# Set up some axes
fig = plt.figure(figsize=(15,4))
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
# Plot the raw data
im1 = ax1.imshow(raw_data,origin='lower',aspect='auto')
fig.colorbar(im1,ax=ax1,fraction=.1,shrink=0.9,pad=0.03)
ax1.set_xlabel('x (pixels)')
ax1.set_ylabel('y (pixels)')
# Plot the 2D transform
im2 = ax2.imshow(recon,origin='lower',aspect='auto',clim=(0,2000))
fig.colorbar(im2,ax=ax2,fraction=.1,shrink=0.9,pad=0.03)
ax2.set_xlabel('x (pixels)')
ax2.set_ylabel('y (pixels)')
# Plot the 1D speed distribution
ax3.plot(*speeds)
ax3.set_xlabel('Speed (pixel)')
ax3.set_ylabel('Yield (log)')
ax3.set_yscale('log')
ax3.set_ylim(1e2,1e5)
# Prettify the plot a little bit:
plt.subplots_adjust(left=0.06,bottom=0.17,right=0.95,top=0.89,wspace=0.35,hspace=0.37)
# Show the plots
plt.show()
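# The output file names defined above (output_image, output_text, output_plot)
# are not written in this excerpt; a hedged sketch of how they could be saved,
# assuming standard numpy/matplotlib calls:
#
#   np.savetxt(output_text, np.column_stack(speeds))   # 1D speed distribution
#   fig.savefig(output_plot)                            # comparison figure
#   plt.imsave(output_image, recon)                     # transformed image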
|
huletlab/PyAbel
|
examples/example_basex_photoelectron.py
|
Python
|
mit
| 2,702
|
[
"Gaussian"
] |
247c19d852026c4ae418c2c0efc2c24d3a1e9dd90aca1d38552013defc74acba
|
"""
.. module::
:platform: Unix
:synopsis: ???
.. moduleauthor: Jan Hajic <hajicj@gmail.com>
"""
import cPickle
import numpy
import safire.utils
import theano
import theano.tensor as TT
from theano.tensor.shared_randomstreams import RandomStreams
from safire.learning.models.autoencoder import Autoencoder
class SparseDenoisingAutoencoder(Autoencoder):
"""This is a dummy docstring for class . You had better write a real one.
"""
def __init__(self, inputs, n_in, n_out=100,
activation=TT.nnet.sigmoid,
backward_activation=TT.nnet.sigmoid,
reconstruction='cross-entropy',
W=None, W_prime=None, b=None, b_prime=None,
tied_weights=True, corruption_level=0.3,
sparsity_target=0.5,
rng=numpy.random.RandomState(),
theano_rng = None):
""" Initialize the parameters of the Denoising Autoencoder.
A Denoising Autoencoder is an unsupervised model that tries to minimize
reconstruction error on input with additional noise introduced to the
model.
The noise randomly switches off input neurons with a certain
probability. This is different from a *dropout training* procedure,
where the *hidden* neurons are randomly switched off.
:type inputs: theano.tensor.TensorType
        :param inputs: Symbolic variable that describes the input
of the architecture (e.g., one minibatch of
input images, or output of a previous layer)
:type n_in: int
:param n_in: Number of input units, the dimension of the space
in which the data points live
:type n_out: int
:param n_out: The number of hidden units.
:type activation: theano.tensor.elemwise.Elemwise
:param activation: The nonlinearity applied at neuron
output.
:type W: theano.tensor.sharedvar.TensorSharedVariable
:param W: Theano variable pointing to a set of weights that should
be shared between the autoencoder and another architecture;
if autoencoder should be standalone, leave this as None.
This set of weights refers to the transition from visible
to hidden layer.
:type W_prime: theano.tensor.sharedvar.TensorSharedVariable
:param W_prime: Theano variable pointing to a set of weights that
should be shared between the autoencoder and another
architecture; if autoencoder should be standalone,
leave this as None. This set of weights refers to
the transition from the hidden to the visible layer.
:type b: theano.tensor.sharedvar.TensorSharedVariable
:param b: Theano variable pointing to a set of bias values that
should be shared between the autoencoder and another
architecture; if autoencoder should be standalone,
leave this as None. This set of bias values refers
to the transition from visible to hidden layer.
:type b_prime: theano.tensor.sharedvar.TensorSharedVariable
:param b_prime: Theano variable pointing to a set of bias values
that should be shared between the autoencoder and
another architecture; if autoencoder should be
standalone, leave this as None. This set of bias
values refers to the transition from visible to
hidden layer.
:type tied_weights: bool
:param tied_weights: If True (default), forces W_prime = W.T, i.e.
the visible-hidden transformation and the
hidden-visible transformation use the same
weights.
:type corruption_level: theano.config.floatX
:param corruption_level: Specify the level of input corruption:
the probability that an input neuron's
value will be fixed to 0 during computation
of hidden activations.
"""
super(SparseDenoisingAutoencoder, self).__init__(inputs, n_in, n_out,
activation,
backward_activation,
reconstruction,
W, W_prime, b, b_prime,
tied_weights, rng,
theano_rng)
self.corruption_level = corruption_level
self.sparsity_target = sparsity_target
def mean_h_given_v(self, inputs):
"""Computes the activation of the hidden units.
:type inputs: theano.tensor.TensorType
:param inputs: Values of the visible units (i.e. rows of data).
:returns: The activation on hidden units, as symbolic expression
bound to ``inputs``.
"""
corrupted_inputs = self.__corrupt_input(inputs)
return self.activation(TT.dot(corrupted_inputs, self.W) + self.b)
def __corrupt_input(self, inputs):
"""Randomly sets some of the inputs to zero.
:type inputs: theano.tensor.TensorType
:param inputs: Values of the visible units (i.e. rows of data).
:rtype: theano.tensor.TensorType
:returns: The inputs with some values randomly set to 0.
"""
return self.theano_rng.binomial(size = inputs.shape, n = 1,
p = 1-self.corruption_level,
dtype = theano.config.floatX) * inputs
def _cost(self, X):
if self.reconstruction == 'cross-entropy':
reconstruction_cost = TT.mean(self._reconstruction_cross_entropy(X))
elif self.reconstruction == 'mse':
reconstruction_cost = TT.mean(self._reconstruction_squared_error(X))
elif self.reconstruction == 'exaggerated-mse':
return TT.mean(self._reconstruction_hypercubic_exploded_error(X))
else:
raise ValueError('Invalid reconstruction set! %s' % self.reconstruction)
sparsity_cost = self._sparsity_cross_entropy(X)
return reconstruction_cost + sparsity_cost
def _sparsity_cross_entropy(self, X):
"""
Computes the KL divergence of distribution of the sparsity target
w.r.t. mean activation of each hidden neuron.
:param X: The input data batch.
:return: The KL-divergence... (see desc.)
"""
mean_act = TT.abs_(TT.mean(self.activation(TT.dot(X, self.W) + self.b), axis=0))
mean_act_compl = 1.0 - mean_act
rho_term = mean_act * TT.log(mean_act / self.sparsity_target)
neg_rho_term = mean_act_compl * TT.log(mean_act_compl / (1.0 - self.sparsity_target))
kl_divergence = TT.sum(rho_term + neg_rho_term)
return kl_divergence
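    # The penalty computed above is, for each hidden unit j with batch-mean
    # activation rho_hat_j and target rho = sparsity_target:
    #     rho_hat_j * log(rho_hat_j / rho)
    #     + (1 - rho_hat_j) * log((1 - rho_hat_j) / (1 - rho)),
    # summed over all hidden units; it vanishes when every rho_hat_j == rho.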
def _init_args_snapshot(self):
"""Saves the model in the form of an init kwarg dict, since not all
attributes of the instance can be pickled. Upon loading, the saved
model kwarg dict will be used as ``**kwargs`` (the ``load`` method
is a classmethod) for an initialization of the model."""
init_arg_dict = {
'W' : self.W,
'W_prime' : self.W_prime,
'b' : self.b,
'b_prime' : self.b_prime,
'corruption_level' : self.corruption_level,
            'sparsity_target' : self.sparsity_target,
'n_in' : self.n_in,
'n_out' : self.n_out,
'activation' : self.activation,
'reconstruction' : self.reconstruction,
'tied_weights' : self.tied_weights,
'inputs' : self.inputs
# Random number generators are ignored?
}
return init_arg_dict
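
# Illustrative usage sketch of the class above; the input dimension, hidden
# size and symbolic variable name below are arbitrary examples, and a working
# Theano installation is assumed:
#
#     x = TT.matrix('x')   # symbolic minibatch of visible units
#     model = SparseDenoisingAutoencoder(inputs=x, n_in=784, n_out=100,
#                                        corruption_level=0.3,
#                                        sparsity_target=0.05)
#     cost = model._cost(x)   # reconstruction error + KL sparsity penalty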
|
hajicj/safire
|
safire/learning/models/sparse_denoising_autoencoder.py
|
Python
|
gpl-3.0
| 8,323
|
[
"NEURON"
] |
825a5b46df49bb3bd049e7ec5df25b313c8a9b39271e2af41d1921ff395d6715
|
#!/usr/bin/env python
#------------------------------------------------------------
# Script compares efficiency of automatic derivatives vs
# analytical in mpfit.py
# Vog, 31 okt 2011
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from kapteyn import kmpfit
from kapteyn.profiles import gauest
def my_model(p, x, ncomp):
#-----------------------------------------------------------------------
# This describes the model and its parameters for which we want to find
# the best fit. 'p' is a sequence of parameters (array/list/tuple).
#-----------------------------------------------------------------------
y = 0.0
zerolev = p[-1] # Last element
for i in range(ncomp):
A, mu, sigma = p[i*3:(i+1)*3]
y += A * numpy.exp(-(x-mu)*(x-mu)/(2.0*sigma*sigma))
return y + zerolev
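# For example, with p = [1.0, 0.0, 1.0, 0.0] and ncomp=1 the model is a single
# unit-amplitude Gaussian centred at 0 with sigma=1 on a zero baseline, so
# my_model(p, 0.0, 1) evaluates to 1.0.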
def my_residuals(p, data):
#-----------------------------------------------------------------------
# This function is the function called by the fit routine in kmpfit
# It returns a weighted residual. The fit routine calculates the
# square of these values.
#-----------------------------------------------------------------------
x, y, err, ncomp = data
return (y-my_model(p,x,ncomp)) / err
# Artificial data
N = 100
x = numpy.linspace(-5, 10, N)
truepars1 = [10.0, 5.0, 1.0, 3.0, -1.0, 1.5, 0.0]
#p0 = [9, 4.5, 0.8, 0]
y = my_model(truepars1, x, 2) + 0.3*numpy.random.randn(len(x))
err = 0.3*numpy.random.randn(N)
cutamp = 0.1*y.max()
cutsig = 5.0
rms = 0.3
# We use gauest to get the initial estimates
# Gauest returns a list with up to ncomp tuples of which each tuple contains the amplitude,
# the centre and the dispersion of the gaussian, in that order.
ncomps = 0
Q = 1
while ncomps != 2 and Q < 8:
comps = gauest(x, y, rms, cutamp, cutsig, q=Q, ncomp=2)
ncomps = len(comps)
Q += 1
if ncomps != 2:
raise Exception("Cannot estimate two components")
print("Gauest with cutamp, cutsig, rms", cutamp, cutsig, rms)
print("Number of components found:", ncomps)
print("Value of Q for which 2 comps. were found:", Q-1)
p0 = []
for c in comps:
p0 += c
p0.append(0.0) # Zero level
print("Initial estimates p0=", p0)
# The fit
fitobj = kmpfit.Fitter(residuals=my_residuals, data=(x, y, err, ncomps))
try:
fitobj.fit(params0=p0)
except Exception as mes:
print("Something wrong with fit: ", mes)
raise SystemExit
print("\n\n======== Results kmpfit (gauest initial estimates) =========")
print("Params:\n", fitobj.params)
print("Errors from covariance matrix:\n ", fitobj.xerror)
print("Uncertainties assuming reduced Chi^2=1:\n", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Iterations: ", fitobj.niter)
print("Function ev: ", fitobj.nfev)
print("Status: ", fitobj.status)
print("Status Message:", fitobj.message)
# Plot the result
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure()
frame = fig.add_subplot(1,1,1)
frame.errorbar(x, y, yerr=err, fmt='go', alpha=0.7, label="Noisy data")
frame.plot(x, my_model(truepars1,x,2), 'r', label="True data")
frame.plot(x, my_model(fitobj.params,x,ncomps), 'b', lw=2, label="Fit with kmpfit")
frame.set_xlabel("X")
frame.set_ylabel("Measurement data")
frame.set_title("Least-squares fit to noisy multi-component Gaussian data",
fontsize=10)
leg = frame.legend(loc=2)
show()
|
kapteyn-astro/kapteyn
|
doc/source/EXAMPLES/kmpfit_gauest_multicomp.py
|
Python
|
bsd-3-clause
| 3,462
|
[
"Gaussian"
] |
3104781e2c33de66e42f16f39985fafad1f65a1260d216a06c3e19f1da038b85
|
from __future__ import print_function, unicode_literals, absolute_import, division
import logging
logger = logging.getLogger(__name__)
import numpy as np
import warnings
from mako.template import Template
from gputools import OCLArray, OCLProgram, OCLElementwiseKernel
from gputools.core.ocltypes import assert_bufs_type, cl_buffer_datatype_dict
from gputools.utils.tile_iterator import tile_iterator
from ._abspath import abspath
def _stride_shape(shape, strides):
return tuple((sh-1)//st+1 for sh, st in zip(shape, strides))
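# e.g. _stride_shape((10, 10), (2, 3)) == (5, 4): the output keeps every
# 2nd row and every 3rd column of the input.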
def _generic_filter_gpu_2d(FUNC = "fmax(res,val)", DEFAULT = "-INFINITY"):
def _filt(data_g, size=(3, 3), res_g=None, strides=(1,1)):
if not data_g.dtype.type in cl_buffer_datatype_dict:
raise ValueError("dtype %s not supported"%data_g.dtype.type)
if not len(strides)==len(size)==len(data_g.shape):
raise ValueError('strides, size, and data.shape should have same length!')
DTYPE = cl_buffer_datatype_dict[data_g.dtype.type]
with open(abspath("kernels/generic_separable_filter.cl"), "r") as f:
tpl = Template(f.read())
rendered = tpl.render(FSIZE_X=size[-1], FSIZE_Y=size[-2], FSIZE_Z=1,
FUNC=FUNC, DEFAULT=DEFAULT, DTYPE = DTYPE)
prog = OCLProgram(src_str=rendered)
out_shape_x = _stride_shape(data_g.shape, (1,strides[1]))
out_shape_y = _stride_shape(data_g.shape, (strides[0],strides[1]))
tmp_g = OCLArray.empty(out_shape_x, np.float32)
if res_g is None:
res_g = OCLArray.empty(out_shape_y, data_g.dtype)
else:
assert res_g.shape==out_shape_y
Ny,Nx = data_g.shape
prog.run_kernel("filter_2_x", out_shape_x[::-1], None, data_g.data, tmp_g.data,
np.int32(Nx), np.int32(strides[1]))
prog.run_kernel("filter_2_y", out_shape_y[::-1], None, tmp_g.data, res_g.data,
np.int32(Ny), np.int32(strides[0]))
return res_g
return _filt
def _generic_filter_gpu_3d(FUNC = "fmax(res,val)", DEFAULT = "-INFINITY"):
def _filt(data_g, size=(3, 3,3 ), res_g=None, strides=(1,1,1)):
if not data_g.dtype.type in cl_buffer_datatype_dict:
raise ValueError("dtype %s not supported"%data_g.dtype.type)
if not len(strides)==len(size)==len(data_g.shape):
raise ValueError('strides, size, and data.shape should have same length!')
DTYPE = cl_buffer_datatype_dict[data_g.dtype.type]
with open(abspath("kernels/generic_separable_filter.cl"), "r") as f:
tpl = Template(f.read())
rendered = tpl.render(FSIZE_X=size[-1], FSIZE_Y=size[-2], FSIZE_Z=size[-3],
FUNC=FUNC, DEFAULT=DEFAULT, DTYPE = DTYPE)
prog = OCLProgram(src_str=rendered,
build_options = ["-cl-unsafe-math-optimizations"]
)
out_shape_x = _stride_shape(data_g.shape, (1,1,strides[2]))
out_shape_y = _stride_shape(data_g.shape, (1,strides[1],strides[2]))
out_shape_z = _stride_shape(data_g.shape, (strides[0],strides[1],strides[2]))
if res_g is None:
res_g = OCLArray.empty(out_shape_z, data_g.dtype)
else:
assert res_g.shape==out_shape_z
if out_shape_x == out_shape_z:
tmp_g = res_g
else:
tmp_g = OCLArray.empty(out_shape_x, data_g.dtype)
tmp2_g = OCLArray.empty(out_shape_y, data_g.dtype)
Nz, Ny, Nx = data_g.shape
prog.run_kernel("filter_3_x", out_shape_x[::-1], None, data_g.data, tmp_g.data,
np.int32(Nx), np.int32(strides[2]))
prog.run_kernel("filter_3_y", out_shape_y[::-1], None, tmp_g.data, tmp2_g.data,
np.int32(Ny), np.int32(strides[1]))
prog.run_kernel("filter_3_z", out_shape_z[::-1], None, tmp2_g.data, res_g.data,
np.int32(Nz), np.int32(strides[0]))
return res_g
return _filt
def make_filter(filter_gpu):
def _filter_numpy(data, size, strides):
if not data.dtype.type in cl_buffer_datatype_dict:
warnings.warn("%s data not supported, casting to np.float32"%data.dtype.type )
data = data.astype(np.float32)
data_g = OCLArray.from_array(data)
return filter_gpu(data_g = data_g, size=size, strides=strides).get()
def _filter(data, size=4, res_g=None, strides=1, sub_blocks=(1, 1, 1)):
if np.isscalar(size):
size = (size,)*len(data.shape)
if np.isscalar(strides):
strides = (strides,)*len(data.shape)
if isinstance(data, np.ndarray):
if sub_blocks is None or set(sub_blocks) == {1}:
return _filter_numpy(data, size, strides=strides)
else:
                # cut the image into tiles and operate on each of them
N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
Npads = int(size // 2)
res = np.empty(data.shape, np.float32)
for i, (data_tile, data_s_src, data_s_dest) \
in enumerate(tile_iterator(data, blocksize=N_sub,
padsize=Npads,
mode="constant")):
res_tile = _filter_numpy(data_tile.copy(),
size, strides=strides)
res[data_s_src] = res_tile[data_s_dest]
return res
elif isinstance(data, OCLArray):
return filter_gpu(data, size=size, res_g=res_g, strides=strides)
else:
raise TypeError("array argument (1) has bad type: %s" % type(data))
return _filter
####################################################################################
def max_filter(data, size=7, res_g=None, strides=1, cval=None, sub_blocks=(1, 1, 1)):
"""
maximum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
cval: scalar or None
border values to use (if None, select minimal scalar value of data.dtype)
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if cval is None:
if np.issubdtype(data.dtype, np.integer):
cval = np.iinfo(data.dtype).min
else:
cval = "-INFINITY"
elif np.isinf(cval):
cval = "-INFINITY" if cval<0 else "INFINITY"
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC = "(val>res?val:res)", DEFAULT = cval))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC = "(val>res?val:res)", DEFAULT = cval))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data = data, size = size, res_g = res_g, strides=strides, sub_blocks=sub_blocks)
def min_filter(data, size=7, res_g=None, strides=1, cval=None, sub_blocks=(1, 1, 1)):
"""
minimum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
cval: scalar or None
border values to use (if None, select maximal scalar value of data.dtype)
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if cval is None:
if np.issubdtype(data.dtype, np.integer):
cval = np.iinfo(data.dtype).max
else:
cval = "INFINITY"
elif np.isinf(cval):
cval = "-INFINITY" if cval<0 else "INFINITY"
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT=cval))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT=cval))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, strides=strides, sub_blocks=sub_blocks)
def uniform_filter(data, size=7, res_g=None, strides=1, sub_blocks=(1, 1, 1), cval = 0, normalized = True):
"""
mean filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
cval: scalar
border values to use
normalized: bool
if True, the filter corresponds to mean
if False, the filter corresponds to sum
Returns
-------
filtered image or None (if OCLArray)
"""
if normalized:
if np.isscalar(size):
norm = size
else:
norm = np.int32(np.prod(size))**(1./len(size))
FUNC = "res+(float)(val)/%s"%norm
else:
FUNC = "res+val"
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT=cval))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT=cval))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    res = _filt(data=data, size=size, res_g=res_g, strides=strides, sub_blocks=sub_blocks)
    return res
# FIXME: only to compare against gputools.gaussian_filter (which uses convolve_sep)
def _gauss_filter(data, sigma=4, res_g=None, strides=1, cval=0, sub_blocks=(1, 1, 1)):
"""
gaussian filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
    sigma: tuple
        standard deviation(s) of the Gaussian kernel (one value per axis)
res_g: OCLArray
store result in buffer if given
cval: scalar
border values to use
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if np.isinf(cval):
cval = "-INFINITY" if cval<0 else "INFINITY"
truncate = 4.
radius = tuple(int(truncate*s +0.5) for s in sigma)
size = tuple(2*r+1 for r in radius)
s = sigma[0]
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT=cval))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT=cval))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, strides=strides, sub_blocks=sub_blocks)
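
if __name__ == '__main__':
    # Illustrative usage of the filters defined above (requires a working
    # OpenCL setup; the array shape and filter sizes are arbitrary examples).
    img = np.random.rand(128, 128).astype(np.float32)
    mx = max_filter(img, size=5)          # 5x5 maximum filter
    mn = min_filter(img, size=5)          # 5x5 minimum filter
    mean = uniform_filter(img, size=5)    # 5x5 mean filter
    print(img.shape, mx.shape, mn.shape, mean.shape)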
|
maweigert/gputools
|
gputools/convolve/generic_separable_filters.py
|
Python
|
bsd-3-clause
| 11,149
|
[
"Gaussian"
] |
3aa301af376d3ab3e98fe5d62f0c2cd8c69e6d2e759b26c4903cc9db9488cd63
|
# Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016-2017 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2016 Brian C. Lane <bcl@redhat.com>
# Copyright (c) 2017-2018, 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018 Alan Chan <achan961117@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Yury Gribov <tetra2005@gmail.com>
# Copyright (c) 2018 Caio Carrara <ccarrara@redhat.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.guinta@protonmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Brian Shaginaw <brian.shaginaw@warbyparker.com>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Matthijs Blom <19817960+MatthijsBlom@users.noreply.github.com>
# Copyright (c) 2019 Djailla <bastien.vallet@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Nathan Marrow <nmarrow@google.com>
# Copyright (c) 2019 Svet <svet@hyperscience.com>
# Copyright (c) 2019 Pascal Corpet <pcorpet@users.noreply.github.com>
# Copyright (c) 2020 Batuhan Taskaya <batuhanosmantaskaya@gmail.com>
# Copyright (c) 2020 Luigi <luigi.cristofolini@q-ctrl.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Andrew Simmons <anjsimmo@gmail.com>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Slavfox <slavfoxman@gmail.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""some functions that may be useful for various checkers
"""
import builtins
import itertools
import numbers
import re
import string
from functools import lru_cache, partial
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Match,
Optional,
Set,
Tuple,
Union,
)
import _string
import astroid
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
EXCEPTIONS_MODULE = "builtins"
ABC_MODULES = {"abc", "_py_abc"}
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
TYPING_PROTOCOLS = frozenset(
{"typing.Protocol", "typing_extensions.Protocol", ".Protocol"}
)
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__trunc__",
"__floor__",
"__ceil__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__imatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
(1, 2): ("__pow__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods # type: ignore
}
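# For example, SPECIAL_METHODS_PARAMS["__len__"] == 0,
# SPECIAL_METHODS_PARAMS["__setattr__"] == 2 and
# SPECIAL_METHODS_PARAMS["__pow__"] == (1, 2).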
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
SUBSCRIPTABLE_CLASSES_PEP585 = frozenset(
(
"builtins.tuple",
"builtins.list",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"builtins.type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"_collections_abc.Awaitable",
"_collections_abc.Coroutine",
"_collections_abc.AsyncIterable",
"_collections_abc.AsyncIterator",
"_collections_abc.AsyncGenerator",
"_collections_abc.Iterable",
"_collections_abc.Iterator",
"_collections_abc.Generator",
"_collections_abc.Reversible",
"_collections_abc.Container",
"_collections_abc.Collection",
"_collections_abc.Callable",
"_collections_abc.Set",
"_collections_abc.MutableSet",
"_collections_abc.Mapping",
"_collections_abc.MutableMapping",
"_collections_abc.Sequence",
"_collections_abc.MutableSequence",
"_collections_abc.ByteString",
"_collections_abc.MappingView",
"_collections_abc.KeysView",
"_collections_abc.ItemsView",
"_collections_abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
)
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def is_inside_lambda(node: astroid.node_classes.NodeNG) -> bool:
"""Return true if given node is inside lambda"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Lambda):
return True
parent = parent.parent
return False
def get_all_elements(
node: astroid.node_classes.NodeNG,
) -> Iterable[astroid.node_classes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
yield from get_all_elements(child)
else:
yield node
def clobber_in_except(
node: astroid.node_classes.NodeNG,
) -> Tuple[bool, Optional[Tuple[str, str]]]:
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssignAttr):
return True, (node.attrname, f"object {node.expr.as_string()!r}")
if isinstance(node, astroid.AssignName):
name = node.name
if is_builtin(name):
return True, (name, "builtins")
stmts = node.lookup(name)[1]
if stmts and not isinstance(
stmts[0].assign_type(),
(astroid.Assign, astroid.AugAssign, astroid.ExceptHandler),
):
return True, (name, "outer scope (line %s)" % stmts[0].fromlineno)
return False, None
def is_super(node: astroid.node_classes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function"""
if getattr(node, "name", None) == "super" and node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node: astroid.scoped_nodes.FunctionDef) -> bool:
"""Return true if the given function node only raises an exception"""
return len(node.body) == 1 and isinstance(node.body[0], astroid.Raise)
builtins = builtins.__dict__.copy() # type: ignore
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: astroid.node_classes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore
def is_defined_in_scope(
var_node: astroid.node_classes.NodeNG,
varname: str,
scope: astroid.node_classes.NodeNG,
) -> bool:
if isinstance(scope, astroid.If):
for node in scope.body:
if (
isinstance(node, astroid.Assign)
and any(
isinstance(target, astroid.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, astroid.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, astroid.For)):
for ass_node in scope.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, astroid.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, astroid.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (astroid.Lambda, astroid.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, astroid.ExceptHandler):
if isinstance(scope.name, astroid.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: astroid.Name) -> bool:
"""Check if the given variable node is defined before
Verify that the variable node is defined by a parent node
(list, set, dict, or generator comprehension, lambda)
or in a previous sibling node on the same line
(statement_defining ; statement_using).
"""
varname = var_node.name
_node = var_node.parent
while _node:
if is_defined_in_scope(var_node, varname, _node):
return True
_node = _node.parent
    # possibly multiple statements on the same line using semicolon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(astroid.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(
node: astroid.node_classes.NodeNG,
scope: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
if not scope:
scope = node.scope()
if isinstance(scope, (astroid.FunctionDef, astroid.Lambda)):
for default_node in scope.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(
astroid.Lambda,
astroid.scoped_nodes.ComprehensionScope,
astroid.scoped_nodes.ListComp,
),
):
break
parent = parent.parent
return False
def is_ancestor_name(
frame: astroid.ClassDef, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
if not isinstance(frame, astroid.ClassDef):
return False
for base in frame.bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node: astroid.node_classes.NodeNG) -> astroid.node_classes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node"""
while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node: astroid.ClassDef, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(
format_string: str,
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
key_types = dict()
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
flags = "diouxXeEfFgGcrs%a"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
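# For example:
#     parse_format_string("%(name)s scored %d points")
# returns ({'name'}, 1, {'name': 's'}, ['d']).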
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError as e:
raise IncompleteFormatString() from e
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
"""Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
yield from collect_string_fields(nested)
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) from exc
def parse_format_method_string(
format_string: str,
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
where keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
is the number of arguments required by the format string and
explicit_pos_args is the number of arguments passed with the position.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
explicit_pos_args.add(str(keyname))
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError as e:
raise IncompleteFormatString() from e
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
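# For example:
#     parse_format_method_string("{0} {a} {}")
# returns ([('a', [])], 1, 1): one keyword field 'a', one implicit positional
# field and one explicitly numbered positional field.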
def is_attr_protected(attrname: str) -> bool:
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
def node_frame_class(node: astroid.node_classes.NodeNG) -> Optional[astroid.ClassDef]:
"""Return the class that is wrapping the given node
The function returns a class for a method node (or a staticmethod or a
classmethod), otherwise it returns `None`.
"""
klass = node.frame()
nodes_to_check = (
astroid.node_classes.NodeNG,
astroid.UnboundMethod,
astroid.BaseInstance,
)
while (
klass
and isinstance(klass, nodes_to_check)
and not isinstance(klass, astroid.ClassDef)
):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
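# For example, is_attr_private("__secret") and is_attr_private("__secret_")
# both match, while is_attr_private("__dunder__") does not.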
def get_argument_from_call(
call_node: astroid.Call, position: int = None, keyword: str = None
) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node: astroid.node_classes.NodeNG) -> bool:
"""
Return true if the given class node is subclass of
exceptions.Exception.
"""
ancestors = node.ancestors() if hasattr(node, "ancestors") else []
for ancestor in itertools.chain([node], ancestors):
if (
ancestor.name in ("Exception", "BaseException")
and ancestor.root().name == EXCEPTIONS_MODULE
):
return True
return False
def error_of_type(handler: astroid.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,) # type: ignore
expected_errors = {stringify_error(error) for error in error_type} # type: ignore
if not handler.type:
return False
return handler.catch(expected_errors)
def decorated_with_property(node: astroid.FunctionDef) -> bool:
"""Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_kind(node, *kinds):
if not isinstance(node, (astroid.UnboundMethod, astroid.FunctionDef)):
return False
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Attribute) and decorator.attrname in kinds:
return True
return False
def is_property_setter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is a property setter"""
return _is_property_kind(node, "setter")
def is_property_deleter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is a property deleter"""
return _is_property_kind(node, "deleter")
def is_property_setter_or_deleter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is either a property setter or a deleter"""
return _is_property_kind(node, "setter", "deleter")
def _is_property_decorator(decorator: astroid.Name) -> bool:
for inferred in decorator.infer():
if isinstance(inferred, astroid.ClassDef):
if inferred.root().name == BUILTINS_NAME and inferred.name == "property":
return True
for ancestor in inferred.ancestors():
if (
ancestor.name == "property"
and ancestor.root().name == BUILTINS_NAME
):
return True
return False
def decorated_with(
func: Union[astroid.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod],
qnames: Iterable[str],
) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
if isinstance(decorator_node, astroid.Call):
# We only want to infer the function name
decorator_node = decorator_node.func
try:
if any(
i is not None and i.qname() in qnames or i.name in qnames
for i in decorator_node.infer()
):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: astroid.ClassDef, is_abstract_cb: astroid.FunctionDef = None
) -> Dict[str, astroid.node_classes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited: Dict[str, astroid.node_classes.NodeNG] = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
inferred = obj
if isinstance(obj, astroid.AssignName):
inferred = safe_infer(obj)
if not inferred:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(inferred, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(inferred, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(inferred)
if abstract:
visited[obj.name] = inferred
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def find_try_except_wrapper_node(
node: astroid.node_classes.NodeNG,
) -> Optional[Union[astroid.ExceptHandler, astroid.TryExcept]]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def find_except_wrapper_node_in_scope(
node: astroid.node_classes.NodeNG,
) -> Optional[Union[astroid.ExceptHandler, astroid.TryExcept]]:
"""Return the ExceptHandler in which the node is, without going out of scope."""
current = node
while current.parent is not None:
current = current.parent
if isinstance(current, astroid.scoped_nodes.LocalsDictNodeNG):
# If we're inside a function/class definition, we don't want to keep checking
# higher ancestors for `except` clauses, because if these exist, it means our
# function/class was defined in an `except` clause, rather than the current code
# actually running in an `except` clause.
return None
if isinstance(current, astroid.ExceptHandler):
return current
return None
def is_from_fallback_block(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(
handlers: astroid.ExceptHandler, exception
) -> bool:
func = partial(error_of_type, error_type=(exception,))
return any(func(handler) for handler in handlers)
def get_exception_handlers(
node: astroid.node_classes.NodeNG, exception=Exception
) -> Optional[List[astroid.ExceptHandler]]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (astroid.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, astroid.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return []
def is_node_inside_try_except(node: astroid.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (astroid.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, astroid.TryExcept)
def node_ignores_exception(
node: astroid.node_classes.NodeNG, exception=Exception
) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: astroid.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
# Only check for explicit metaclass=ABCMeta on this specific class
meta = node.declared_metaclass()
if meta is not None:
if meta.name == "ABCMeta" and meta.root().name in ABC_MODULES:
return True
for ancestor in node.ancestors():
if ancestor.name == "ABC" and ancestor.root().name in ABC_MODULES:
# abc.ABC inheritance
return True
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: astroid.node_classes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node: astroid.node_classes.NodeNG) -> bool:
comprehensions = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: astroid.node_classes.NodeNG) -> bool:
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: astroid.node_classes.NodeNG, protocol_callback: astroid.FunctionDef
) -> bool:
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
if (
isinstance(value, astroid.bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: astroid.node_classes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: astroid.node_classes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(
value: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
if isinstance(value, astroid.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
if is_class_subscriptable_pep585_with_postponed_evaluation_enabled(value, node):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: astroid.node_classes.NodeNG, *_: Any) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: astroid.node_classes.NodeNG, *_: Any) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
def _get_python_type_of_node(node):
pytype = getattr(node, "pytype", None)
if callable(pytype):
return pytype()
return None
@lru_cache(maxsize=1024)
def safe_infer(
node: astroid.node_classes.NodeNG, context=None
) -> Optional[astroid.node_classes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred of different types).
"""
inferred_types = set()
try:
infer_gen = node.infer(context=context)
value = next(infer_gen)
except astroid.InferenceError:
return None
if value is not astroid.Uninferable:
inferred_types.add(_get_python_type_of_node(value))
try:
for inferred in infer_gen:
inferred_type = _get_python_type_of_node(inferred)
if inferred_type not in inferred_types:
return None # If there is ambiguity on the inferred node.
except astroid.InferenceError:
return None # There is some kind of ambiguity
except StopIteration:
return value
return value if len(inferred_types) <= 1 else None
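# For example (assuming astroid can infer the expression unambiguously):
#     safe_infer(astroid.extract_node("1 + 1"))
# returns an astroid.Const node whose .value is 2, while an expression whose
# inference fails or is ambiguous yields None.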
def has_known_bases(klass: astroid.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
if (
not isinstance(result, astroid.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: astroid.node_classes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, astroid.Const) and node.value is None)
or (isinstance(node, astroid.Name) and node.name == "None")
)
def node_type(node: astroid.node_classes.NodeNG) -> Optional[type]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: astroid.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
def get_node_last_lineno(node: astroid.node_classes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
def is_postponed_evaluation_enabled(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
module = node.root()
return "annotations" in module.future_imports
def is_class_subscriptable_pep585_with_postponed_evaluation_enabled(
value: astroid.ClassDef, node: astroid.node_classes.NodeNG
) -> bool:
"""Check if class is subscriptable with PEP 585 and
postponed evaluation enabled.
"""
return (
is_postponed_evaluation_enabled(node)
and value.qname() in SUBSCRIPTABLE_CLASSES_PEP585
and is_node_in_type_annotation_context(node)
)
def is_node_in_type_annotation_context(node: astroid.node_classes.NodeNG) -> bool:
"""Check if node is in type annotation context.
Check for 'AnnAssign', function 'Arguments',
    or part of a function's return type annotation.
"""
# pylint: disable=too-many-boolean-expressions
current_node, parent_node = node, node.parent
while True:
if (
isinstance(parent_node, astroid.AnnAssign)
and parent_node.annotation == current_node
or isinstance(parent_node, astroid.Arguments)
and current_node
in (
*parent_node.annotations,
*parent_node.posonlyargs_annotations,
*parent_node.kwonlyargs_annotations,
parent_node.varargannotation,
parent_node.kwargannotation,
)
or isinstance(parent_node, astroid.FunctionDef)
and parent_node.returns == current_node
):
return True
current_node, parent_node = parent_node, parent_node.parent
if isinstance(parent_node, astroid.Module):
return False
def is_subclass_of(child: astroid.ClassDef, parent: astroid.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, astroid.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except astroid.exceptions._NonDeducibleTypeHierarchy:
continue
return False
@lru_cache(maxsize=1024)
def is_overload_stub(node: astroid.node_classes.NodeNG) -> bool:
"""Check if a node if is a function stub decorated with typing.overload.
:param node: Node to check.
:returns: True if node is an overload function stub. False otherwise.
"""
decorators = getattr(node, "decorators", None)
return bool(decorators and decorated_with(node, ["typing.overload", "overload"]))
def is_protocol_class(cls: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node represents a protocol class
:param cls: The node to check
:returns: True if the node is a typing protocol class, false otherwise.
"""
if not isinstance(cls, astroid.ClassDef):
return False
# Use .ancestors() since not all protocol classes can have
# their mro deduced.
return any(parent.qname() in TYPING_PROTOCOLS for parent in cls.ancestors())
def is_call_of_name(node: astroid.node_classes.NodeNG, name: str) -> bool:
"""Checks if node is a function call with the given name"""
return (
isinstance(node, astroid.Call)
and isinstance(node.func, astroid.Name)
and node.func.name == name
)
def is_test_condition(
node: astroid.node_classes.NodeNG,
parent: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""Returns true if the given node is being tested for truthiness"""
parent = parent or node.parent
if isinstance(parent, (astroid.While, astroid.If, astroid.IfExp, astroid.Assert)):
return node is parent.test or parent.test.parent_of(node)
if isinstance(parent, astroid.Comprehension):
return node in parent.ifs
return is_call_of_name(parent, "bool") and parent.parent_of(node)
def is_classdef_type(node: astroid.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
for base in node.bases:
if isinstance(base, astroid.Name) and base.name == "type":
return True
return False
def is_attribute_typed_annotation(
node: Union[astroid.ClassDef, astroid.Instance], attr_name: str
) -> bool:
"""Test if attribute is typed annotation in current node
or any base nodes.
"""
attribute = node.locals.get(attr_name, [None])[0]
if (
attribute
and isinstance(attribute, astroid.AssignName)
and isinstance(attribute.parent, astroid.AnnAssign)
):
return True
for base in node.bases:
inferred = safe_infer(base)
if (
inferred
and isinstance(inferred, astroid.ClassDef)
and is_attribute_typed_annotation(inferred, attr_name)
):
return True
return False
def is_assign_name_annotated_with(node: astroid.AssignName, typing_name: str) -> bool:
"""Test if AssignName node has `typing_name` annotation.
Especially useful to check for `typing._SpecialForm` instances
like: `Union`, `Optional`, `Literal`, `ClassVar`, `Final`.
"""
if not isinstance(node.parent, astroid.AnnAssign):
return False
annotation = node.parent.annotation
if isinstance(annotation, astroid.Subscript):
annotation = annotation.value
if (
isinstance(annotation, astroid.Name)
and annotation.name == typing_name
or isinstance(annotation, astroid.Attribute)
and annotation.attrname == typing_name
):
return True
return False
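if __name__ == "__main__":
    # Editor's sketch, not part of pylint: a minimal illustration of how a few
    # of the helpers above can be exercised against a parsed snippet.  The
    # example source string is made up; it only relies on astroid's public
    # extract_node helper.
    example_call = astroid.extract_node("bool(some_flag)")
    print(is_call_of_name(example_call, "bool"))    # True
    print(is_test_condition(example_call.args[0]))  # True: argument of a bool() call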
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/utils.py
|
Python
|
mit
| 51,031
|
[
"Brian"
] |
80f4f6817633c1047da2b042c80a644e9a9c18a5ed007a5786668e72bf85431f
|
# 2D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example
from pyamg import *
from scipy import *
# retrieve the problem
data = load_example('unit_square')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']
# perform smoothed aggregation
ml = smoothed_aggregation_solver(A,keep=True,max_coarse=10)
b = sin(pi*V[:,0])*sin(pi*V[:,1])
x = ml.solve(b)
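# Editor's note: the two lines below are an optional sanity check that was not
# in the original example; they report how well ml.solve() reduced the residual.
r = b - A*x
print('residual max-norm: %g' % abs(r).max())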
# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V,
Agg=ml.levels[0].AggOp, mesh_type='tri',
output='vtk', fname='output_aggs.vtu')
# create the vtk file for mesh and solution
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V,
pdata = x,
mesh_type='tri',
fname='output_mesh.vtu')
# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
# select wireframe representation
# select a better solid color
# open file: output_aggs.vtu
# under display in the object inspector:
# select surface with edges representation
# select a better solid color
# increase line width and point size to see these aggs (if present)
|
pombreda/pyamg
|
Examples/WorkshopCopper12/task2.3.py
|
Python
|
bsd-3-clause
| 1,357
|
[
"ParaView",
"VTK"
] |
c0990c3a306706e1416e0896a6f0696509928b65f6ca295c949783d15542d81f
|
# Copyright (C) 2008 CSC - Scientific Computing Ltd.
"""This module defines an ASE interface to VASP.
Developed on the basis of modules by Jussi Enkovaara and John
Kitchin. The path of the directory containing the pseudopotential
directories (potpaw, potpaw_GGA, potpaw_PBE, ...) should be set
by the environment variable $VASP_PP_PATH.
The user should also set the environment variable $VASP_SCRIPT pointing
to a python script looking something like::
import os
exitcode = os.system('vasp')
Alternatively, the user can set the environment variable $VASP_COMMAND pointing
to the command used to launch vasp, e.g. 'vasp' or 'mpirun -n 16 vasp'
http://cms.mpi.univie.ac.at/vasp/
-Jonas Bjork j.bjork@liverpool.ac.uk
"""
import os
import sys
from os.path import join, isfile, islink
import numpy as np
import ase
# Parameters that can be set in INCAR. The values which are None
# are not written and default parameters of VASP are used for them.
keys = [
    'prec', # Precision of calculation (Low, Normal, Accurate)
'nbands', # Number of bands
'encut', # Planewave cutoff
'enaug', # Density cutoff
'ferwe', # Fixed band occupation
'ngx', # FFT mesh for wavefunctions, x
'ngy', # FFT mesh for wavefunctions, y
'ngz', # FFT mesh for wavefunctions, z
'ngxf', # FFT mesh for charges x
'ngyf', # FFT mesh for charges y
'ngzf', # FFT mesh for charges z
'nblk', # blocking for some BLAS calls (Sec. 6.5)
'system', # name of System
'nwrite', # verbosity write-flag (how much is written)
'istart', # startjob: 0-new 1-cont 2-samecut
'icharg', # charge: 1-file 2-atom 10-const
'iniwav', # initial electr wf. : 0-lowe 1-rand
'nelm', #
'nbands', #
'nelmdl', # nr. of electronic steps
'ediff', # stopping-criterion for electronic upd.
'ediffg', # stopping-criterion for ionic upd.
'nsw', # number of steps for ionic upd.
    'nfree', # number of steps per DOF when calculating Hessian using finite differences
'ibrion', # ionic relaxation: 0-MD 1-quasi-New 2-CG
'isif', # calculate stress and what to relax
'iwavpr', # prediction of wf.: 0-non 1-charg 2-wave 3-comb
'isym', # symmetry: 0-nonsym 1-usesym
    'symprec', # precision in symmetry routines
'lcorr', # Harris-correction to forces
'potim', # time-step for ion-motion (fs)
'tebeg', #
'teend', # temperature during run
'smass', # Nose mass-parameter (am)
'pomass', # mass of ions in am
'zval', # ionic valence
'rwigs', # Wigner-Seitz radii
'nelect', # total number of electrons
'nupdown', # fix spin moment to specified value
'emin', #
'emax', # energy-range for DOSCAR file
'ismear', # part. occupancies: -5 Blochl -4-tet -1-fermi 0-gaus >0 MP
'sigma', # broadening in eV -4-tet -1-fermi 0-gaus
'algo', # algorithm: Normal (Davidson) | Fast | Very_Fast (RMM-DIIS)
'ialgo', # algorithm: use only 8 (CG) or 48 (RMM-DIIS)
'lreal', # non-local projectors in real space
'ropt', # number of grid points for non-local proj in real space
'gga', # xc-type: PW PB LM or 91
'voskown', # use Vosko, Wilk, Nusair interpolation
    'dipol', # center of cell for dipole
    'idipol', # monopole/dipole and quadrupole corrections
'ldipol', # potential correction mode
'amix', #
'bmix', # tags for mixing
'time', # special control tag
'lwave', #
'lcharg', #
'lvtot', # create WAVECAR/CHGCAR/LOCPOT
'lelf', # create ELFCAR
'lorbit', # create PROOUT
'npar', # parallelization over bands
'nsim', # evaluate NSIM bands simultaneously if using RMM-DIIS
'lscalapack', # switch off scaLAPACK
'lscalu', # switch of LU decomposition
    'lasync', # overlap communication with calculations
'addgrid', # finer grid for augmentation charge density
'lplane', # parallelisation over the FFT grid
'lpard', # evaluate partial (band and/or k-point) decomposed charge density
'iband', # bands to calculate partial charge for
'eint', # energy range to calculate partial charge for
'nbmod', # specifies mode for partial charge calculation
'kpuse', # k-point to calculate partial charge for
    'lsepb', # write out partial charge of each band separately?
    'lsepk', # write out partial charge of each k-point separately?
'ispin', # spin-polarized calculation
'magmom', # initial magnetic moments
'ispin', # spin-polarized calculation
'lhfcalc', # switch to turn on Hartree Fock calculations
'hfscreen', # attribute to change from PBE0 to HSE
'aexx', # Amount of exact/DFT exchange
'encutfock', # FFT grid in the HF related routines
'nkred', # define sub grid of q-points for HF with nkredx=nkredy=nkredz
'nkredx', # define sub grid of q-points in x direction for HF
'nkredy', # define sub grid of q-points in y direction for HF
'nkredz', # define sub grid of q-points in z direction for HF
# 'NBLOCK' and KBLOCK inner block; outer block
# 'NPACO' and APACO distance and nr. of slots for P.C.
# 'WEIMIN, EBREAK, DEPER special control tags
]
class Vasp:
def __init__(self, restart=None, **kwargs):
self.incar_parameters = {}
for key in keys:
self.incar_parameters[key] = None
self.incar_parameters['prec'] = 'Normal'
self.input_parameters = {
'xc': 'PW91', # exchange correlation potential
'setups': None, # Special setups (e.g pv, sv, ...)
'txt': '-', # Where to send information
'kpts': (1,1,1), # k-points
'gamma': False, # Option to use gamma-sampling instead
# of Monkhorst-Pack
}
self.restart = restart
if restart:
self.restart_load()
return
        self.positions = None
        self.nbands = self.incar_parameters['nbands']
        self.atoms = None
        self.set(**kwargs)
        # Validate xc after the user keywords have been applied; checking the
        # default value beforehand would never trigger the error.
        if self.input_parameters['xc'] not in ['PW91', 'LDA', 'PBE']:
            raise ValueError(
                '%s not supported for xc! use one of: PW91, LDA or PBE.' %
                self.input_parameters['xc'])
def set(self, **kwargs):
for key in kwargs:
if self.input_parameters.has_key(key):
self.input_parameters[key] = kwargs[key]
elif self.incar_parameters.has_key(key):
self.incar_parameters[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: ' + key)
def update(self, atoms):
if (self.positions is None or
(self.positions != atoms.get_positions()).any() or
(self.incar_parameters != self.old_incar_parameters) or
(self.input_parameters != self.old_input_parameters) or
not self.converged
):
self.initialize(atoms)
self.calculate(atoms)
def initialize(self, atoms):
"""Initialize a VASP calculation
Constructs the POTCAR file. User should specify the PATH
to the pseudopotentials in VASP_PP_PATH environment variable"""
p = self.input_parameters
self.all_symbols = atoms.get_chemical_symbols()
self.natoms = len(atoms)
self.spinpol = atoms.get_initial_magnetic_moments().any()
atomtypes = atoms.get_chemical_symbols()
# Determine the number of atoms of each atomic species
# sorted after atomic species
special_setups = []
symbols = {}
if self.input_parameters['setups']:
for m in self.input_parameters['setups']:
try :
#special_setup[self.input_parameters['setups'][m]] = int(m)
special_setups.append(int(m))
except:
#print 'setup ' + m + ' is a groups setup'
continue
#print 'special_setups' , special_setups
for m,atom in enumerate(atoms):
symbol = atom.get_symbol()
if m in special_setups:
pass
else:
if not symbols.has_key(symbol):
symbols[symbol] = 1
else:
symbols[symbol] += 1
# Build the sorting list
self.sort = []
self.sort.extend(special_setups)
for symbol in symbols:
for m,atom in enumerate(atoms):
if m in special_setups:
pass
else:
if atom.get_symbol() == symbol:
self.sort.append(m)
self.resort = range(len(self.sort))
for n in range(len(self.resort)):
self.resort[self.sort[n]] = n
self.atoms_sorted = atoms[self.sort]
# Check if the necessary POTCAR files exists and
# create a list of their paths.
self.symbol_count = []
for m in special_setups:
self.symbol_count.append([atomtypes[m],1])
for m in symbols:
self.symbol_count.append([m,symbols[m]])
print 'self.symbol_count',self.symbol_count
xc = '/'
#print 'p[xc]',p['xc']
if p['xc'] == 'PW91':
xc = '_gga/'
elif p['xc'] == 'PBE':
xc = '_pbe/'
if 'VASP_PP_PATH' in os.environ:
pppaths = os.environ['VASP_PP_PATH'].split(':')
else:
pppaths = []
self.ppp_list = []
#Setting the pseudopotentials, first special setups and
# then according to symbols
for m in special_setups:
name = 'potpaw'+xc.upper() + p['setups'][str(m)] + '/POTCAR'
found = False
for path in pppaths:
filename = join(path, name)
#print 'filename', filename
if isfile(filename) or islink(filename):
found = True
self.ppp_list.append(filename)
break
elif isfile(filename + '.Z') or islink(filename + '.Z'):
found = True
self.ppp_list.append(filename+'.Z')
break
if not found:
                raise RuntimeError('No pseudopotential for %s!' % atomtypes[m])
#print 'symbols', symbols
for symbol in symbols:
try:
name = 'potpaw'+xc.upper()+symbol + p['setups'][symbol]
except (TypeError, KeyError):
name = 'potpaw' + xc.upper() + symbol
name += '/POTCAR'
found = False
for path in pppaths:
filename = join(path, name)
#print 'filename', filename
if isfile(filename) or islink(filename):
found = True
self.ppp_list.append(filename)
break
elif isfile(filename + '.Z') or islink(filename + '.Z'):
found = True
self.ppp_list.append(filename+'.Z')
break
if not found:
raise RuntimeError('No pseudopotential for %s!' % symbol)
self.converged = None
self.setups_changed = None
def calculate(self, atoms):
"""Generate necessary files in the working directory.
If the directory does not exist it will be created.
"""
positions = atoms.get_positions()
from ase.io.vasp import write_vasp
write_vasp('POSCAR', self.atoms_sorted, symbol_count = self.symbol_count)
self.write_incar(atoms)
self.write_potcar()
self.write_kpoints()
self.write_sort_file()
stderr = sys.stderr
p=self.input_parameters
if p['txt'] is None:
            sys.stderr = open(os.devnull, 'w')
elif p['txt'] == '-':
pass
elif isinstance(p['txt'], str):
sys.stderr = open(p['txt'], 'w')
if os.environ.has_key('VASP_COMMAND'):
vasp = os.environ['VASP_COMMAND']
exitcode = os.system(vasp)
elif os.environ.has_key('VASP_SCRIPT'):
vasp = os.environ['VASP_SCRIPT']
locals={}
execfile(vasp, {}, locals)
exitcode = locals['exitcode']
else:
raise RuntimeError('Please set either VASP_COMMAND or VASP_SCRIPT environment variable')
sys.stderr = stderr
if exitcode != 0:
raise RuntimeError('Vasp exited with exit code: %d. ' % exitcode)
atoms_sorted = ase.io.read('CONTCAR', format='vasp')
p=self.incar_parameters
if p['ibrion']>-1 and p['nsw']>0:
atoms.set_positions(atoms_sorted.get_positions()[self.resort])
self.positions = atoms.get_positions()
self.energy_free, self.energy_zero = self.read_energy()
self.forces = self.read_forces(atoms)
self.dipole = self.read_dipole()
self.fermi = self.read_fermi()
self.atoms = atoms.copy()
self.nbands = self.read_nbands()
if self.spinpol:
self.magnetic_moment = self.read_magnetic_moment()
if p['lorbit']>=10 or (p['lorbit']!=None and p['rwigs']):
self.magnetic_moments = self.read_magnetic_moments(atoms)
self.old_incar_parameters = self.incar_parameters.copy()
self.old_input_parameters = self.input_parameters.copy()
self.converged = self.read_convergence()
def restart_load(self):
"""Method which is called upon restart."""
# Try to read sorting file
if os.path.isfile('ase-sort.dat'):
self.sort = []
self.resort = []
file = open('ase-sort.dat', 'r')
lines = file.readlines()
file.close()
for line in lines:
data = line.split()
self.sort.append(int(data[0]))
self.resort.append(int(data[1]))
self.atoms = ase.io.read('CONTCAR', format='vasp')[self.resort]
else:
self.atoms = ase.io.read('CONTCAR', format='vasp')
self.sort = range(len(self.atoms))
self.resort = range(len(self.atoms))
self.positions = self.atoms.get_positions()
self.read_incar()
self.read_outcar()
self.read_kpoints()
self.old_incar_parameters = self.incar_parameters.copy()
self.old_input_parameters = self.input_parameters.copy()
self.converged = self.read_convergence()
def clean(self):
"""Method which cleans up after a calculation.
The default files generated by Vasp will be deleted IF this
method is called.
"""
files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR', 'DOSCAR',
'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR', 'OUTCAR', 'PCDAT',
'POTCAR', 'vasprun.xml', 'WAVECAR', 'XDATCAR']
for f in files:
try:
os.remove(f)
except OSError:
pass
def set_atoms(self, atoms):
self.atoms = atoms.copy()
def get_atoms(self):
atoms = self.atoms.copy()
atoms.set_calculator(self)
return atoms
def get_potential_energy(self, atoms, force_consistent=False):
self.update(atoms)
if force_consistent:
return self.energy_free
else:
return self.energy_zero
def get_forces(self, atoms):
self.update(atoms)
return self.forces
def get_stress(self, atoms):
raise NotImplementedError
def calculation_required(self,atoms, quantities):
raise NotImplementedError
def get_number_of_bands(self):
return self.nbands
def get_k_point_weights(self):
self.update(self.atoms)
return self.read_k_point_weights()
def get_number_of_spins(self):
return 1 + int(self.spinpol)
def get_eigenvalues(self, kpt=0, spin=0):
self.update(self.atoms)
return self.read_eigenvalues(kpt, spin)
def get_fermi_level(self):
return self.fermi
def get_number_of_grid_points(self):
raise NotImplementedError
def get_pseudo_density(self):
raise NotImplementedError
def get_pseudo_wavefunction(self, n=0, k=0, s=0, pad=True):
raise NotImplementedError
def get_bz_k_points(self):
raise NotImplementedError
def get_ibz_kpoints(self):
self.update(self.atoms)
return self.read_ibz_kpoints()
def get_spin_polarized(self):
if not hasattr(self, 'spinpol'):
self.spinpol = self.atoms.get_initial_magnetic_moments().any()
return self.spinpol
def get_magnetic_moment(self, atoms):
self.update(atoms)
return self.magnetic_moment
def get_magnetic_moments(self, atoms):
p=self.incar_parameters
if p['lorbit']>=10 or p['rwigs']:
self.update(atoms)
return self.magnetic_moments
else:
raise RuntimeError(
"The combination %s for lorbit with %s for rwigs not supported to calculate magnetic moments" % (p['lorbit'], p['rwigs']))
def get_dipole_moment(self, atoms):
"""Returns total dipole moment of the system."""
self.update(atoms)
return self.dipole
def get_number_of_bands(self):
return self.nbands
def get_xc_functional(self):
return self.input_parameters['xc']
def write_incar(self, atoms, **kwargs):
"""Writes the INCAR file."""
p = self.incar_parameters
incar = open('INCAR', 'w')
incar.write('INCAR created by Atomic Simulation Environment\n')
for key, val in p.items():
if val is not None:
incar.write(' '+key.upper()+' = ')
# special cases:
if key in ('dipol', 'eint'):
[incar.write('%.4f ' % x) for x in val]
elif key in ('iband', 'kpuse'):
[incar.write('%i ' % x) for x in val]
elif key == 'rwigs':
[incar.write('%.4f ' % rwigs) for rwigs in val]
if len(val) != len(self.symbol_count):
                        raise RuntimeError('Incorrect number of Wigner-Seitz radii (rwigs)')
else:
if type(val)==type(bool()):
if val:
incar.write('.TRUE.')
else:
incar.write('.FALSE.')
else:
incar.write('%s' % p[key])
incar.write('\n')
if self.spinpol and not p['ispin']:
incar.write(' ispin = 2\n'.upper())
# Write out initial magnetic moments
magmom = atoms.get_initial_magnetic_moments()[self.sort]
list = [[1, magmom[0]]]
for n in range(1, len(magmom)):
if magmom[n] == magmom[n-1]:
list[-1][0] += 1
else:
list.append([1, magmom[n]])
incar.write(' magmom = '.upper())
[incar.write('%i*%.4f ' % (mom[0], mom[1])) for mom in list]
incar.write('\n')
incar.close()
def write_kpoints(self, **kwargs):
"""Writes the KPOINTS file."""
p = self.input_parameters
kpoints = open('KPOINTS', 'w')
        kpoints.write('KPOINTS created by Atomic Simulation Environment\n')
shape=np.array(p['kpts']).shape
if len(shape)==1:
kpoints.write('0\n')
if p['gamma']:
kpoints.write('Gamma\n')
else:
kpoints.write('Monkhorst-Pack\n')
[kpoints.write('%i ' % kpt) for kpt in p['kpts']]
kpoints.write('\n0 0 0')
elif len(shape)==2:
kpoints.write('%i \n' % (len(p['kpts'])))
kpoints.write('Cartesian\n')
for n in range(len(p['kpts'])):
[kpoints.write('%f ' % kpt) for kpt in p['kpts'][n]]
if shape[1]==4:
kpoints.write('\n')
elif shape[1]==3:
kpoints.write('1.0 \n')
kpoints.close()
def write_potcar(self):
"""Writes the POTCAR file."""
import tempfile
potfile = open('POTCAR','w')
for filename in self.ppp_list:
if filename.endswith('R'):
for line in open(filename, 'r'):
potfile.write(line)
elif filename.endswith('.Z'):
file_tmp = tempfile.NamedTemporaryFile()
os.system('gunzip -c %s > %s' % (filename, file_tmp.name))
for line in file_tmp.readlines():
potfile.write(line)
file_tmp.close()
potfile.close()
def write_sort_file(self):
"""Writes a sortings file.
This file contains information about how the atoms are sorted in
the first column and how they should be resorted in the second
column. It is used for restart purposes to get sorting right
when reading in an old calculation to ASE."""
file = open('ase-sort.dat', 'w')
for n in range(len(self.sort)):
file.write('%5i %5i \n' % (self.sort[n], self.resort[n]))
# Methods for reading information from OUTCAR files:
def read_energy(self, all=None):
[energy_free, energy_zero]=[0, 0]
if all:
energy_free = []
energy_zero = []
for line in open('OUTCAR', 'r'):
# Free energy
if line.startswith(' free energy toten'):
if all:
energy_free.append(float(line.split()[-2]))
else:
energy_free = float(line.split()[-2])
# Extrapolated zero point energy
if line.startswith(' energy without entropy'):
if all:
energy_zero.append(float(line.split()[-1]))
else:
energy_zero = float(line.split()[-1])
return [energy_free, energy_zero]
def read_forces(self, atoms, all=False):
"""Method that reads forces from OUTCAR file.
        If 'all' is switched on, the forces for all ionic steps
        in the OUTCAR file are returned; otherwise only the
        forces for the last ionic configuration are returned."""
file = open('OUTCAR','r')
lines = file.readlines()
file.close()
n=0
if all:
all_forces = []
for line in lines:
if line.rfind('TOTAL-FORCE') > -1:
forces=[]
for i in range(len(atoms)):
forces.append(np.array([float(f) for f in lines[n+2+i].split()[3:6]]))
if all:
all_forces.append(np.array(forces)[self.resort])
n+=1
if all:
return np.array(all_forces)
else:
return np.array(forces)[self.resort]
def read_fermi(self):
"""Method that reads Fermi energy from OUTCAR file"""
E_f=None
for line in open('OUTCAR', 'r'):
if line.rfind('E-fermi') > -1:
E_f=float(line.split()[2])
return E_f
def read_dipole(self):
dipolemoment=np.zeros([1,3])
for line in open('OUTCAR', 'r'):
if line.rfind('dipolmoment') > -1:
dipolemoment=np.array([float(f) for f in line.split()[1:4]])
return dipolemoment
def read_magnetic_moments(self,atoms):
file = open('OUTCAR', 'r')
lines = file.readlines()
        file.close()
magnetic_moments=np.zeros(len(atoms))
n=0
for line in lines:
if line.rfind('magnetization (x)') > -1:
for m in range(0,len(atoms)):
magnetic_moments[m]=float(lines[n+m+4].split()[4])
n+=1
return np.array(magnetic_moments)[self.resort]
def read_magnetic_moment(self):
n=0
for line in open('OUTCAR','r'):
if line.rfind('number of electron ') > -1:
magnetic_moment=float(line.split()[-1])
n+=1
return magnetic_moment
def read_nbands(self):
for line in open('OUTCAR', 'r'):
if line.rfind('NBANDS') > -1:
return int(line.split()[-1])
def read_convergence(self):
"""Method that checks whether a calculation has converged."""
converged = None
# First check electronic convergence
for line in open('OUTCAR', 'r'):
if line.rfind('EDIFF ') > -1:
ediff = float(line.split()[2])
if line.rfind('total energy-change')>-1:
split = line.split(':')
a = float(split[1].split('(')[0])
b = float(split[1].split('(')[1][0:-2])
if [abs(a), abs(b)] < [ediff, ediff]:
converged = True
else:
converged = False
continue
        # Then if ibrion > 0, check whether the ionic relaxation condition has been fulfilled
if self.incar_parameters['ibrion'] > 0:
ediffg = self.incar_parameters['ediffg']
if ediffg < 0:
for force in self.forces:
                    if np.linalg.norm(force) >= abs(ediffg):
                        converged = False
                        break
                    else:
                        converged = True
elif self.incar_parameters['ediffg'] > 0:
raise NotImplementedError('Method not implemented for ediffg>0')
return converged
def read_ibz_kpoints(self):
lines = open('OUTCAR', 'r').readlines()
ibz_kpts = []
n = 0
i = 0
for line in lines:
if line.rfind('Following cartesian coordinates')>-1:
m = n+2
while i==0:
ibz_kpts.append([float(lines[m].split()[p]) for p in range(3)])
m += 1
if lines[m]==' \n':
i = 1
if i == 1:
continue
n += 1
ibz_kpts = np.array(ibz_kpts)
return np.array(ibz_kpts)
def read_k_point_weights(self):
file = open('IBZKPT')
lines = file.readlines()
file.close()
kpt_weights = []
for n in range(3, len(lines)):
kpt_weights.append(float(lines[n].split()[3]))
kpt_weights = np.array(kpt_weights)
kpt_weights /= np.sum(kpt_weights)
return kpt_weights
def read_eigenvalues(self, kpt=0, spin=0):
file = open('EIGENVAL', 'r')
lines = file.readlines()
file.close()
eigs = []
for n in range(8+kpt*(self.nbands+2), 8+kpt*(self.nbands+2)+self.nbands):
eigs.append(float(lines[n].split()[spin+1]))
return np.array(eigs)
    # The below functions are used to restart a calculation and are under early construction
def read_incar(self, filename='INCAR'):
file=open(filename, 'r')
file.readline()
lines=file.readlines()
for line in lines:
try:
key = line.split()[0].lower()
if key in ['ispin', 'magmom']:
continue
self.incar_parameters[key]
if key=='dipol':
dipol=[]
for n in range(3):
dipol.append(float(line.split()[n+2]))
self.incar_parameters[key] = dipol
else:
try:
self.incar_parameters[key] = int(line.split()[2])
except ValueError:
try:
self.incar_parameters[key] = float(line.split()[2])
except ValueError:
self.incar_parameters[key] = line.split()[2]
except KeyError:
continue
except IndexError:
continue
def read_outcar(self):
# Spin polarized calculation?
file = open('OUTCAR', 'r')
lines = file.readlines()
file.close()
for line in lines:
if line.rfind('ISPIN') > -1:
if int(line.split()[2])==2:
self.spinpol = True
else:
self.spinpol = None
self.energy_free, self.energy_zero = self.read_energy()
self.forces = self.read_forces(self.atoms)
self.dipole = self.read_dipole()
self.fermi = self.read_fermi()
self.nbands = self.read_nbands()
p=self.incar_parameters
if self.spinpol:
self.magnetic_moment = self.read_magnetic_moment()
if p['lorbit']>=10 or (p['lorbit']!=None and p['rwigs']):
self.magnetic_moments = self.read_magnetic_moments(self.atoms)
self.set(nbands=self.nbands)
def read_kpoints(self, filename='KPOINTS'):
file = open(filename, 'r')
lines = file.readlines()
file.close()
type = lines[2].split()[0].lower()[0]
if type in ['g', 'm']:
if type=='g':
self.set(gamma=True)
kpts = np.array([int(lines[3].split()[i]) for i in range(3)])
self.set(kpts=kpts)
elif type in ['c', 'k']:
raise NotImplementedError('Only Monkhorst-Pack and gamma centered grid supported for restart.')
else:
raise NotImplementedError('Only Monkhorst-Pack and gamma centered grid supported for restart.')
class VaspChargeDensity(object):
"""Class for representing VASP charge density"""
def __init__(self, filename='CHG'):
# Instance variables
self.atoms = [] # List of Atoms objects
self.chg = [] # Charge density
self.chgdiff = [] # Charge density difference, if spin polarized
self.aug = '' # Augmentation charges, not parsed just a big string
        self.augdiff = '' # Augmentation charge difference, if spin polarized
# Note that the augmentation charge is not a list, since they
# are needed only for CHGCAR files which store only a single
# image.
if filename != None:
self.read(filename)
def is_spin_polarized(self):
if len(self.chgdiff) > 0:
return True
return False
def _read_chg(self, fobj, chg, volume):
"""Read charge from file object
Utility method for reading the actual charge density (or
charge density difference) from a file object. On input, the
file object must be at the beginning of the charge block, on
output the file position will be left at the end of the
block. The chg array must be of the correct dimensions.
"""
# VASP writes charge density as
# WRITE(IU,FORM) (((C(NX,NY,NZ),NX=1,NGXC),NY=1,NGYZ),NZ=1,NGZC)
# Fortran nested implied do loops; innermost index fastest
# First, just read it in
for zz in range(chg.shape[2]):
for yy in range(chg.shape[1]):
chg[:, yy, zz] = np.fromfile(fobj, count = chg.shape[0],
sep=' ')
chg /= volume
def read(self, filename='CHG'):
"""Read CHG or CHGCAR file.
If CHG contains charge density from multiple steps all the
steps are read and stored in the object. By default VASP
writes out the charge density every 10 steps.
        chgdiff is the difference between the spin up charge density
        and the spin down charge density and is thus only read for a
spin-polarized calculation.
aug is the PAW augmentation charges found in CHGCAR. These are
not parsed, they are just stored as a string so that they can
be written again to a CHGCAR format file.
"""
import ase.io.vasp as aiv
f = open(filename)
self.atoms = []
self.chg = []
self.chgdiff = []
self.aug = ''
self.augdiff = ''
while True:
try:
atoms = aiv.read_vasp(f)
except ValueError, e:
# Probably an empty line, or we tried to read the
# augmentation occupancies in CHGCAR
break
f.readline()
ngr = f.readline().split()
ng = (int(ngr[0]), int(ngr[1]), int(ngr[2]))
chg = np.empty(ng)
self._read_chg(f, chg, atoms.get_volume())
self.chg.append(chg)
self.atoms.append(atoms)
# Check if the file has a spin-polarized charge density part, and
# if so, read it in.
fl = f.tell()
# First check if the file has an augmentation charge part (CHGCAR file.)
line1 = f.readline()
if line1=='':
break
elif line1.find('augmentation') != -1:
augs = [line1]
while True:
line2 = f.readline()
if line2.split() == ngr:
self.aug = ''.join(augs)
augs = []
chgdiff = np.empty(ng)
self._read_chg(f, chgdiff, atoms.get_volume())
self.chgdiff.append(chgdiff)
elif line2 == '':
break
else:
augs.append(line2)
if len(self.aug) == 0:
self.aug = ''.join(augs)
augs = []
else:
self.augdiff = ''.join(augs)
augs = []
elif line1.split() == ngr:
chgdiff = np.empty(ng)
self._read_chg(f, chgdiff, atoms.get_volume())
self.chgdiff.append(chgdiff)
else:
f.seek(fl)
f.close()
def _write_chg(self, fobj, chg, volume, format='chg'):
"""Write charge density
Utility function similar to _read_chg but for writing.
"""
# Make a 1D copy of chg, must take transpose to get ordering right
chgtmp=chg.T.ravel()
# Multiply by volume
chgtmp=chgtmp*volume
# Must be a tuple to pass to string conversion
chgtmp=tuple(chgtmp)
# CHG format - 10 columns
if format.lower() == 'chg':
# Write all but the last row
for ii in range((len(chgtmp)-1)/10):
fobj.write(' %#11.5G %#11.5G %#11.5G %#11.5G %#11.5G\
%#11.5G %#11.5G %#11.5G %#11.5G %#11.5G\n' % chgtmp[ii*10:(ii+1)*10]
)
# If the last row contains 10 values then write them without a newline
if len(chgtmp)%10==0:
fobj.write(' %#11.5G %#11.5G %#11.5G %#11.5G %#11.5G\
%#11.5G %#11.5G %#11.5G %#11.5G %#11.5G' % chgtmp[len(chgtmp)-10:len(chgtmp)])
# Otherwise write fewer columns without a newline
else:
for ii in range(len(chgtmp)%10):
fobj.write((' %#11.5G') % chgtmp[len(chgtmp)-len(chgtmp)%10+ii])
# Other formats - 5 columns
else:
# Write all but the last row
for ii in range((len(chgtmp)-1)/5):
fobj.write(' %17.10E %17.10E %17.10E %17.10E %17.10E\n' % chgtmp[ii*5:(ii+1)*5])
# If the last row contains 5 values then write them without a newline
if len(chgtmp)%5==0:
fobj.write(' %17.10E %17.10E %17.10E %17.10E %17.10E' % chgtmp[len(chgtmp)-5:len(chgtmp)])
# Otherwise write fewer columns without a newline
else:
for ii in range(len(chgtmp)%5):
fobj.write((' %17.10E') % chgtmp[len(chgtmp)-len(chgtmp)%5+ii])
# Write a newline whatever format it is
fobj.write('\n')
# Clean up
del chgtmp
def write(self, filename='CHG', format=None):
"""Write VASP charge density in CHG format.
filename: str
Name of file to write to.
format: str
String specifying whether to write in CHGCAR or CHG
format.
"""
import ase.io.vasp as aiv
if format == None:
if filename.lower().find('chgcar') != -1:
format = 'chgcar'
elif filename.lower().find('chg') != -1:
format = 'chg'
elif len(self.chg) == 1:
format = 'chgcar'
else:
format = 'chg'
f = open(filename, 'w')
for ii, chg in enumerate(self.chg):
if format == 'chgcar' and ii != len(self.chg) - 1:
continue # Write only the last image for CHGCAR
aiv.write_vasp(f, self.atoms[ii], direct=True)
f.write('\n')
for dim in chg.shape:
f.write(' %4i' % dim)
f.write('\n')
vol = self.atoms[ii].get_volume()
self._write_chg(f, chg, vol, format)
if format == 'chgcar':
f.write(self.aug)
if self.is_spin_polarized():
if format == 'chg':
f.write('\n')
for dim in chg.shape:
f.write(' %4i' % dim)
self._write_chg(f, self.chgdiff[ii], vol, format)
if format == 'chgcar':
f.write('\n')
f.write(self.augdiff)
if format == 'chg' and len(self.chg) > 1:
f.write('\n')
f.close()
class VaspDos(object):
"""Class for representing density-of-states produced by VASP
The energies are in property self.energy
    Site-projected DOS is accessible via the self.site_dos method.
    Total and integrated DOS is accessible as numpy.ndarrays in the
properties self.dos and self.integrated_dos. If the calculation is
spin polarized, the arrays will be of shape (2, NDOS), else (1,
NDOS).
The self.efermi property contains the currently set Fermi
level. Changing this value shifts the energies.
"""
def __init__(self, doscar='DOSCAR', efermi=0.0):
"""Initialize"""
self._efermi = 0.0
self.read_doscar(doscar)
self.efermi = efermi
def _set_efermi(self, efermi):
"""Set the Fermi level."""
ef = efermi - self._efermi
self._efermi = efermi
self._total_dos[0, :] = self._total_dos[0, :] - ef
try:
self._site_dos[:, 0, :] = self._site_dos[:, 0, :] - ef
except IndexError:
pass
def _get_efermi(self):
return self._efermi
efermi = property(_get_efermi, _set_efermi, None, "Fermi energy.")
def _get_energy(self):
"""Return the array with the energies."""
return self._total_dos[0, :]
energy = property(_get_energy, None, None, "Array of energies")
def site_dos(self, atom, orbital):
"""Return an NDOSx1 array with dos for the chosen atom and orbital.
If spin-unpolarized calculation, no phase factors:
s = 1, p = 2, d = 3
Spin-polarized, no phase factors:
s-up = 1, s-down = 2, p-up = 3, p-down = 4, d-up = 5, d-down = 6
If phase factors have been calculated, orbitals are
s, py, pz, px, dxy, dyz, dz2, dxz, dx2
double in the above fashion if spin polarized.
"""
return self._site_dos[atom, orbital, :]
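    # (Editor's note) Example of the indexing convention documented above, for a
    # spin-polarized calculation without phase factors, on a hypothetical
    # instance called `vaspdos`:
    #   vaspdos.site_dos(0, 5)   # d-up DOS of the first atom
    #   vaspdos.site_dos(0, 6)   # d-down DOS of the first atom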
def _get_dos(self):
if self._total_dos.shape[0] == 3:
return self._total_dos[1, :]
elif self._total_dos.shape[0] == 5:
return self._total_dos[1:3, :]
dos = property(_get_dos, None, None, 'Average DOS in cell')
def _get_integrated_dos(self):
if self._total_dos.shape[0] == 3:
return self._total_dos[2, :]
elif self._total_dos.shape[0] == 5:
return self._total_dos[3:5, :]
integrated_dos = property(_get_integrated_dos, None, None,
'Integrated average DOS in cell')
def read_doscar(self, fname="DOSCAR"):
"""Read a VASP DOSCAR file"""
f = open(fname)
natoms = int(f.readline().split()[0])
[f.readline() for nn in range(4)] # Skip next 4 lines.
# First we have a block with total and total integrated DOS
ndos = int(f.readline().split()[2])
dos = []
for nd in xrange(ndos):
dos.append(np.array([float(x) for x in f.readline().split()]))
self._total_dos = np.array(dos).T
# Next we have one block per atom, if INCAR contains the stuff
# necessary for generating site-projected DOS
dos = []
for na in xrange(natoms):
line = f.readline()
if line == '':
# No site-projected DOS
break
ndos = int(line.split()[2])
line = f.readline().split()
cdos = np.empty((ndos, len(line)))
cdos[0] = np.array(line)
for nd in xrange(1, ndos):
line = f.readline().split()
cdos[nd] = np.array([float(x) for x in line])
dos.append(cdos.T)
self._site_dos = np.array(dos)
import pickle
class xdat2traj:
def __init__(self, trajectory=None, atoms=None, poscar=None,
xdatcar=None, sort=None, calc=None):
if not poscar:
self.poscar = 'POSCAR'
else:
self.poscar = poscar
if not atoms:
self.atoms = ase.io.read(self.poscar, format='vasp')
else:
self.atoms = atoms
if not xdatcar:
self.xdatcar = 'XDATCAR'
else:
self.xdatcar = xdatcar
if not trajectory:
self.trajectory = 'out.traj'
else:
self.trajectory = trajectory
if not calc:
self.calc = Vasp()
else:
self.calc = calc
if not sort:
if not hasattr(self.calc, 'sort'):
self.calc.sort = range(len(self.atoms))
else:
self.calc.sort = sort
self.calc.resort = range(len(self.calc.sort))
for n in range(len(self.calc.resort)):
self.calc.resort[self.calc.sort[n]] = n
self.out = ase.io.trajectory.PickleTrajectory(self.trajectory, mode='w')
self.energies = self.calc.read_energy(all=True)[1]
self.forces = self.calc.read_forces(self.atoms, all=True)
def convert(self):
lines = open(self.xdatcar).readlines()
del(lines[0:6])
step = 0
iatom = 0
scaled_pos = []
for line in lines:
if iatom == len(self.atoms):
if step == 0:
self.out.write_header(self.atoms[self.calc.resort])
scaled_pos = np.array(scaled_pos)
self.atoms.set_scaled_positions(scaled_pos)
d = {'positions': self.atoms.get_positions()[self.calc.resort],
'cell': self.atoms.get_cell(),
'momenta': None,
'energy': self.energies[step],
'forces': self.forces[step],
'stress': None}
pickle.dump(d, self.out.fd, protocol=-1)
scaled_pos = []
iatom = 0
step += 1
else:
iatom += 1
scaled_pos.append([float(line.split()[n]) for n in range(3)])
# Write also the last image
        # I'm sure there is also a more clever fix...
scaled_pos = np.array(scaled_pos)
self.atoms.set_scaled_positions(scaled_pos)
d = {'positions': self.atoms.get_positions()[self.calc.resort],
'cell': self.atoms.get_cell(),
'momenta': None,
'energy': self.energies[step],
'forces': self.forces[step],
'stress': None}
pickle.dump(d, self.out.fd, protocol=-1)
self.out.fd.close()
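if __name__ == '__main__':
    # Editor's sketch, not part of the original module: a minimal illustration
    # of driving the Vasp calculator from ASE.  It assumes a working VASP
    # installation, $VASP_PP_PATH pointing at the pseudopotential directories,
    # and $VASP_COMMAND (or $VASP_SCRIPT) set as described in the module
    # docstring.  The molecule and settings below are only examples.
    from ase import Atoms
    atoms = Atoms('N2', positions=[(0., 0., 0.), (0., 0., 1.1)],
                  cell=(8., 8., 8.), pbc=True)
    calc = Vasp(xc='PBE', prec='Accurate', ismear=0, sigma=0.05, kpts=(1, 1, 1))
    atoms.set_calculator(calc)
    print 'Potential energy:', atoms.get_potential_energy()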
|
freephys/python_ase
|
ase/calculators/vasp.py
|
Python
|
gpl-3.0
| 44,651
|
[
"ASE",
"VASP"
] |
8ec1e5cc4fe7ad5b7947d5b2ceacb8403764ce373edd0db32a06489b11ddaed6
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import math
from neon.backends.cpu import CPU
from neon.params.val_init import (UniformValGen, AutoUniformValGen,
GaussianValGen, NormalValGen,
SparseEigenValGen, NodeNormalizedValGen)
class TestValInit(object):
def __init__(self):
# this code gets called prior to each test
self.be = CPU()
def test_uni_basics(self):
uni = UniformValGen(backend=self.be)
assert str(uni) == ("UniformValGen utilizing CPU backend\n\t"
"low: 0.0, high: 1.0")
def test_uni_gen(self):
uni = UniformValGen(backend=self.be)
res = uni.generate(shape=[1, 1])
assert res.shape == (1, 1)
out = self.be.empty((1, 1))
self.be.min(res, axes=None, out=out)
assert out.asnumpyarray() >= 0.0
self.be.max(res, axes=None, out=out)
assert out.asnumpyarray() < 1.0
def test_uni_params(self):
low = -5.5
high = 10.2
uni = UniformValGen(backend=self.be, low=low, high=high)
assert str(uni) == ("UniformValGen utilizing CPU backend\n\t"
"low: {low}, high: {high}".format(low=low,
high=high))
res = uni.generate(shape=[4, 4])
assert res.shape == (4, 4)
out = self.be.empty((1, 1))
self.be.min(res, axes=None, out=out)
assert out.asnumpyarray() >= low
self.be.max(res, axes=None, out=out)
assert out.asnumpyarray() < high
def test_autouni_gen(self):
autouni = AutoUniformValGen(backend=self.be, relu=True)
assert autouni.relu is True
assert str(autouni) == ("AutoUniformValGen utilizing CPU backend\n\t"
"low: nan, high: nan")
res = autouni.generate([3, 3])
assert res.shape == (3, 3)
out = self.be.empty((1, 1))
self.be.min(res, axes=None, out=out)
expected_val = math.sqrt(2) * (1.0 / math.sqrt(3))
assert out.asnumpyarray() >= - expected_val
self.be.max(res, axes=None, out=out)
assert out.asnumpyarray() < expected_val
def test_gaussian_gen(self):
loc = 5
scale = 2.0
gauss = GaussianValGen(backend=self.be, loc=loc, scale=scale)
assert str(gauss) == ("GaussianValGen utilizing CPU backend\n\t"
"loc: {}, scale: {}".format(loc, scale))
res = gauss.generate([5, 10])
assert res.shape == (5, 10)
# TODO: test distribution of vals to ensure ~gaussian dist
def test_normal_gen(self):
loc = -2.5
scale = 3.0
gauss = NormalValGen(backend=self.be, loc=loc, scale=scale)
assert str(gauss) == ("GaussianValGen utilizing CPU backend\n\t"
"loc: {}, scale: {}".format(loc, scale))
res = gauss.generate([9, 3])
assert res.shape == (9, 3)
# TODO: test distribution of vals to ensure ~gaussian dist
def test_sparseeig_gen(self):
sparseness = 10
eigenvalue = 3.1
eig = SparseEigenValGen(backend=self.be, sparseness=sparseness,
eigenvalue=eigenvalue)
assert str(eig) == ("SparseEigenValGen utilizing CPU backend\n\t"
"sparseness: {}, eigenvalue: "
"{}".format(sparseness, eigenvalue))
res = eig.generate([20, 20])
assert res.shape == (20, 20)
# TODO: test distribution of vals
def test_nodenorm_gen(self):
scale = 3.0
nodenorm = NodeNormalizedValGen(backend=self.be, scale=scale)
assert str(nodenorm) == ("NodeNormalizedValGen utilizing CPU backend"
"\n\tscale: {}".format(scale))
res = nodenorm.generate([8, 9])
assert res.shape == (8, 9)
out = self.be.empty((1, 1))
self.be.min(res, axes=None, out=out)
expected_val = scale * math.sqrt(6) / math.sqrt(8 + 9.)
assert out.asnumpyarray() >= - expected_val
self.be.max(res, axes=None, out=out)
assert out.asnumpyarray() < expected_val
|
kfoss/neon
|
neon/params/tests/test_val_init.py
|
Python
|
apache-2.0
| 4,967
|
[
"Gaussian"
] |
fcb0de445b46233cdb6c9e046468b67218a95548792a1bf5e546e9991a5d0331
|
""" Client for the SandboxStore.
Will connect to the WorkloadManagement/SandboxStore service.
"""
import os
import tarfile
import hashlib
import tempfile
import re
from io import BytesIO, StringIO
from DIRAC import gLogger, S_OK, S_ERROR, gConfig
from DIRAC.Core.Tornado.Client.ClientSelector import TransferClientSelector as TransferClient
from DIRAC.Core.Base.Client import Client
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Utilities.File import getGlobbedTotalSize
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
class SandboxStoreClient:
__validSandboxTypes = ("Input", "Output")
__smdb = None
def __init__(self, rpcClient=None, transferClient=None, smdb=False, **kwargs):
"""Constructor
:param object rpcClient: SandboxStore service client (None by default)
:param object transferClient: client to upload/download sandboxes (None by default)
:param object smdb: SandboxMetadataDB object, or
True if SandboxMetadataDB is to be instantiated for direct access or
False if no direct access to the SandboxMetadataDB is done (default)
"""
self.__serviceName = "WorkloadManagement/SandboxStore"
self.__rpcClient = rpcClient
self.__transferClient = transferClient
self.__kwargs = kwargs
self.__vo = None
SandboxStoreClient.__smdb = smdb
if "delegatedGroup" in kwargs:
self.__vo = getVOForGroup(kwargs["delegatedGroup"])
if SandboxStoreClient.__smdb is True:
try:
from DIRAC.WorkloadManagementSystem.DB.SandboxMetadataDB import SandboxMetadataDB
SandboxStoreClient.__smdb = SandboxMetadataDB()
result = SandboxStoreClient.__smdb._getConnection() # pylint: disable=protected-access
if not result["OK"]:
SandboxStoreClient.__smdb = False
else:
result["Value"].close()
except (ImportError, RuntimeError, AttributeError):
SandboxStoreClient.__smdb = False
def __getRPCClient(self):
"""Get an RPC client for SB service"""
if self.__rpcClient:
return self.__rpcClient
else:
return Client(url=self.__serviceName, **self.__kwargs)
def __getTransferClient(self):
"""Get RPC client for TransferClient"""
if self.__transferClient:
return self.__transferClient
else:
return TransferClient(self.__serviceName, **self.__kwargs)
# Upload sandbox to jobs and pilots
def uploadFilesAsSandboxForJob(self, fileList, jobId, sbType, sizeLimit=0):
"""Upload SB for a job"""
if sbType not in self.__validSandboxTypes:
return S_ERROR("Invalid Sandbox type %s" % sbType)
return self.uploadFilesAsSandbox(fileList, sizeLimit, assignTo={"Job:%s" % jobId: sbType})
def uploadFilesAsSandboxForPilot(self, fileList, jobId, sbType, sizeLimit=0):
"""Upload SB for a pilot"""
if sbType not in self.__validSandboxTypes:
return S_ERROR("Invalid Sandbox type %s" % sbType)
return self.uploadFilesAsSandbox(fileList, sizeLimit, assignTo={"Pilot:%s" % jobId: sbType})
# Upload generic sandbox
def uploadFilesAsSandbox(self, fileList, sizeLimit=0, assignTo=None):
"""Send files in the fileList to a Sandbox service for the given jobID.
This is the preferable method to upload sandboxes.
a fileList item can be:
- a string, which is an lfn name
        - a file name (real) that is supposed to be on disk, in the current directory
- a fileObject that should be a BytesIO type of object
Parameters:
- assignTo : Dict containing { 'Job:<jobid>' : '<sbType>', ... }
"""
errorFiles = []
files2Upload = []
if assignTo is None:
assignTo = {}
for key in assignTo:
if assignTo[key] not in self.__validSandboxTypes:
return S_ERROR("Invalid sandbox type %s" % assignTo[key])
if not isinstance(fileList, (list, tuple)):
return S_ERROR("fileList must be a list or tuple!")
for sFile in fileList:
if isinstance(sFile, str):
if re.search("^lfn:", sFile, flags=re.IGNORECASE):
pass
else:
if os.path.exists(sFile):
files2Upload.append(sFile)
else:
errorFiles.append(sFile)
elif isinstance(sFile, StringIO):
files2Upload.append(sFile)
else:
return S_ERROR("Objects of type %s can't be part of InputSandbox" % type(sFile))
if errorFiles:
return S_ERROR("Failed to locate files: %s" % ", ".join(errorFiles))
try:
fd, tmpFilePath = tempfile.mkstemp(prefix="LDSB.")
os.close(fd)
except Exception as e:
return S_ERROR("Cannot create temporary file: %s" % repr(e))
with tarfile.open(name=tmpFilePath, mode="w|bz2") as tf:
for sFile in files2Upload:
if isinstance(sFile, str):
tf.add(os.path.realpath(sFile), os.path.basename(sFile), recursive=True)
elif isinstance(sFile, StringIO):
tarInfo = tarfile.TarInfo(name="jobDescription.xml")
value = sFile.getvalue().encode()
tarInfo.size = len(value)
tf.addfile(tarinfo=tarInfo, fileobj=BytesIO(value))
else:
return S_ERROR("Unknown type to upload: %s" % repr(sFile))
if sizeLimit > 0:
# Evaluate the compressed size of the sandbox
if getGlobbedTotalSize(tmpFilePath) > sizeLimit:
result = S_ERROR("Size over the limit")
result["SandboxFileName"] = tmpFilePath
return result
oMD5 = hashlib.md5()
with open(tmpFilePath, "rb") as fd:
bData = fd.read(10240)
while bData:
oMD5.update(bData)
bData = fd.read(10240)
transferClient = self.__getTransferClient()
result = transferClient.sendFile(tmpFilePath, ["%s.tar.bz2" % oMD5.hexdigest(), assignTo])
result["SandboxFileName"] = tmpFilePath
try:
if result["OK"]:
os.unlink(tmpFilePath)
except OSError:
pass
return result
##############
# Download sandbox
def downloadSandbox(self, sbLocation, destinationDir="", inMemory=False, unpack=True):
"""
Download a sandbox file and keep it in bundled form
"""
if sbLocation.find("SB:") != 0:
return S_ERROR("Invalid sandbox URL")
sbLocation = sbLocation[3:]
sbSplit = sbLocation.split("|")
if len(sbSplit) < 2:
return S_ERROR("Invalid sandbox URL")
seName = sbSplit[0]
sePFN = "|".join(sbSplit[1:])
try:
tmpSBDir = tempfile.mkdtemp(prefix="TMSB.")
except IOError as e:
return S_ERROR("Cannot create temporary file: %s" % repr(e))
se = StorageElement(seName, vo=self.__vo)
result = returnSingleResult(se.getFile(sePFN, localPath=tmpSBDir))
if not result["OK"]:
return result
sbFileName = os.path.basename(sePFN)
result = S_OK()
tarFileName = os.path.join(tmpSBDir, sbFileName)
if inMemory:
try:
with open(tarFileName, "rb") as tfile:
data = tfile.read()
except IOError as e:
return S_ERROR("Failed to read the sandbox archive: %s" % repr(e))
finally:
os.unlink(tarFileName)
os.rmdir(tmpSBDir)
return S_OK(data)
# If destination dir is not specified use current working dir
# If its defined ensure the dir structure is there
if not destinationDir:
destinationDir = os.getcwd()
else:
mkDir(destinationDir)
if not unpack:
result["Value"] = tarFileName
return result
try:
sandboxSize = 0
with tarfile.open(name=tarFileName, mode="r") as tf:
for tarinfo in tf:
tf.extract(tarinfo, path=destinationDir)
sandboxSize += tarinfo.size
# FIXME: here we return the size, but otherwise we always return the location: inconsistent
# FIXME: looks like this size is used by the JobWrapper
result["Value"] = sandboxSize
except IOError as e:
result = S_ERROR("Could not open bundle: %s" % repr(e))
try:
os.unlink(tarFileName)
os.rmdir(tmpSBDir)
except OSError as e:
gLogger.warn("Could not remove temporary dir %s: %s" % (tmpSBDir, repr(e)))
return result
##############
# Jobs
def getSandboxesForJob(self, jobId):
"""Download job sandbox"""
return self.__getSandboxesForEntity("Job:%s" % jobId)
def assignSandboxesToJob(self, jobId, sbList, ownerName="", ownerGroup="", eSetup=""):
"""Assign SB to a job"""
return self.__assignSandboxesToEntity("Job:%s" % jobId, sbList, ownerName, ownerGroup, eSetup)
def assignSandboxToJob(self, jobId, sbLocation, sbType, ownerName="", ownerGroup="", eSetup=""):
"""Assign SB to a job"""
return self.__assignSandboxToEntity("Job:%s" % jobId, sbLocation, sbType, ownerName, ownerGroup, eSetup)
def unassignJobs(self, jobIdList):
"""Unassign SB to a job"""
if isinstance(jobIdList, int):
jobIdList = [jobIdList]
entitiesList = []
for jobId in jobIdList:
entitiesList.append("Job:%s" % jobId)
return self.__unassignEntities(entitiesList)
def downloadSandboxForJob(self, jobId, sbType, destinationPath="", inMemory=False, unpack=True):
"""Download SB for a job"""
result = self.__getSandboxesForEntity("Job:%s" % jobId)
if not result["OK"]:
return result
sbDict = result["Value"]
if sbType not in sbDict:
return S_ERROR(
"No %s sandbox found for job %s. " % (sbType, jobId)
+ "Possible causes are: the job does not exist, no sandbox was "
"registered or you do not have permission to access it."
)
# If inMemory, ensure we return the newest sandbox only
if inMemory:
sbLocation = sbDict[sbType][-1]
return self.downloadSandbox(sbLocation, destinationPath, inMemory, unpack)
downloadedSandboxesLoc = []
for sbLocation in sbDict[sbType]:
result = self.downloadSandbox(sbLocation, destinationPath, inMemory, unpack)
if not result["OK"]:
return result
downloadedSandboxesLoc.append(result["Value"])
return S_OK(downloadedSandboxesLoc)
##############
# Pilots
def getSandboxesForPilot(self, pilotId):
"""Get SB for a pilot"""
return self.__getSandboxesForEntity("Pilot:%s" % pilotId)
def assignSandboxesToPilot(self, pilotId, sbList, ownerName="", ownerGroup="", eSetup=""):
"""Assign SB to a pilot"""
return self.__assignSandboxesToEntity("Pilot:%s" % pilotId, sbList, ownerName, ownerGroup, eSetup)
def assignSandboxToPilot(self, pilotId, sbLocation, sbType, ownerName="", ownerGroup="", eSetup=""):
"""Assign SB to a pilot"""
return self.__assignSandboxToEntity("Pilot:%s" % pilotId, sbLocation, sbType, ownerName, ownerGroup, eSetup)
def unassignPilots(self, pilotIdIdList):
"""Unassign SB to a pilot"""
if isinstance(pilotIdIdList, int):
pilotIdIdList = [pilotIdIdList]
entitiesList = []
for pilotId in pilotIdIdList:
entitiesList.append("Pilot:%s" % pilotId)
return self.__unassignEntities(entitiesList)
def downloadSandboxForPilot(self, jobId, sbType, destinationPath=""):
"""Download SB for a pilot"""
result = self.__getSandboxesForEntity("Pilot:%s" % jobId)
if not result["OK"]:
return result
sbDict = result["Value"]
if sbType not in sbDict:
return S_ERROR("No %s sandbox registered for pilot %s" % (sbType, jobId))
downloadedSandboxesLoc = []
for sbLocation in sbDict[sbType]:
result = self.downloadSandbox(sbLocation, destinationPath)
if not result["OK"]:
return result
downloadedSandboxesLoc.append(result["Value"])
return S_OK(downloadedSandboxesLoc)
##############
# Entities
def __getSandboxesForEntity(self, eId):
"""
Get the sandboxes assigned to jobs and the relation type
"""
rpcClient = self.__getRPCClient()
return rpcClient.getSandboxesAssignedToEntity(eId)
def __assignSandboxesToEntity(self, eId, sbList, ownerName="", ownerGroup="", eSetup=""):
"""
Assign sandboxes to a job.
sbList must be a list of sandboxes and relation types
sbList = [ ( "SB:SEName|SEPFN", "Input" ), ( "SB:SEName|SEPFN", "Output" ) ]
"""
for sbT in sbList:
if sbT[1] not in self.__validSandboxTypes:
return S_ERROR("Invalid Sandbox type %s" % sbT[1])
if SandboxStoreClient.__smdb and ownerName and ownerGroup:
if not eSetup:
eSetup = gConfig.getValue("/DIRAC/Setup", "Production")
return SandboxStoreClient.__smdb.assignSandboxesToEntities({eId: sbList}, ownerName, ownerGroup, eSetup)
rpcClient = self.__getRPCClient()
return rpcClient.assignSandboxesToEntities({eId: sbList}, ownerName, ownerGroup, eSetup)
def __assignSandboxToEntity(self, eId, sbLocation, sbType, ownerName="", ownerGroup="", eSetup=""):
"""
Assign a sandbox to a job
sbLocation is "SEName:SEPFN"
sbType is Input or Output
"""
return self.__assignSandboxesToEntity(eId, [(sbLocation, sbType)], ownerName, ownerGroup, eSetup)
def __unassignEntities(self, eIdList):
"""
Unassign a list of jobs of their respective sandboxes
"""
rpcClient = self.__getRPCClient()
return rpcClient.unassignEntities(eIdList)
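if __name__ == "__main__":
    # Editor's sketch, not part of DIRAC: the typical call pattern for this
    # client, assuming a configured DIRAC installation and a valid proxy.
    # The job ID and file name below are purely illustrative.
    sbClient = SandboxStoreClient()
    result = sbClient.uploadFilesAsSandboxForJob(["jobScript.sh"], 12345, "Input")
    if not result["OK"]:
        gLogger.error("Sandbox upload failed", result["Message"])
    else:
        result = sbClient.downloadSandboxForJob(12345, "Output", destinationPath=".")
        if not result["OK"]:
            gLogger.error("Sandbox download failed", result["Message"])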
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Client/SandboxStoreClient.py
|
Python
|
gpl-3.0
| 14,808
|
[
"DIRAC"
] |
62374a8f02260f1cd5c69faf718f352655494418f4c30bd5b27f75f303a7b00b
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebUpload web interface"""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
from invenio.webinterface_handler_wsgi_utils import Field
from invenio.access_control_engine import acc_authorize_action
from invenio.config import CFG_SITE_URL
from invenio.urlutils import redirect_to_url
from invenio.messages import gettext_set_language
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.webuser import getUid, page_not_authorized, collect_user_info
from invenio.webpage import page
from invenio.batchuploader_engine import metadata_upload, cli_upload, \
get_user_metadata_uploads, get_user_document_uploads, document_upload, \
get_daemon_doc_files, get_daemon_meta_files, cli_allocate_record
import re
import calendar
try:
import invenio.template
batchuploader_templates = invenio.template.load('batchuploader')
except:
pass
def check_date(date):
""" Check if date is correct
@return:
0 - Default or correct date
3 - Incorrect format
4 - Date does not exist
"""
if not date or date == "yyyy-mm-dd":
return 0
correct_format = re.match("2[01]\d\d-[01]?\d-[0-3]?\d", date)
if not correct_format:
return 3
#separate year, month, day
date = correct_format.group(0).split("-")
try:
calendar.weekday(int(date[0]), int(date[1]), int(date[2]))
except ValueError:
return 4
return 0
def check_time(time):
""" Check if time is correct
@return:
0 - Default or correct time
1 - Incorrect format
"""
if not time or time == "hh:mm:ss":
return 0
correct_format = re.match("[0-2]\d:[0-5]\d:[0-5]\d", time)
if not correct_format:
return 1
return 0
def check_file(name):
""" Simple check to avoid blank filename and bad extensions
@return:
0 - Correct file name
1 - File name not correct
"""
if not name.endswith('.xml'):
return 1
return 0
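# (Editor's note) The helper below is an illustrative self-check of the three
# validators above and is not part of the original module; its name is made up.
# It is a plain function, so nothing runs at import time.
def _validator_examples():
    """Expected return codes of check_date, check_time and check_file."""
    assert check_date("2010-02-15") == 0   # valid date
    assert check_date("15-02-2010") == 3   # wrong format
    assert check_date("2010-02-30") == 4   # date does not exist
    assert check_time("23:59:59") == 0     # valid time
    assert check_time("12:00") == 1        # wrong format (seconds missing)
    assert check_file("upload.xml") == 0   # only .xml files are accepted
    assert check_file("upload.txt") == 1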
def user_authorization(req, ln):
""" Check user authorization to visit page """
_ = gettext_set_language(ln)
user_info = collect_user_info(req)
if user_info['email'] == 'guest':
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
referer = '/batchuploader/'
error_msg = _("Guests are not authorized to run batchuploader")
return page_not_authorized(req=req, referer=referer,
text=error_msg, navmenuid="batchuploader")
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
error_msg = _("The user '%s' is not authorized to run batchuploader" % (user_info['nickname']))
return page_not_authorized(req=req, referer=referer,
text=error_msg, navmenuid="batchuploader")
class WebInterfaceBatchUploaderPages(WebInterfaceDirectory):
"""Defines the set of /batchuploader pages."""
_exports = ['', 'metadata', 'robotupload', 'metasubmit', 'history', 'documents', 'docsubmit', 'daemon', 'allocaterecord']
def index(self, req, form):
""" The function called by default
"""
redirect_to_url(req, "%s/batchuploader/metadata" % (CFG_SITE_URL))
def metadata(self, req, form):
""" Display Metadata file upload form """
argd = wash_urlargd(form, {'error': (int, 0),
'mode': (str, ""),
'submit_date': (str, "yyyy-mm-dd"),
'submit_time': (str, "hh:mm:ss")})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'], ref="metadata")
body += batchuploader_templates.tmpl_display_web_metaupload_form(argd['ln'],
argd['error'], argd['mode'], argd['submit_date'],
argd['submit_time'])
title = _("Metadata batch upload")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def documents(self, req, form):
""" Display document upload form """
argd = wash_urlargd(form, {
})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'], ref="documents")
body += batchuploader_templates.tmpl_display_web_docupload_form(argd['ln'])
title = _("Document batch upload")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def docsubmit(self, req, form):
""" Function called after submitting the document upload form.
        Performs the appropriate action depending on the input parameters
"""
argd = wash_urlargd(form, {'docfolder': (str, ""),
'matching': (str, ""),
'mode': (str, ""),
'submit_date': (str, ""),
'submit_time': (str, "")})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
#Check if input fields are correct, if not, redirect to upload form
correct_date = check_date(argd['submit_date'])
correct_time = check_time(argd['submit_time'])
if correct_time != 0:
redirect_to_url(req,
"%s/batchuploader/documents?error=1&mode=%s&docfolder=%s&matching=%s&submit_date=%s"
% (CFG_SITE_URL, argd['mode'], argd['docfolder'], argd['matching'], argd['submit_date']))
if correct_date != 0:
redirect_to_url(req,
"%s/batchuploader/documents?error=%s&mode=%s&docfolder=%s&matching=%s&submit_time=%s"
% (CFG_SITE_URL, correct_date, argd['mode'], argd['docfolder'], argd['matching'], argd['submit_time']))
date = argd['submit_date'] not in ['yyyy-mm-dd', ''] \
and argd['submit_date'] or ''
time = argd['submit_time'] not in ['hh:mm:ss', ''] \
and argd['submit_time'] or ''
if date != '' and time == '':
redirect_to_url(req, "%s/batchuploader/documents?error=1&mode=%s&docfolder=%s&matching=%s&submit_date=%s"
% (CFG_SITE_URL, argd['mode'], argd['docfolder'], argd['matching'], argd['submit_date']))
elif date == '' and time != '':
redirect_to_url(req, "%s/batchuploader/documents?error=4&mode=%s&docfolder=%s&matching=%s&submit_time=%s"
% (CFG_SITE_URL, argd['mode'], argd['docfolder'], argd['matching'], argd['submit_time']))
errors, info = document_upload(req, argd['docfolder'], argd['matching'], argd['mode'], date, time, argd['ln'])
body = batchuploader_templates.tmpl_display_menu(argd['ln'])
uid = getUid(req)
navtrail = '''<a class="navtrail" href="%s/batchuploader/documents">%s</a>''' % \
(CFG_SITE_URL, _("Document batch upload"))
body += batchuploader_templates.tmpl_display_web_docupload_result(argd['ln'], errors, info)
title = _("Document batch upload result")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def robotupload(self, req, form):
"""Interface for robots used like this:
$ curl -F 'file=@localfile.xml' -F 'mode=-i' http://cdsweb.cern.ch/batchuploader/robotupload -A invenio_webupload
"""
argd = wash_urlargd(form, {'file': (Field, None),
'mode': (str,None)})
cli_upload(req, argd['file'], argd['mode'])
def allocaterecord(self, req, form):
"""
Interface for robots to allocate a record and obtain a record identifier
"""
return cli_allocate_record(req)
def metasubmit(self, req, form):
""" Function called after submitting the metadata upload form.
Checks if input fields are correct before uploading.
"""
argd = wash_urlargd(form, {'metafile': (Field, None),
'mode': (str,None),
'submit_date': (str, None),
'submit_time': (str, None),
'filename': (str, None)})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
#Check if input fields are correct, if not, redirect to upload form
correct_date = check_date(argd['submit_date'])
correct_time = check_time(argd['submit_time'])
correct_file = check_file(argd['filename'])
if correct_time != 0:
redirect_to_url(req,
"%s/batchuploader/metadata?error=1&mode=%s&submit_date=%s"
% (CFG_SITE_URL, argd['mode'], argd['submit_date']))
if correct_file != 0:
redirect_to_url(req,
"%s/batchuploader/metadata?error=2&mode=%s&submit_date=%s&submit_time=%s"
% (CFG_SITE_URL, argd['mode'], argd['submit_date'],
argd['submit_time']))
if correct_date != 0:
redirect_to_url(req,
"%s/batchuploader/metadata?error=%s&mode=%s&submit_time=%s"
% (CFG_SITE_URL, correct_date, argd['mode'], argd['submit_time']))
date = argd['submit_date'] not in ['yyyy-mm-dd', ''] \
and argd['submit_date'] or ''
time = argd['submit_time'] not in ['hh:mm:ss', ''] \
and argd['submit_time'] or ''
if date != '' and time == '':
redirect_to_url(req, "%s/batchuploader/metadata?error=1&mode=%s&submit_date=%s"
% (CFG_SITE_URL, argd['mode'], argd['submit_date']))
elif date == '' and time != '':
redirect_to_url(req, "%s/batchuploader/metadata?error=4&mode=%s&submit_time=%s"
% (CFG_SITE_URL, argd['mode'], argd['submit_time']))
#Function where bibupload queues the file
auth_code, auth_message = metadata_upload(req,
argd['metafile'], argd['mode'].split()[0],
date, time, argd['filename'], argd['ln'])
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'])
body += batchuploader_templates.tmpl_upload_succesful(argd['ln'])
title = _("Upload succesful")
navtrail = '''<a class="navtrail" href="%s/batchuploader/metadata">%s</a>''' % \
(CFG_SITE_URL, _("Metadata batch upload"))
return page(title = title,
body = body,
uid = uid,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def history(self, req, form):
"""Display upload history of the current user"""
argd = wash_urlargd(form, {})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
uploaded_meta_files = get_user_metadata_uploads(req)
uploaded_doc_files = get_user_document_uploads(req)
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'], ref="history")
body += batchuploader_templates.tmpl_upload_history(argd['ln'], uploaded_meta_files, uploaded_doc_files)
title = _("Upload history")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def daemon(self, req, form):
""" Display content of folders where the daemon will look into """
argd = wash_urlargd(form, {})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
docs = get_daemon_doc_files()
metadata = get_daemon_meta_files()
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'], ref="daemon")
body += batchuploader_templates.tmpl_daemon_content(argd['ln'], docs, metadata)
title = _("Batch Uploader: Daemon monitor")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def __call__(self, req, form):
"""Redirect calls without final slash."""
redirect_to_url(req, '%s/batchuploader/metadata' % CFG_SITE_URL)
|
pombredanne/invenio
|
modules/bibupload/lib/batchuploader_webinterface.py
|
Python
|
gpl-2.0
| 15,550
|
[
"VisIt"
] |
3e8822f126b2c3b60523c6cde949c0211b0d44a3c1eb7e2a876c675b35345a79
|
"""
Specializers for various sorts of data layouts and memory alignments.
These specializers operate on a copy of the simplified array expression
representation (i.e., one with an NDIterate node). This node is replaced
with one or several ForNode nodes in a specialized order.
For auto-tuning code for tile size and OpenMP size, see
https://github.com/markflorisson88/cython/blob/_array_expressions/Cython/Utility/Vector.pyx
"""
import sys
import copy
try:
from functools import wraps
except ImportError:
def wraps(wrapped):
def decorator(wrapper):
return wrapper
return decorator
import minivisitor
import miniutils
import minitypes
import minierror
import codegen
strength_reduction = True
def debug(*args):
sys.stderr.write(" ".join(str(arg) for arg in args) + '\n')
def specialize_ast(ast):
return copy.deepcopy(ast)
class ASTMapper(minivisitor.VisitorTransform):
"""
Base class to map foreign ASTs onto a minivect AST, or vice-versa.
This sets the current node's position in the astbuilder for each
node that is being visited, to make it easy to build new AST nodes
without passing in source position information everywhere.
"""
def __init__(self, context):
super(ASTMapper, self).__init__(context)
self.astbuilder = context.astbuilder
def getpos(self, opaque_node):
return self.context.getpos(opaque_node)
def map_type(self, opaque_node, **kwds):
"Return a mapped type for the foreign node."
return self.context.typemapper.map_type(
self.context.gettype(opaque_node), **kwds)
def visit(self, node, *args):
prev = self.astbuilder.pos
self.astbuilder.pos = node.pos
result = super(ASTMapper, self).visit(node)
self.astbuilder.pos = prev
return result
class BaseSpecializer(ASTMapper):
"""
Base class for specialization. Does not perform any specialization itself.
"""
def getpos(self, node):
return node.pos
def get_type(self, type):
"Resolve the type to the dtype of the array if an array type"
if type.is_array:
return type.dtype
return type
def visit(self, node, *args):
result = super(BaseSpecializer, self).visit(node)
if result is not None:
result.is_specialized = True
return result
def visit_Node(self, node):
# node = copy.copy(node)
self.visitchildren(node)
return node
def init_pending_stats(self, node):
"""
Allow modifications while visiting some descendant of this node
This happens especially while variables are resolved, which
calls compute_inner_dim_pointer()
"""
b = self.astbuilder
if not node.is_function:
node.prepending = b.stats()
node.appending = b.stats()
def handle_pending_stats(self, node):
"""
Handle any pending statements that need to be inserted further
up in the AST.
"""
b = self.astbuilder
# self.visitchildren(node.prepending)
# self.visitchildren(node.appending)
if node.is_function:
# prepending is a StatListNode already part of the function body
# assert node.prepending in list(self.treepath(node, '//StatListNode'))
node.body = b.stats(node.body, node.appending)
else:
node.body = b.stats(node.prepending, node.body, node.appending)
if not self.context.use_llvm:
node.body = self.fuse_omp_stats(node.body)
def get_loop(self, loop_level):
if loop_level:
return self.function.for_loops[self.loop_level - 1]
return self.function
def fuse_omp_stats(self, node):
"""
Fuse consecutive OpenMPConditionalNodes.
"""
import miniast
if not node.stats:
return node
b = self.astbuilder
stats = [node.stats[0]]
for next_stat in node.stats[1:]:
stat = stats[-1]
c1 = isinstance(stat, miniast.OpenMPConditionalNode)
c2 = isinstance(next_stat, miniast.OpenMPConditionalNode)
if c1 and c2:
if_body = None
else_body = None
if stat.if_body or next_stat.if_body:
if_body = b.stats(stat.if_body, next_stat.if_body)
if stat.else_body or next_stat.else_body:
else_body = b.stats(stat.else_body, next_stat.else_body)
stats[-1] = b.omp_if(if_body, else_body)
else:
stats.append(next_stat)
node.stats[:] = stats
return node
#
### Stubs for cooperative multiple inheritance
#
def visit_NDIterate(self, node):
# Do not visit children
return node
visit_AssignmentExpr = visit_Node
visit_ErrorHandler = visit_Node
visit_BinopNode = visit_Node
visit_UnopNode = visit_Node
visit_IfNode = visit_Node
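# Editor's note: a standalone sketch of the idea behind fuse_omp_stats() above,
# using plain tuples and lists instead of miniast nodes (illustrative only).
# Consecutive "conditional" entries in a statement list are merged into one by
# concatenating their if/else bodies, which keeps generated OpenMP guards from
# piling up.
def _example_fuse_conditionals(stats):
    """Merge adjacent ('omp_if', if_body, else_body) tuples in a flat list."""
    fused = []
    for stat in stats:
        is_cond = isinstance(stat, tuple) and stat and stat[0] == 'omp_if'
        prev_is_cond = bool(fused) and isinstance(fused[-1], tuple) \
            and fused[-1][0] == 'omp_if'
        if is_cond and prev_is_cond:
            _, if_a, else_a = fused[-1]
            _, if_b, else_b = stat
            fused[-1] = ('omp_if', if_a + if_b, else_a + else_b)
        else:
            fused.append(stat)
    return fused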
class Specializer(BaseSpecializer):
"""
Base class for most specializers, provides some basic functionality
for subclasses. Implement visit_* methods to specialize nodes
to some pattern.
Implements implementations to handle errors and cleanups, adds a return
statement to the function and can insert debug print statements if
context.debug is set to a true value.
"""
is_contig_specializer = False
is_tiled_specializer = False
is_vectorizing_specializer = False
is_inner_contig_specializer = False
is_strided_specializer = False
vectorized_equivalents = None
def __init__(self, context, specialization_name=None):
super(Specializer, self).__init__(context)
if specialization_name is not None:
self.specialization_name = specialization_name
self.variables = {}
def _index_list(self, pointer, ndim):
"Return a list of indexed pointers"
return [self.astbuilder.index(pointer, self.astbuilder.constant(i))
for i in range(ndim)]
def _debug_function_call(self, b, node):
"""
Generate debug print statements when the specialized function is
called.
"""
stats = [
b.print_(b.constant(
"Calling function %s (%s specializer)" % (
node.mangled_name, self.specialization_name)))
]
if self.is_vectorizing_specializer:
stats.append(
b.print_(b.constant("Vectorized version size=%d" %
self.vector_size)))
stats.append(
b.print_(b.constant("shape:"), *self._index_list(node.shape,
node.ndim)))
if self.is_tiled_specializer:
stats.append(b.print_(b.constant("blocksize:"), self.get_blocksize()))
if not self.is_contig_specializer:
for idx, arg in enumerate(node.arguments):
if arg.is_array_funcarg:
stats.append(b.print_(b.constant("strides operand%d:" % idx),
*self._index_list(arg.strides_pointer,
arg.type.ndim)))
stats.append(b.print_(b.constant("data pointer %d:" % idx),
arg.data_pointer))
node.prepending.stats.append(b.stats(*stats))
def visit_FunctionNode(self, node):
"""
Handle a FunctionNode. Sets node.total_shape to the product of the
shape, wraps the function's body in a
:py:class:`minivect.miniast.ErrorHandler` if needed and adds a
return statement.
"""
b = self.astbuilder
self.compute_total_shape(node)
node.mangled_name = self.context.mangle_function_name(node.name)
# set this so bad people can specialize during code generation time
node.specializer = self
node.specialization_name = self.specialization_name
self.function = node
if self.context.debug:
self._debug_function_call(b, node)
if node.body.may_error(self.context):
node.body = b.error_handler(node.body)
node.body = b.stats(node.body, b.return_(node.success_value))
self.visitchildren(node)
# if not self.is_contig_specializer:
# self.compute_temp_strides(b, node)
return node
def visit_ForNode(self, node):
if node.body.may_error(self.context):
node.body = self.astbuilder.error_handler(node.body)
self.visitchildren(node)
return node
def visit_Variable(self, node):
if node.name not in self.variables:
self.variables[node.name] = node
return self.visit_Node(node)
def get_data_pointer(self, variable, loop_level):
return self.function.args[variable.name].data_pointer
def omp_for(self, node):
"""
Insert an OpenMP for loop with an 'if' clause that checks to see
whether the total data size exceeds the given OpenMP auto-tuned size.
The caller needs to adjust the size, set in the FunctionNode's
'omp_size' attribute, depending on the number of computations.
"""
if_clause = self.astbuilder.binop(minitypes.bool_, '>',
self.function.total_shape,
self.function.omp_size)
return self.astbuilder.omp_for(node, if_clause)
class FinalSpecializer(BaseSpecializer):
"""
Perform any final specialization and optimizations. The initial specializer
is concerned with specializing for the given data layouts, whereas this
specializer is concerned with any rewriting of the AST to support
fundamental operations.
"""
vectorized_equivalents = None
in_lhs_expr = False
should_vectorize = False
def __init__(self, context, previous_specializer):
super(FinalSpecializer, self).__init__(context)
self.previous_specializer = previous_specializer
self.sp = previous_specializer
self.error_handlers = []
self.loop_level = 0
self.variables = {}
self.strides = {}
self.outer_pointers = {}
self.vector_temps = {}
def run_optimizations(self, node):
"""
Run any optimizations on the AST. Currently only loop-invariant code
motion is implemented when broadcasting information is present.
"""
import optimize
# TODO: support vectorized specializations
if (self.context.optimize_broadcasting and not
self.sp.is_contig_specializer or
self.sp.is_vectorizing_specializer):
optimizer = optimize.HoistBroadcastingExpressions(self.context)
node = optimizer.visit(node)
return node
def visit_Variable(self, node):
"""
Process variables, which includes arrays and scalars. For arrays,
this means retrieving the element from the array. Performs strength
reduction for index calculation of array variables.
"""
if node.type.is_array:
tiled = self.sp.is_tiled_specializer
last_loop_level = (self.loop_level == self.function.ndim or
(self.sp.is_vectorizing_specializer and not
self.should_vectorize))
inner_contig = (
self.sp.is_inner_contig_specializer and
(last_loop_level or node.hoisted) and
(not self.sp.is_strided_specializer or
self.sp.matching_contiguity(node.type)))
contig = self.sp.is_contig_specializer
# Get the array data pointer
arg_data_pointer = self.function.args[node.name].data_pointer
if self.sp.is_contig_specializer:
# Contiguous, no strength reduction needed
data_pointer = arg_data_pointer
else:
# Compute strength reduction pointers for all dimensions leading
                # up to the dimension this variable occurs in.
self.compute_temp_strides(node, inner_contig, tiled=tiled)
data_pointer = self.compute_data_pointer(
node, arg_data_pointer, inner_contig, tiled)
# Get the loop level corresponding to the occurrence of the variable
for_node = self.function.for_loops[self.loop_level - 1]
if self.should_vectorize:
return self.handle_vector_variable(node, data_pointer, for_node,
inner_contig, contig)
else:
element = self.element_location(data_pointer, for_node,
inner_contig, contig,
tiled=tiled, variable=node)
return self.astbuilder.resolved_variable(
node.name, node.type, element)
else:
return node
def visit_VectorVariable(self, vector_variable):
        # use visit_Variable, since it does the strength reduction and such
return self.visit_Variable(vector_variable.variable)
def element_location(self, data_pointer, for_node,
inner_contig, is_contig, tiled, variable):
"Return the element in the array for the current index set"
b = self.astbuilder
def debug(item):
if self.context.debug_elements:
string = b.constant("Referenced element from %s:" %
variable.name)
print_ = self.visit(b.print_(string, item))
for_node = self.function.for_loops[self.loop_level - 1]
for_node.prepending.stats.append(print_)
if not is_contig:
stats = []
for i, stride in enumerate(self.strides[variable]):
if stride is not None:
string = b.constant("%s step[%d]:" % (variable.name, i))
stats.append(b.print_(string, stride))
print_steps = b.stats(*stats)
self.function.prepending.stats.append(self.visit(print_steps))
return item
if inner_contig or is_contig:
# contiguous access, index the data pointer in the inner dimension
return debug(b.index(data_pointer, for_node.index))
else:
# strided access, this dimension is performing strength reduction,
# so we just need to dereference the data pointer
return debug(b.dereference(data_pointer))
def handle_vector_variable(self, variable, data_pointer, for_node,
inner_contig, is_contig):
"Same as `element_location`, except for Vector variables"
b = self.astbuilder
# For array operands, load reads into registers, and store
# writes back into the data pointer. For assignment to a register
# we use a vector type, for assignment to a data pointer, the
# data pointer type
if inner_contig or is_contig:
data_pointer = b.add(data_pointer, for_node.index)
if self.in_lhs_expr:
return data_pointer
else:
variable = b.vector_variable(variable, self.sp.vector_size)
if variable in self.vector_temps:
return self.vector_temps[variable]
rhs = b.vector_load(data_pointer, self.sp.vector_size)
temp = b.temp(variable.type, 'xmm')
self.vector_temps[variable] = temp
for_node.prepending.stats.append(b.assign(temp, rhs))
return self.visit(temp)
def compute_temp_strides(self, variable, handle_inner_dim, tiled=False):
"""
Compute the temporary strides needed for the strength reduction. These
should be small constants, so division should be fast. We could use
char * instead of element_type *, but it's nicer to avoid the casts.
"""
b = self.astbuilder
if variable in self.strides:
return self.strides[variable]
start = 0
stop = variable.type.ndim
if handle_inner_dim:
if self.sp.order == "F":
start = 1
else:
stop = stop - 1
self.strides[variable] = strides = [None] * len(self.function.for_loops)
for dim in range(start, stop):
stride = b.stride(variable, dim)
temp_stride = b.temp(stride.type.unqualify("const"),
name="%s_stride%d" % (variable.name, dim))
stat = b.assign(temp_stride,
b.div(stride, b.sizeof(variable.type.dtype)))
self.function.prepending.stats.append(stat)
strides[dim] = temp_stride
return strides
def compute_data_pointer(self, variable, argument_data_pointer,
handle_inner_dim, tiled):
"""
Compute the data pointer for the dimension the variable is located in
(the loop level). This involves generating a strength reduction in
each outer dimension.
Variables referring to the same array may be found on different
loop levels.
"""
b = self.astbuilder
assert variable.type.is_array
pointer_type = argument_data_pointer.type.unqualify("const")
loop_level = self.loop_level
offset = self.function.ndim - variable.type.ndim
stop = loop_level - handle_inner_dim
if self.outer_pointers.get(variable):
start = len(self.outer_pointers[variable])
if stop <= start:
return self.outer_pointers[variable][stop - 1]
else:
self.outer_pointers[variable] = []
start = offset
outer_pointers = self.outer_pointers[variable]
temp = argument_data_pointer
for_loops = self.function.for_loops[start:stop]
# Loop over all outer loop levels
for i, for_node in zip(range(start, stop), for_loops):
if for_node.dim < offset:
continue
# Allocate a temp_data_pointer on each outer loop level
temp = b.temp(pointer_type)
dim = for_node.dim - offset
if not outer_pointers: #i == offset:
outer_node = self.function
outer_pointer = self.function.args[variable.name].data_pointer
else:
outer_node = self.function.for_loops[i - 1]
outer_pointer = outer_pointers[-1]
# Generate: temp_data_pointer = outer_data_pointer
assmt = b.assign(temp, outer_pointer)
outer_node.prepending.stats.append(assmt)
stride = original_stride = self.strides[variable][dim]
assert stride is not None, ('strides', self.strides[variable],
'dim', dim, 'start', start,
'stop', stop, 'offset', offset,
'specializer', self.sp)
if for_node.is_controlling_loop:
# controlling loop for tiled specializations, multiply by the
# tiling blocksize for this dimension
stride = b.mul(stride, for_node.blocksize)
# Generate: temp_data_pointer += stride
stat = b.assign(temp, b.add(temp, stride))
if not outer_pointers:
# Outermost loop level, generate some additional OpenMP
# parallel-loop-compatible code
# Generate: temp_data_pointer = data_pointer + i * stride0
omp_body = b.assign(temp, b.add(outer_pointer,
b.mul(original_stride, for_node.index)))
for_node.prepending.stats.append(b.omp_if(omp_body))
for_node.appending.stats.append(b.omp_if(None, stat))
omp_for = self.treepath_first(self.function, '//OpenMPLoopNode')
if omp_for is not None:
omp_for.privates.append(temp)
else:
for_node.appending.stats.append(stat)
self.outer_pointers[variable].append(temp)
return temp
def visit_FunctionNode(self, node):
self.function = node
self.indices = self.sp.indices
node = self.run_optimizations(node)
self.init_pending_stats(node)
self.visitchildren(node)
self.handle_pending_stats(node)
return node
def _visit_set_vectorizing_flag(self, node):
was_vectorizing = self.should_vectorize
self.should_vectorize = node.should_vectorize
self.visitchildren(node)
self.should_vectorize = was_vectorizing
return node
def visit_ForNode(self, node):
is_nd_fornode = node in self.function.for_loops or node.is_fixup
self.loop_level += is_nd_fornode
self.init_pending_stats(node)
self._visit_set_vectorizing_flag(node)
self.handle_pending_stats(node)
self.loop_level -= is_nd_fornode
return node
def visit_IfNode(self, node):
self.loop_level += node.is_fixup
result = self._visit_set_vectorizing_flag(node)
self.loop_level -= node.is_fixup
return result
def visit_AssignmentExpr(self, node):
# assignment expressions should not be nested
self.in_lhs_expr = True
node.lhs = self.visit(node.lhs)
self.in_lhs_expr = False
node.rhs = self.visit(node.rhs)
if node.lhs.type.is_pointer and node.rhs.type.is_vector:
# This expression must be a statement
return self.astbuilder.vector_store(node.lhs, node.rhs)
return node
def visit_TempNode(self, node):
self.visitchildren(node)
return node
def visit_BinopNode(self, node):
type = self.get_type(node.type)
if node.operator == '%' and type.is_float and not self.context.use_llvm:
# rewrite modulo for floats to fmod()
b = self.astbuilder
functype = minitypes.FunctionType(return_type=type,
args=[type, type])
if type.itemsize == 4:
modifier = "f"
elif type.itemsize == 8:
modifier = ""
else:
modifier = "l"
fmod = b.variable(functype, "fmod%s" % modifier)
return self.visit(b.funccall(fmod, [node.lhs, node.rhs]))
self.visitchildren(node)
return node
def visit_UnopNode(self, node):
if node.type.is_vector and node.operator == '-':
# rewrite unary subtract
type = node.operand.type
if type.is_float:
constant = 0.0
else:
constant = 0
lhs = self.astbuilder.vector_const(type, constant)
node = self.astbuilder.binop(type, '-', lhs, node.operand)
return self.visit(node)
self.visitchildren(node)
return node
def visit_DereferenceNode(self, node):
node.operand = self.visit(node.operand)
        if self.context.use_llvm:
node = self.astbuilder.index(node, self.astbuilder.constant(0))
return node
def visit_IfElseExprNode(self, node):
self.visitchildren(node)
if self.context.use_llvm:
# Rewrite 'cond ? x : y' expressions to if/else statements
b = self.astbuilder
temp = b.temp(node.lhs.type, name='if_temp')
stat = b.if_else(node.cond, b.assign(temp, node.lhs),
b.assign(temp, node.rhs))
for_node = self.get_loop(self.loop_level)
for_node.prepending.stats.append(stat)
node = temp
return node
def visit_PrintNode(self, node):
b = self.astbuilder
printf_type = minitypes.FunctionType(
return_type=minitypes.int_,
args=[minitypes.CStringType()],
is_vararg=True)
printf = b.funcname(printf_type, 'printf')
args = []
specifiers = []
for i, arg in enumerate(node.args):
specifier, arg = codegen.format_specifier(arg, b)
args.append(arg)
specifiers.append(specifier)
args.insert(0, b.constant(" ".join(specifiers) + "\n"))
return b.expr_stat(b.funccall(printf, args))
def visit_PositionInfoNode(self, node):
"""
Replace with the setting of positional source information in case
of an error.
"""
        b = self.astbuilder
posinfo = self.function.posinfo
if posinfo:
pos = node.posinfo
return b.stats(
b.assign(b.deref(posinfo.filename), b.constant(pos.filename)),
b.assign(b.deref(posinfo.lineno), b.constant(pos.lineno)),
b.assign(b.deref(posinfo.column), b.constant(pos.column)))
def visit_RaiseNode(self, node):
"""
Generate a call to PyErr_Format() to set an exception.
"""
from minitypes import FunctionType, object_
b = self.astbuilder
args = [object_] * (2 + len(node.fmt_args))
functype = FunctionType(return_type=object_, args=args)
return b.expr_stat(
b.funccall(b.funcname(functype, "PyErr_Format"),
[node.exc_var, node.msg_val] + node.fmt_args))
def visit_ErrorHandler(self, node):
"""
See miniast.ErrorHandler for an explanation of what this needs to do.
"""
b = self.astbuilder
node.error_variable = b.temp(minitypes.bool_)
node.error_var_init = b.assign(node.error_variable, 0)
node.cleanup_jump = b.jump(node.cleanup_label)
node.error_target_label = b.jump_target(node.error_label)
node.cleanup_target_label = b.jump_target(node.cleanup_label)
node.error_set = b.assign(node.error_variable, 1)
if self.error_handlers:
cascade_code = b.jump(self.error_handlers[-1].error_label)
else:
cascade_code = b.return_(self.function.error_value)
node.cascade = b.if_(node.error_variable, cascade_code)
self.error_handlers.append(node)
self.visitchildren(node)
self.error_handlers.pop()
return node
def visit_PragmaForLoopNode(self, node):
if self.previous_specializer.is_vectorizing_specializer:
return self.visit(node.for_node)
else:
self.visitchildren(node)
return node
def visit_StatListNode(self, node):
self.visitchildren(node)
return self.fuse_omp_stats(node)
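# Editor's note: a pure-Python sketch of the strength reduction performed by
# FinalSpecializer.compute_temp_strides()/compute_data_pointer() above
# (illustrative only, no miniast involved). Rather than recomputing
# base + i*stride0 + j*stride1 for every element, an offset is carried per
# loop level and bumped by the element-sized stride, which is what the
# generated code does with its temporary data pointers.
def _example_strength_reduced_sum(buf, shape, strides):
    """Sum a 2-D strided view over the flat list `buf` (strides in elements)."""
    rows, cols = shape
    stride0, stride1 = strides
    total = 0
    row_offset = 0                    # "temporary data pointer", outer loop
    for _i in range(rows):
        offset = row_offset           # "temporary data pointer", inner loop
        for _j in range(cols):
            total += buf[offset]
            offset += stride1         # strength-reduced inner index
        row_offset += stride0         # strength-reduced outer index
    return total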
class OrderedSpecializer(Specializer):
"""
Specializer that understands C and Fortran data layout orders.
"""
vectorized_equivalents = None
def compute_total_shape(self, node):
"""
Compute the product of the shape (entire length of array output).
Sets the total shape as attribute of the function (total_shape).
"""
b = self.astbuilder
# compute the product of the shape and insert it into the function body
extents = [b.index(node.shape, b.constant(i))
for i in range(node.ndim)]
node.total_shape = b.temp(node.shape.type.base_type)
init_shape = b.assign(node.total_shape, reduce(b.mul, extents),
may_reorder=True)
node.body = b.stats(init_shape, node.body)
return node.total_shape
def loop_order(self, order, ndim=None):
"""
Returns arguments to (x)range() to process something in C or Fortran
order.
"""
if ndim is None:
ndim = self.function.ndim
if order == "C":
return self.c_loop_order(ndim)
else:
return self.f_loop_order(ndim)
def c_loop_order(self, ndim):
return ndim - 1, -1, -1
def f_loop_order(self, ndim):
return 0, ndim, 1
def order_indices(self, indices):
"""
Put the indices of the for loops in the right iteration order. The
        loops were built backwards (Fortran order), so for C we need to
reverse them.
Note: the indices are always ordered on the dimension they index
"""
if self.order == "C":
indices.reverse()
def ordered_loop(self, node, result_indices, lower=None, upper=None,
step=None, loop_order=None):
"""
Return a ForNode ordered in C or Fortran order.
"""
b = self.astbuilder
if lower is None:
lower = lambda i: None
if upper is None:
upper = lambda i: b.shape_index(i, self.function)
if loop_order is None:
loop_order = self.loop_order(self.order)
indices = []
for_loops = []
for i in range(*loop_order):
node = b.for_range_upwards(node, lower=lower(i), upper=upper(i),
step=step)
node.dim = i
for_loops.append(node)
indices.append(node.target)
self.order_indices(indices)
result_indices.extend(indices)
return for_loops[::-1], node
def _index_pointer(self, pointer, indices, strides):
"""
Return an element for an N-dimensional index into a strided array.
"""
b = self.astbuilder
return b.index_multiple(
b.cast(pointer, minitypes.char.pointer()),
[b.mul(index, stride) for index, stride in zip(indices, strides)],
dest_pointer_type=pointer.type)
def _strided_element_location(self, node, indices=None, strides_index_offset=0,
ndim=None, pointer=None):
"""
Like _index_pointer, but given only an array operand indices. It first
needs to get the data pointer and stride nodes.
"""
indices = indices or self.indices
b = self.astbuilder
if ndim is None:
ndim = node.type.ndim
if pointer is None:
pointer = b.data_pointer(node)
indices = [index for index in indices[len(indices) - ndim:]]
strides = [b.stride(node, i + strides_index_offset)
for i, idx in enumerate(indices)]
node = self._index_pointer(pointer, indices, strides)
self.visitchildren(node)
return node
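# Editor's note: small illustrative sketch (not part of minivect) of what
# OrderedSpecializer.loop_order() returns: the range() arguments used to build
# the nest of ForNodes in C or in Fortran order.
def _example_loop_order(order, ndim):
    """Mirror c_loop_order()/f_loop_order() and list the visited dimensions."""
    if order == "C":
        start, stop, step = ndim - 1, -1, -1   # same tuple as c_loop_order()
    else:
        start, stop, step = 0, ndim, 1         # same tuple as f_loop_order()
    return list(range(start, stop, step))
# For ndim == 3 this gives [2, 1, 0] for C order and [0, 1, 2] for Fortran.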
def get_any_array_argument(arguments):
for arg in arguments:
if arg.type is not None and arg.type.is_array:
return arg
class CanVectorizeVisitor(minivisitor.TreeVisitor):
"""
    Determines whether we can vectorize a given expression. Currently only
    arithmetic on floats and doubles is supported.
"""
can_vectorize = True
def _valid_type(self, type):
if type.is_array:
type = type.dtype
return type.is_float and type.itemsize in (4, 8)
def visit_FunctionNode(self, node):
array_dtypes = [
arg.type.dtype for arg in node.arguments[1:]
if arg.type is not None and arg.type.is_array]
all_the_same = miniutils.all(
dtype == array_dtypes[0] for dtype in array_dtypes)
self.can_vectorize = all_the_same and self._valid_type(array_dtypes[0])
if self.can_vectorize:
self.visitchildren(node)
def visit_BinopNode(self, node):
if node.lhs.type != node.rhs.type or not self._valid_type(node.lhs.type):
self.can_vectorize = False
else:
self.visitchildren(node)
def visit_UnopNode(self, node):
if self._valid_type(node.type):
self.visitchildren(node)
else:
self.can_vectorize = False
def visit_FuncCallNode(self, node):
self.can_vectorize = False
def visit_NodeWrapper(self, node):
# TODO: dispatch to self.context.can_vectorize
self.can_vectorize = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_if_should_vectorize(func):
"""
    Call the decorated method if we are vectorizing; otherwise dispatch to the
    same method on the superclass of :py:class:`VectorizingSpecializer`.
"""
@wraps(func)
def wrapper(self, node):
if self.should_vectorize:
return func(self, node)
else:
method = getattr(super(VectorizingSpecializer, self), func.__name__)
return method(node)
return wrapper
class VectorizingSpecializer(Specializer):
"""
Generate explicitly vectorized code if supported.
:param vector_size: number of 32-bit operands in the vector
"""
is_vectorizing_specializer = True
can_vectorize_visitor = CanVectorizeVisitor
vectorized_equivalents = None
# set in subclasses
vector_size = None
def __init__(self, context, specialization_name=None):
super(VectorizingSpecializer, self).__init__(context,
specialization_name)
# temporary registers
self.temps = {}
# Flag to vectorize expressions in a vectorized loop
self.should_vectorize = True
@classmethod
def can_vectorize(cls, context, ast):
visitor = cls.can_vectorize_visitor(context)
visitor.visit(ast)
# print visitor.can_vectorize, ast.pos
return visitor.can_vectorize
@visit_if_should_vectorize
def visit_FunctionNode(self, node):
self.dtype = get_any_array_argument(node.arguments).type.dtype
return super(VectorizingSpecializer, self).visit_FunctionNode(node)
@visit_if_should_vectorize
def visit_Variable(self, variable):
if variable.type.is_array:
variable = self.astbuilder.vector_variable(variable, self.vector_size)
return variable
@visit_if_should_vectorize
def visit_BinopNode(self, node):
self.visitchildren(node)
if node.lhs.type.is_vector:
# TODO: promotion
node = self.astbuilder.vector_binop(node.operator,
node.lhs, node.rhs)
return node
@visit_if_should_vectorize
def visit_UnopNode(self, node):
self.visitchildren(node)
if node.operand.type.is_vector:
if node.operator == '+':
node = node.operand
else:
assert node.operator == '~'
raise NotImplementedError
node = self.astbuilder.vector_unop(node.type, node.operator,
self.visit(node.operand))
return node
@visit_if_should_vectorize
def visit_ForNode(self, node):
node.should_vectorize = True
self.visitchildren(node)
return node
@visit_if_should_vectorize
def visit_IfNode(self, node):
node.should_vectorize = True
self.visitchildren(node)
return node
def _modify_inner_loop(self, b, elements_per_vector, node, step):
"""
Turn 'for (i = 0; i < N; i++)' into 'for (i = 0; i < N - 3; i += 4)'
for a vector size of 4. In case the data size is not a multiple of
4, we can only SIMDize that part, and need a fixup loop for any
remaining elements. Returns the upper limit and the counter (N and i).
"""
i = node.step.lhs
N = node.condition.rhs
# Adjust step
step = b.mul(step, b.constant(elements_per_vector))
node.step = b.assign_expr(i, b.add(i, step))
# Adjust condition
vsize_minus_one = b.constant(elements_per_vector - 1)
node.condition.rhs = b.sub(N, vsize_minus_one)
return N, i
def fixup_loop(self, i, N, body, elements_per_vector):
"""
Generate a loop to fix up any remaining elements that didn't fit into
our SIMD vectors.
"""
b = self.astbuilder
cond = b.binop(minitypes.bool_, '<', i, N)
if elements_per_vector - 1 == 1:
fixup_loop = b.if_(cond, body)
else:
# fixup_loop = b.for_range_upwards(body, lower=i, upper=N)
init = b.noop_expr()
step = b.assign_expr(i, b.add(i, b.constant(1)))
fixup_loop = b.for_(body, init, cond, step, index=i)
fixup_loop.is_fixup = True
self.should_vectorize = False
fixup_loop = self.visit(fixup_loop)
self.should_vectorize = True
return fixup_loop
def process_inner_forloop(self, node, original_expression, step=None):
"""
Process an inner loop, adjusting the step accordingly and injecting
any temporary assignments where necessary. Returns the fixup loop,
needed when the data size is not a multiple of the vector size.
:param original_expression: original, unmodified, array expression (
the body of the NDIterate node)
"""
b = self.astbuilder
if step is None:
step = b.constant(1)
elements_per_vector = self.vector_size * 4 / self.dtype.itemsize
N, i = self._modify_inner_loop(b, elements_per_vector, node, step)
return self.fixup_loop(i, N, original_expression, elements_per_vector)
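# Editor's note: a scalar Python sketch (illustrative only) of the loop
# transformation performed by _modify_inner_loop() and fixup_loop() above: the
# main loop steps by elements_per_vector and stops elements_per_vector - 1
# short of N, and a fixup loop handles whatever remainder does not fill a
# whole SIMD vector.
def _example_vectorized_copy(src, dst, elements_per_vector=4):
    """Copy src into dst, processing full 'vectors' first and the tail last."""
    n = len(src)
    i = 0
    while i < n - (elements_per_vector - 1):        # main, "vectorized" loop
        dst[i:i + elements_per_vector] = src[i:i + elements_per_vector]
        i += elements_per_vector
    while i < n:                                    # scalar fixup loop
        dst[i] = src[i]
        i += 1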
class StridedCInnerContigSpecializer(OrderedSpecializer):
"""
Specialize on the first or last dimension being contiguous (depending
on the 'order' attribute).
"""
specialization_name = "inner_contig"
order = "C"
is_inner_contig_specializer = True
vectorized_equivalents = None
def __init__(self, context, specialization_name=None):
super(StridedCInnerContigSpecializer, self).__init__(
context, specialization_name)
self.indices = []
def _generate_inner_loop(self, b, node):
"""
Generate innermost loop, injecting the pointer assignments in the
right place
"""
loop = node
if len(self.indices) > 1:
for index in self.indices[:-2]:
loop = node.body
self.inner_loop = loop.body
loop.body = b.pragma_for(self.inner_loop)
node = self.omp_for(node)
else:
self.inner_loop = loop
node = self.omp_for(b.pragma_for(self.inner_loop))
return loop, node
def _vectorize_inner_loop(self, b, loop, node, original_expr):
"Vectorize the inner loop and insert the fixup loop"
if self.is_vectorizing_specializer:
fixup_loop = self.process_inner_forloop(self.inner_loop,
original_expr)
if len(self.indices) > 1:
loop.body = b.stats(loop.body, fixup_loop)
else:
node = b.stats(node, fixup_loop)
return node
def visit_NDIterate(self, node):
"""
Replace this node with ordered loops and a direct index into a
temporary data pointer in the contiguous dimension.
"""
b = self.astbuilder
assert not list(self.treepath(node, '//NDIterate'))
original_expr = specialize_ast(node.body)
# start by generating a C or Fortran ordered loop
self.function.for_loops, node = self.ordered_loop(node.body,
self.indices)
loop, node = self._generate_inner_loop(b, node)
result = self.visit(node)
node = self._vectorize_inner_loop(b, loop, node, original_expr)
return result
def index(self, loop_level):
if self.order == 'C':
return self.indices[loop_level]
else:
return self.indices[-loop_level]
def strided_indices(self):
"Return the list of strided indices for this order"
return self.indices[:-1]
def contig_index(self):
"The contiguous index"
return self.indices[-1]
def get_data_pointer(self, variable, loop_level):
return self.compute_inner_dim_pointer(variable, loop_level)
class StridedFortranInnerContigSpecializer(StridedCInnerContigSpecializer):
"""
Specialize on the first dimension being contiguous.
"""
order = "F"
specialization_name = "inner_contig_fortran"
vectorized_equivalents = None
def strided_indices(self):
return self.indices[1:]
def contig_index(self):
return self.indices[0]
class StrengthReducingStridedSpecializer(StridedCInnerContigSpecializer):
"""
Specialize on strided operands. If some operands are contiguous in the
dimension compatible with the order we are specializing for (the first
if Fortran, the last if C), then perform a direct index into a temporary
    data pointer. For strided operands, perform strength reduction in the
inner dimension by adding the stride to the data pointer in each iteration.
"""
specialization_name = "strided"
order = "C"
is_strided_specializer = True
vectorized_equivalents = None
def matching_contiguity(self, type):
"""
Check whether the array operand for the given type can be directly
indexed.
"""
return ((type.is_c_contig and self.order == "C") or
(type.is_f_contig and self.order == "F"))
def visit_NDIterate(self, node):
b = self.astbuilder
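        # Editor's note: the super() call below appears to rely on the
        # module-level rebinding further down (StridedSpecializer is rebound to
        # StrengthReducingStridedSpecializer when strength_reduction is True);
        # the class name is only resolved when the method is called.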
outer_loop = super(StridedSpecializer, self).visit_NDIterate(node)
# outer_loop = self.strength_reduce_inner_dimension(outer_loop,
# self.inner_loop)
return outer_loop
def strength_reduce_inner_dimension(self, outer_loop, inner_loop):
"""
Reduce the strength of strided array operands in the inner dimension,
by adding the stride to the temporary pointer.
"""
b = self.astbuilder
outer_stats = []
stats = []
for arg in self.function.arguments:
type = arg.variable.type
if type is None:
continue
contig = self.matching_contiguity(type)
if arg.variable in self.pointers and not contig:
p = self.pointers[arg.variable]
if self.order == "C":
inner_dim = type.ndim - 1
else:
inner_dim = 0
# Implement: temp_stride = strides[inner_dim] / sizeof(dtype)
stride = b.stride(arg.variable, inner_dim)
temp_stride = b.temp(stride.type.qualify("const"),
name="temp_stride")
outer_stats.append(
b.assign(temp_stride, b.div(stride, b.sizeof(type.dtype))))
# Implement: temp_pointer += temp_stride
stats.append(b.assign(p, b.add(p, temp_stride)))
inner_loop.body = b.stats(inner_loop.body, *stats)
outer_stats.append(outer_loop)
return b.stats(*outer_stats)
class StrengthReducingStridedFortranSpecializer(
StridedFortranInnerContigSpecializer, StrengthReducingStridedSpecializer):
"""
Specialize on Fortran order for strided operands and apply strength
reduction in the inner dimension.
"""
specialization_name = "strided_fortran"
order = "F"
vectorized_equivalents = None
class StridedSpecializer(StridedCInnerContigSpecializer):
"""
Specialize on strided operands. If some operands are contiguous in the
dimension compatible with the order we are specializing for (the first
if Fortran, the last if C), then perform a direct index into a temporary
    data pointer.
"""
specialization_name = "strided"
order = "C"
vectorized_equivalents = None
is_strided_specializer = True
def matching_contiguity(self, type):
"""
Check whether the array operand for the given type can be directly
indexed.
"""
return ((type.is_c_contig and self.order == "C") or
(type.is_f_contig and self.order == "F"))
def _element_location(self, variable, loop_level):
"""
Generate a strided or directly indexed load of a single element.
"""
#if variable in self.pointers:
if self.matching_contiguity(variable.type):
return super(StridedSpecializer, self)._element_location(variable,
loop_level)
b = self.astbuilder
pointer = self.get_data_pointer(variable, loop_level)
indices = [self.contig_index()]
if self.order == "C":
inner_dim = variable.type.ndim - 1
else:
inner_dim = 0
strides = [b.stride(variable, inner_dim)]
return self._index_pointer(pointer, indices, strides)
class StridedFortranSpecializer(StridedFortranInnerContigSpecializer,
StridedSpecializer):
"""
Specialize on Fortran order for strided operands.
"""
specialization_name = "strided_fortran"
order = "F"
vectorized_equivalents = None
if strength_reduction:
StridedSpecializer = StrengthReducingStridedSpecializer
StridedFortranSpecializer = StrengthReducingStridedFortranSpecializer
class ContigSpecializer(OrderedSpecializer):
"""
    Specialize on all operands being contiguous (all F or all C).
"""
specialization_name = "contig"
is_contig_specializer = True
def visit_FunctionNode(self, node):
node = super(ContigSpecializer, self).visit_FunctionNode(node)
self.astbuilder.create_function_type(node, strides_args=False)
return node
def visit_NDIterate(self, node):
"""
Generate a single ForNode over the total data size.
"""
b = self.astbuilder
original_expr = specialize_ast(node.body)
node = super(ContigSpecializer, self).visit_NDIterate(node)
for_node = b.for_range_upwards(node.body,
upper=self.function.total_shape)
self.function.for_loops = [for_node]
self.indices = [for_node.index]
node = self.omp_for(b.pragma_for(for_node))
self.target = for_node.target
node = self.visit(node)
if self.is_vectorizing_specializer:
fixup_loop = self.process_inner_forloop(for_node, original_expr)
node = b.stats(node, fixup_loop)
return node
def visit_StridePointer(self, node):
return None
def _element_location(self, node, loop_level):
"Directly index the data pointer"
data_pointer = self.astbuilder.data_pointer(node)
return self.astbuilder.index(data_pointer, self.target)
def index(self, loop_level):
return self.target
def contig_index(self):
return self.target
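# Editor's note: illustrative sketch (not part of minivect) of the contiguous
# specialization above: when every operand is contiguous in the same order,
# the N-dimensional iteration collapses into a single loop over the product of
# the shape, and the data pointers are indexed directly.
def _example_contig_add(a, b, shape):
    """Element-wise sum of two flat, contiguous buffers with the given shape."""
    total_shape = 1
    for extent in shape:
        total_shape *= extent          # the product compute_total_shape() stores
    return [a[i] + b[i] for i in range(total_shape)]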
class CTiledStridedSpecializer(StridedSpecializer):
"""
Generate tiled code for the last two (C) or first two (F) dimensions.
The blocksize may be overridden through the get_blocksize method, in
a specializer subclass or mixin (see miniast.Context.specializer_mixin_cls).
"""
specialization_name = "tiled"
order = "C"
is_tiled_specializer = True
vectorized_equivalents = None
def get_blocksize(self):
"""
Get the tile size. Override in subclasses to provide e.g. parametric
tiling.
"""
return self.astbuilder.constant(64)
def tiled_order(self):
"Tile in the last two dimensions"
return self.function.ndim - 1, self.function.ndim - 1 - 2, -1
def untiled_order(self):
return self.function.ndim - 1 - 2, -1, -1
def visit_NDIterate(self, node):
assert self.function.ndim >= 2
return self._tile_in_two_dimensions(node)
def _tile_in_two_dimensions(self, node):
"""
This version generates tiling loops in the first or last two dimensions
(depending on C or Fortran order).
"""
b = self.astbuilder
self.tiled_indices = []
self.indices = []
self.blocksize = self.get_blocksize()
# Generate the two outer tiling loops
tiled_loop_body = b.stats(b.constant(0)) # fake empty loop body
controlling_loops, body = self.ordered_loop(
tiled_loop_body, self.tiled_indices, step=self.blocksize,
loop_order=self.tiled_order())
del tiled_loop_body.stats[:]
# Generate some temporaries to store the upper limit of the inner
# tiled loops
upper_limits = {}
stats = []
# sort the indices in forward order, to match up with the ordered
# indices
tiled_order = sorted(range(*self.tiled_order()))
for i, index in zip(tiled_order, self.tiled_indices):
upper_limit = b.temp(index.type)
tiled_loop_body.stats.append(
b.assign(upper_limit, b.min(b.add(index, self.blocksize),
b.shape_index(i, self.function))))
upper_limits[i] = upper_limit
tiled_indices = dict(zip(tiled_order, self.tiled_indices))
def lower(i):
if i in tiled_indices:
return tiled_indices[i]
return None
def upper(i):
if i in upper_limits:
return upper_limits[i]
return b.shape_index(i, self.function)
# Generate the inner tiled loops
outer_for_node = node.body
inner_body = node.body
tiling_loops, inner_loops = self.ordered_loop(
node.body, self.indices,
lower=lower, upper=upper,
loop_order=self.tiled_order())
tiled_loop_body.stats.append(inner_loops)
innermost_loop = inner_loops.body
# Generate the outer loops (in case the array operands have more than
# two dimensions)
indices = []
outer_loops, body = self.ordered_loop(body, indices,
loop_order=self.untiled_order())
body = self.omp_for(body)
# At this point, 'self.indices' are the indices of the tiled loop
# (the indices in the first two dimensions for Fortran,
        # the indices in the last two dimensions for C)
# 'indices' are the indices of the outer loops
if self.order == "C":
self.indices = indices + self.indices
else:
self.indices = self.indices + indices
# if strength_reduction:
# body = self.strength_reduce_inner_dimension(body, innermost_loop)
for dim, for_node in enumerate(controlling_loops):
for_node.is_controlling_loop = True
for_node.blocksize = self.blocksize
for dim, for_node in enumerate(tiling_loops):
for_node.is_tiling_loop = True
self.set_dims(controlling_loops)
self.set_dims(tiling_loops)
self.function.controlling_loops = controlling_loops
self.function.tiling_loops = tiling_loops
self.function.outer_loops = outer_loops
self.function.for_loops = outer_loops + controlling_loops + tiling_loops
self.function.lower_tiling_limits = tiled_indices
self.function.upper_tiling_limits = upper_limits
return self.visit(body)
def set_dims(self, tiled_loops):
"Set the 'dim' attributes of the tiling and controlling loops"
# We need to reverse our tiled order, since this order is used to
# build up the for nodes in reverse. We have an ordered list of for
# nodes.
tiled_order = reversed(range(*self.tiled_order()))
for dim, for_node in zip(tiled_order, tiled_loops):
for_node.dim = dim
def _tile_in_all_dimensions(self, node):
"""
This version generates tiling loops in all dimensions.
"""
b = self.astbuilder
self.tiled_indices = []
self.indices = []
self.blocksize = self.get_blocksize()
tiled_loop_body = b.stats(b.constant(0)) # fake empty loop body
controlling_loops, body = self.ordered_loop(tiled_loop_body,
self.tiled_indices,
step=self.blocksize)
body = self.omp_for(body)
del tiled_loop_body.stats[:]
upper_limits = []
stats = []
for i, index in enumerate(self.tiled_indices):
upper_limit = b.temp(index.type)
tiled_loop_body.stats.append(
b.assign(upper_limit, b.min(b.add(index, self.blocksize),
b.shape_index(i, self.function))))
upper_limits.append(upper_limit)
tiling_loops, inner_body = self.ordered_loop(
node.body, self.indices,
lower=lambda i: self.tiled_indices[i],
upper=lambda i: upper_limits[i])
tiled_loop_body.stats.append(inner_body)
self.function.controlling_loops = controlling_loops
self.function.tiling_loops = tiling_loops
self.function.outer_loops = []
self.function.for_loops = tiling_loops
return self.visit(body)
def strided_indices(self):
return self.indices[:-1] + [self.tiled_indices[1]]
def _element_location(self, variable, loop_level):
"""
Return data + i * strides[0] + j * strides[1] when we are not using
strength reduction. Otherwise generate temp_data += strides[1]. For
this to work, temp_data must be set to
data + i * strides[0] + outer_j * strides[1]. This happens through
_compute_inner_dim_pointers with tiled=True.
"""
if strength_reduction:
return super(CTiledStridedSpecializer, self)._element_location(
variable, loop_level)
else:
return self._strided_element_location(variable)
def get_data_pointer(self, variable, loop_level):
return self.compute_inner_dim_pointer(variable, loop_level, tiled=True)
class FTiledStridedSpecializer(StridedFortranSpecializer,
#StrengthReducingStridedFortranSpecializer,
CTiledStridedSpecializer):
"Tile in Fortran order"
specialization_name = "tiled_fortran"
order = "F"
def tiled_order(self):
"Tile in the first two dimensions"
return 0, 2, 1
def untiled_order(self):
return 2, self.function.ndim, 1
def strided_indices(self):
return [self.tiled_indices[0]] + self.indices[1:]
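# Editor's note: a plain-Python sketch (illustrative only) of the tiling
# scheme generated above: two controlling loops step by the blocksize, and the
# inner tiling loops run up to min(tile_start + blocksize, extent), mirroring
# the upper-limit temporaries built in _tile_in_two_dimensions().
def _example_tiled_sum(matrix, blocksize=64):
    """Sum a list-of-lists matrix by visiting it tile by tile."""
    n_rows = len(matrix)
    n_cols = len(matrix[0]) if matrix else 0
    total = 0
    for ii in range(0, n_rows, blocksize):          # controlling loop, dim 0
        for jj in range(0, n_cols, blocksize):      # controlling loop, dim 1
            for i in range(ii, min(ii + blocksize, n_rows)):     # tiling loop
                for j in range(jj, min(jj + blocksize, n_cols)):
                    total += matrix[i][j]
    return total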
#
### Vectorized specializer equivalents
#
def create_vectorized_specializers(specializer_cls):
"""
Creates Vectorizing specializer classes from the given specializer for
SSE and AVX.
"""
bases = (VectorizingSpecializer, specializer_cls)
d = dict(vectorized_equivalents=None)
name = 'Vectorized%%d%s' % specializer_cls.__name__
cls1 = type(name % 4, bases, dict(d, vector_size=4))
cls2 = type(name % 8, bases, dict(d, vector_size=8))
return cls1, cls2
ContigSpecializer.vectorized_equivalents = (
create_vectorized_specializers(ContigSpecializer))
StridedCInnerContigSpecializer.vectorized_equivalents = (
create_vectorized_specializers(StridedCInnerContigSpecializer))
StridedFortranInnerContigSpecializer.vectorized_equivalents = (
create_vectorized_specializers(StridedFortranInnerContigSpecializer))
#
### Create dict of all specializers
#
_specializer_list = [
ContigSpecializer,
StridedCInnerContigSpecializer, StridedFortranInnerContigSpecializer,
StridedSpecializer, StridedFortranSpecializer,
CTiledStridedSpecializer, FTiledStridedSpecializer,
]
specializers = {}
for sp in _specializer_list:
specializers[sp.specialization_name] = sp
vectorizers = getattr(sp, 'vectorized_equivalents', None)
if vectorizers:
specializers[sp.specialization_name + '_sse'] = vectorizers[0]
specializers[sp.specialization_name + '_avx'] = vectorizers[1]
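# Editor's note: illustrative usage sketch, not part of the original module.
# Callers can look specializers up by name; the '_sse'/'_avx' variants only
# exist for specializers that define vectorized equivalents.
def _example_pick_specializer(name='contig', want_avx=False):
    """Return a specializer class from the registry built above."""
    key = name + '_avx'
    if not (want_avx and key in specializers):
        key = name
    return specializers[key]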
|
markflorisson/minivect
|
minivect/specializers.py
|
Python
|
bsd-2-clause
| 56,778
|
[
"VisIt"
] |
dadc27d6154a00659b4ed30cbca211a13681b6f22ed3e23533870112daea5332
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to
# make it easier to submit large numbers of jobs on supercomputers. It
# provides a python interface to physical input, such as crystal structures,
# as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs.
# It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyLaDa. If not, see <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture, mark
from pylada.process.pool import PoolProcess
from .conftest import jobfolders, mpi4py_required
@fixture
def comm():
from pylada import default_comm
result = default_comm.copy()
result['n'] = 4
return result
def processalloc(job):
"""returns a random number between 1 and 4 included."""
from random import randint
return randint(1, 4)
@mpi4py_required
def test_failures(tmpdir, executable, comm):
"""Tests whether scheduling jobs works on known failure cases."""
from pylada import default_comm
root = jobfolders(executable, 0, 8)
def processalloc_test1(job):
d = {'1': 1, '0': 3, '3': 3, '2': 3, '5': 3, '4': 2, '7': 2, '6': 1}
return d[job.name[1:-1]]
program = PoolProcess(
root, processalloc=processalloc_test1, outdir=str(tmpdir))
program._comm = comm
for i in range(10000):
jobs = program._getjobs()
assert sum(program._alloc[u] for u in jobs) <= program._comm['n'],\
(jobs, [program._alloc[u] for u in jobs])
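# Editor's note: an illustrative sketch (not PoolProcess's actual scheduler)
# of the invariant the assertions above check: whatever subset of jobs is
# picked, the processes allocated to them must fit inside the communicator,
# i.e. sum(alloc[job] for job in picked) <= nprocs.
def _example_greedy_schedule(alloc, nprocs):
    """Greedily pick job names whose combined allocation fits in nprocs."""
    picked, used = [], 0
    for name, procs in sorted(alloc.items(), key=lambda item: item[1]):
        if used + procs <= nprocs:
            picked.append(name)
            used += procs
    return picked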
@mpi4py_required
@mark.parametrize('nprocs, njobs', [(8, 20), (16, 20)])
def test_getjobs(comm, tmpdir, executable, nprocs, njobs):
"""Test scheduling."""
root = jobfolders(executable, 0, 8)
def processalloc(job):
"""returns a random number between 1 and 4 included."""
from random import randint
return randint(1, comm['n'])
for j in range(100):
program = PoolProcess(
root, processalloc=processalloc, outdir=str(tmpdir))
program._comm = comm
for i in range(1000):
jobs = program._getjobs()
assert sum(program._alloc[u] for u in jobs) <= program._comm['n'],\
(jobs, [program._alloc[u] for u in jobs])
|
pylada/pylada-light
|
tests/process/test_pool.py
|
Python
|
gpl-3.0
| 2,948
|
[
"CRYSTAL",
"VASP"
] |
6f3d289d3f8e51553649ff1e903e6366be222b87aa6649e5a2c40cb34b51f545
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START mod1b_flask]
from flask import Flask, render_template, request
from google.appengine.api import wrap_wsgi_app
from google.appengine.ext import ndb
app = Flask(__name__)
app.wsgi_app = wrap_wsgi_app(app.wsgi_app)
class Visit(ndb.Model):
'Visit entity registers visitor IP address & timestamp'
visitor = ndb.StringProperty()
timestamp = ndb.DateTimeProperty(auto_now_add=True)
def store_visit(remote_addr, user_agent):
'create new Visit entity in Datastore'
Visit(visitor='{}: {}'.format(remote_addr, user_agent)).put()
def fetch_visits(limit):
'get most recent visits'
return Visit.query().order(-Visit.timestamp).fetch(limit)
@app.route('/')
def root():
'main application (GET) handler'
store_visit(request.remote_addr, request.user_agent)
visits = fetch_visits(10)
return render_template('index.html', visits=visits)
# [END mod1b_flask]
|
googlecodelabs/migrate-python2-appengine
|
mod1b-flask/main.py
|
Python
|
apache-2.0
| 1,472
|
[
"VisIt"
] |
93084f90849977e567bc018cda6ceaa6e8285c3ff1b4151df1eb6454626bdf65
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2013 wgx731 <wgx731@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
""" Visit plugin test
Test Cases for xiaohuangji visit plugin
"""
__author__ = 'wgx731'
__copyright__ = 'Copyright (c) 2013 wgx731'
__license__ = 'MIT'
__version__ = '0.1'
__maintainer__ = 'wgx731'
__email__ = 'wgx731@gmail.com'
__status__ = 'development'
from nose.tools import ok_
from nose.tools import eq_
from test_config import *
from ..plugins import visit
sys.path = [TEST_DIR] + sys.path
class TestVisit(TestBase):
def setup(self):
pass
def teardown(self):
pass
def test_visit_test_1(self):
eq_(False, visit.test({'message': '别来访'}, None), WRONG_KEY_WORD_ERROR)  # '别来访' ("do not visit") lacks the trigger keyword, so test() should return False
def test_visit_test_2(self):
eq_(True, visit.test({'message': '求来访'}, None), WRONG_RESULT_ERROR)  # '求来访' ("please visit") contains the trigger keyword, so test() should return True
|
fxyzj/xiaohuangji
|
tests/test_visit.py
|
Python
|
mit
| 1,845
|
[
"VisIt"
] |
8d0111476b937e88abee52d49e6bfd3d3c197a61c9e9c8aabb6121ef64242f81
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, division, print_function
from licensedcode.match import get_texts
from licensedcode.match import LicenseMatch
from licensedcode.seq import match_blocks
from licensedcode.spans import Span
TRACE = False
TRACE2 = False
def logger_debug(*args): pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args))
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
"""
Matching strategy using pair-wise multiple local sequences alignment and diff-
like approaches.
"""
MATCH_SEQ = '3-seq'
def match_sequence(idx, candidate, query_run, start_offset=0):
"""
Return a list of LicenseMatch by matching the `query_run` tokens sequence
against the `idx` index for the `candidate` rule tuple (rid, rule,
intersection).
"""
if not candidate:
return []
rid, rule, _intersection = candidate
high_postings = idx.high_postings_by_rid[rid]
itokens = idx.tids_by_rid[rid]
len_junk = idx.len_junk
qbegin = query_run.start + start_offset
qfinish = query_run.end
qtokens = query_run.query.tokens
query = query_run.query
matches = []
qstart = qbegin
qlen = len(query_run)
# match as long as we find alignments and have high matchable tokens;
# this allows finding repeated instances of the same rule in the query run
query_run_matchables = query_run.matchables
while qstart <= qfinish:
if not query_run_matchables:
break
block_matches = match_blocks(qtokens, itokens, qstart, qlen, high_postings, len_junk, query_run_matchables)
if not block_matches:
break
if TRACE2:
logger_debug('block_matches:')
for m in block_matches:
i, j, k = m
print(m)
print('qtokens:', ' '.join(idx.tokens_by_tid[t] for t in qtokens[i:i + k]))
print('itokens:', ' '.join(idx.tokens_by_tid[t] for t in itokens[j:j + k]))
# create one match for each matching block: this is not entirely correct,
# but it will be sorted out at LicenseMatch merging and filtering time
for qpos, ipos, mlen in block_matches:
qspan = Span(range(qpos, qpos + mlen))
iposses = range(ipos, ipos + mlen)
hispan = Span(p for p in iposses if itokens[p] >= len_junk)
ispan = Span(iposses)
match = LicenseMatch(rule, qspan, ispan, hispan, qbegin, matcher=MATCH_SEQ, query=query)
if TRACE2:
qt, it = get_texts(
match, location=query.location, query_string=query.query_string, idx=idx)
print('###########################')
print(match)
print('###########################')
print(qt)
print('###########################')
print(it)
print('###########################')
matches.append(match)
qstart = max([qstart, qspan.end + 1])
if TRACE: map(logger_debug, matches)
return matches
|
yashdsaraf/scancode-toolkit
|
src/licensedcode/match_seq.py
|
Python
|
apache-2.0
| 4,607
|
[
"VisIt"
] |
ed886362936b0bce304888107d32929b87a84f810d44d9c9cb74214273cfe71e
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import datetime as dt
from rest_framework import status as http_status
import logging
import unittest
import markupsafe
import mock
import pytest
from nose.tools import * # noqa: F403
import re
from django.utils import timezone
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (
fake_email,
AuthUserFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
SubjectFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
)
from osf.utils import permissions
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_not_in(member, container, **kwargs)
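# Editor's note: a small usage sketch for the two helpers above; the values are
# illustrative only. markupsafe escaping means the raw member is matched against
# its HTML-escaped form in the response body:
#
#     assert_in_html("tess' test string", res)   # found as tess&#39; test string
#     assert_not_in_html('<script>', res)        # checked as &lt;script&gt;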
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_410(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 308)
assert_in('/login/', res.headers['Location'])
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestAUser(OsfTestCase):
def setUp(self):
super(TestAUser, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_can_see_profile_url(self):
res = self.app.get(self.user.url).maybe_follow()
assert_in(self.user.url, res)
# `GET /login/` without parameters is redirected to the `/dashboard/` page, which has the `@must_be_logged_in` decorator;
# if the user is not logged in, they are further redirected to the CAS login page
def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
res = self.app.get('/login/').follow()
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('login?service=', location)
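# Editor's note: for reference, the redirect chain exercised above is
# GET /login/ -> /dashboard/ (guarded by @must_be_logged_in) -> 302 to the CAS
# endpoint, whose Location contains 'login?service='.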
def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
res = self.app.get('/login/', auth=self.user.auth)
assert_equal(res.status_code, 302)
assert 'dashboard' in res.headers.get('Location')
def test_register_page(self):
res = self.app.get('/register/')
assert_equal(res.status_code, 200)
def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
res = self.app.get('/register/', auth=self.user.auth)
assert_equal(res.status_code, 302)
assert 'dashboard' in res.headers.get('Location')
def test_sees_projects_in_her_dashboard(self):
# the user already has a project
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user)
project.save()
res = self.app.get('/myprojects/', auth=self.user.auth)
assert_in('Projects', res) # Projects heading
def test_does_not_see_osffiles_in_user_addon_settings(self):
res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
assert_not_in('OSF Storage', res)
def test_sees_osffiles_in_project_addon_settings(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(
self.user,
permissions=permissions.ADMIN,
save=True)
res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True)
assert_in('OSF Storage', res)
def test_sees_correct_title_on_dashboard(self):
# User goes to dashboard
res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
title = res.html.title.string
assert_equal('OSF | My Projects', title)
def test_can_see_make_public_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=permissions.ADMIN,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Public', res)
def test_cant_see_make_public_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=permissions.WRITE,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Public', res)
def test_can_see_make_private_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=permissions.ADMIN,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Private', res)
def test_cant_see_make_private_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=permissions.WRITE,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Private', res)
def test_sees_logs_on_a_project(self):
project = ProjectFactory(is_public=True)
# User goes to the project's page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
# Can see log event
assert_in('created', res)
def test_no_wiki_content_message(self):
project = ProjectFactory(creator=self.user)
# Goes to project's wiki, where there is no content
res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
# Sees a message indicating no content
assert_in('Add important information, links, or images here to describe your project.', res)
# Sees that edit panel is open by default when home wiki has no content
assert_in('panelsUsed: ["view", "menu", "edit"]', res)
def test_wiki_content(self):
project = ProjectFactory(creator=self.user)
wiki_page_name = 'home'
wiki_content = 'Kittens'
wiki_page = WikiFactory(
user=self.user,
node=project,
)
wiki = WikiVersionFactory(
wiki_page=wiki_page,
content=wiki_content
)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
wiki_page_name,
), auth=self.auth)
assert_not_in('Add important information, links, or images here to describe your project.', res)
assert_in(wiki_content, res)
assert_in('panelsUsed: ["view", "menu"]', res)
def test_wiki_page_name_non_ascii(self):
project = ProjectFactory(creator=self.user)
non_ascii = to_mongo_key('WöRlÐé')
WikiPage.objects.create_for_node(project, 'WöRlÐé', 'new content', Auth(self.user))
wv = WikiVersion.objects.get_for_node(project, non_ascii)
assert wv.wiki_page.page_name.upper() == non_ascii.upper()
def test_noncontributor_cannot_see_wiki_if_no_content(self):
user2 = UserFactory()
# user2 creates a public project and adds no wiki content
project = ProjectFactory(creator=user2, is_public=True)
# self navigates to project
res = self.app.get(project.url).maybe_follow()
# Should not see wiki widget (since non-contributor and no content)
assert_not_in('Add important information, links, or images here to describe your project.', res)
def test_wiki_does_not_exist(self):
project = ProjectFactory(creator=self.user)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
'not a real page yet',
), auth=self.auth, expect_errors=True)
assert_in('Add important information, links, or images here to describe your project.', res)
def test_sees_own_profile(self):
res = self.app.get('/profile/', auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, self.user.display_absolute_url)
def test_sees_another_profile(self):
user2 = UserFactory()
res = self.app.get(user2.url, auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, user2.display_absolute_url)
@pytest.mark.enable_bookmark_creation
class TestComponents(OsfTestCase):
def setUp(self):
super(TestComponents, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
# A non-project component
self.component = NodeFactory(
category='hypothesis',
creator=self.user,
parent=self.project,
)
self.component.save()
self.component.set_privacy('public', self.consolidate_auth)
self.component.set_privacy('private', self.consolidate_auth)
self.project.save()
self.project_url = self.project.web_url_for('view_project')
def test_sees_parent(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
parent_title = res.html.find_all('h2', class_='node-parent-title')
assert_equal(len(parent_title), 1)
assert_in(self.project.title, parent_title[0].text) # Bs4 will handle unescaping HTML here
def test_delete_project(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth
).maybe_follow()
assert_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_cant_delete_project_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=permissions.WRITE,
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_can_configure_comments_if_admin(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth,
).maybe_follow()
assert_in('Commenting', res)
def test_cant_configure_comments_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=permissions.WRITE,
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in('Commenting', res)
def test_components_should_have_component_list(self):
res = self.app.get(self.component.url, auth=self.user.auth)
assert_in('Components', res)
@pytest.mark.enable_bookmark_creation
class TestPrivateLinkView(OsfTestCase):
def setUp(self):
super(TestPrivateLinkView, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory(anonymous=True)
self.link.nodes.add(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_anonymous_link_hide_contributor(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_in('Anonymous Contributors', res.body.decode())
assert_not_in(self.user.fullname, res)
def test_anonymous_link_hides_citations(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_not_in('Citation:', res)
def test_no_warning_for_read_only_user_with_valid_link(self):
link2 = PrivateLinkFactory(anonymous=False)
link2.nodes.add(self.project)
link2.save()
self.project.add_contributor(
self.user,
permissions=permissions.READ,
save=True,
)
res = self.app.get(self.project_url, {'view_only': link2.key},
auth=self.user.auth)
assert_not_in(
'is being viewed through a private, view-only link. '
'Anyone with the link can view this project. Keep '
'the link safe.',
res.body.decode()
)
def test_no_warning_for_read_only_user_with_invalid_link(self):
self.project.add_contributor(
self.user,
permissions=permissions.READ,
save=True,
)
res = self.app.get(self.project_url, {'view_only': 'not_valid'},
auth=self.user.auth)
assert_not_in(
'is being viewed through a private, view-only link. '
'Anyone with the link can view this project. Keep '
'the link safe.',
res.body.decode()
)
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestMergingAccounts(OsfTestCase):
def setUp(self):
super(TestMergingAccounts, self).setUp()
self.user = UserFactory.build()
self.user.fullname = "tess' test string"
self.user.set_password('science')
self.user.save()
self.dupe = UserFactory.build()
self.dupe.set_password('example')
self.dupe.save()
def test_merged_user_is_not_shown_as_a_contributor(self):
project = ProjectFactory(is_public=True)
# Both the master and dupe are contributors
project.add_contributor(self.dupe, log=False)
project.add_contributor(self.user, log=False)
project.save()
# At the project page, both are listed as contributors
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_in_html(self.dupe.fullname, res)
# The accounts are merged
self.user.merge_user(self.dupe)
self.user.save()
# Now only the master user is shown at the project page
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_true(self.dupe.is_merged)
assert_not_in(self.dupe.fullname, res)
def test_merged_user_has_alert_message_on_profile(self):
# Master merges dupe
self.user.merge_user(self.dupe)
self.user.save()
# At the dupe user's profile there is an alert message at the top
# indicating that the user is merged
res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
assert_in('This account has been merged', res)
@pytest.mark.enable_bookmark_creation
class TestShortUrls(OsfTestCase):
def setUp(self):
super(TestShortUrls, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
# A non-project component
self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
# Hack: Add some logs to component; should be unnecessary pending
# improvements to factories from @rliebz
self.component.set_privacy('public', auth=self.consolidate_auth)
self.component.set_privacy('private', auth=self.consolidate_auth)
self.wiki = WikiFactory(
user=self.user,
node=self.component,
)
def _url_to_body(self, url):
return self.app.get(
url,
auth=self.auth
).maybe_follow(
auth=self.auth,
).normal_body
def test_project_url(self):
assert_equal(
self._url_to_body(self.project.deep_url),
self._url_to_body(self.project.url),
)
def test_component_url(self):
assert_equal(
self._url_to_body(self.component.deep_url),
self._url_to_body(self.component.url),
)
def test_wiki_url(self):
assert_equal(
self._url_to_body(self.wiki.deep_url),
self._url_to_body(self.wiki.url),
)
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_implicit_clean
class TestClaiming(OsfTestCase):
def setUp(self):
super(TestClaiming, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
def test_correct_name_shows_in_contributor_list(self):
name1, email = fake.name(), fake_email()
UnregUserFactory(fullname=name1, email=email)
name2, email = fake.name(), fake_email()
# Added with different name
self.project.add_unregistered_contributor(fullname=name2,
email=email, auth=Auth(self.referrer))
self.project.save()
res = self.app.get(self.project.url, auth=self.referrer.auth)
# Correct name is shown
assert_in_html(name2, res)
assert_not_in(name1, res)
def test_user_can_set_password_on_claim_page(self):
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
#form['username'] = new_user.username #Removed as long as E-mail can't be updated.
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().follow()
new_user.reload()
assert_true(new_user.check_password('killerqueen'))
def test_sees_is_redirected_if_user_already_logged_in(self):
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
existing = AuthUserFactory()
claim_url = new_user.get_claim_url(self.project._primary_key)
# a user is already logged in
res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
assert_equal(res.status_code, 302)
def test_unregistered_users_names_are_project_specific(self):
name1, name2, email = fake.name(), fake.name(), fake_email()
project2 = ProjectFactory(creator=self.referrer)
# different projects use different names for the same unreg contributor
self.project.add_unregistered_contributor(
email=email,
fullname=name1,
auth=Auth(self.referrer)
)
self.project.save()
project2.add_unregistered_contributor(
email=email,
fullname=name2,
auth=Auth(self.referrer)
)
project2.save()
self.app.authenticate(*self.referrer.auth)
# Each project displays a different name in the contributor list
res = self.app.get(self.project.url)
assert_in_html(name1, res)
res2 = self.app.get(project2.url)
assert_in_html(name2, res2)
@unittest.skip('as long as E-mails cannot be changed')
def test_cannot_set_email_to_a_user_that_already_exists(self):
reg_user = UserFactory()
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
# Goes to claim url and successfully claims account
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
# Fills out an email that is the username of another user
form['username'] = reg_user.username
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().maybe_follow(expect_errors=True)
assert_in(
language.ALREADY_REGISTERED.format(email=reg_user.username),
res
)
def test_correct_display_name_is_shown_at_claim_page(self):
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.referrer),
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
# Correct name (different_name) should be on page
assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
def setUp(self):
super(TestConfirmingEmail, self).setUp()
self.user = UnconfirmedUserFactory()
self.confirmation_url = self.user.get_confirmation_url(
self.user.username,
external=False,
)
self.confirmation_token = self.user.get_confirmation_token(
self.user.username
)
def test_cannot_remove_another_user_email(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
url = api_url_for('update_user')
header = {'id': user1.username, 'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_make_primary_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = 'test@cos.io'
user1.emails.create(address=email)
user1.save()
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_add_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = 'test@cos.io'
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_error_page_if_confirm_link_is_used(self):
self.user.confirm_email(self.confirmation_token)
self.user.save()
res = self.app.get(self.confirmation_url, expect_errors=True)
assert_in(auth_exc.InvalidTokenError.message_short, res)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestClaimingAsARegisteredUser(OsfTestCase):
def setUp(self):
super(TestClaimingAsARegisteredUser, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
name, email = fake.name(), fake_email()
self.user = self.project.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
self.project.save()
def test_claim_user_registered_with_correct_password(self):
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86'
url = self.user.get_claim_url(self.project._primary_key)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body.decode())
form = res.forms['claimContributorForm']
form['password'] = 'queenfan86'
res = form.submit(auth=reg_user.auth)
res = res.follow(auth=reg_user.auth)
self.project.reload()
self.user.reload()
# user is now a contributor to the project
assert_in(reg_user, self.project.contributors)
# the unregistered user (self.user) is removed as a contributor, and their
assert_not_in(self.user, self.project.contributors)
# unclaimed record for the project has been deleted
assert_not_in(self.project, self.user.unclaimed_records)
def test_claim_user_registered_preprint_with_correct_password(self):
preprint = PreprintFactory(creator=self.referrer)
name, email = fake.name(), fake_email()
unreg_user = preprint.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86'
url = unreg_user.get_claim_url(preprint._id)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body.decode())
form = res.forms['claimContributorForm']
form['password'] = 'queenfan86'
res = form.submit(auth=reg_user.auth)
preprint.reload()
unreg_user.reload()
# user is now a contributor to the project
assert_in(reg_user, preprint.contributors)
# the unregistered user (unreg_user) is removed as a contributor, and their
assert_not_in(unreg_user, preprint.contributors)
# unclaimed record for the project has been deleted
assert_not_in(preprint, unreg_user.unclaimed_records)
class TestResendConfirmation(OsfTestCase):
def setUp(self):
super(TestResendConfirmation, self).setUp()
self.unconfirmed_user = UnconfirmedUserFactory()
self.confirmed_user = UserFactory()
self.get_url = web_url_for('resend_confirmation_get')
self.post_url = web_url_for('resend_confirmation_post')
# test that the resend confirmation page loads correctly
def test_resend_confirmation_get(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Resend Confirmation', res.body.decode())
assert_in('resendForm', res.forms)
# test that unconfirmed user can receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_resend_confirmation_email(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
res = form.submit()
# check email, request and response
assert_true(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that confirmed user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.confirmed_user.emails.first().address
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('has already been confirmed', res)
# test that non-existing user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = 'random@random.com'
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that user cannot submit resend confirmation request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.email
res = form.submit()
res = form.submit()
# check request and response
assert_equal(res.status_code, 200)
assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.user = UserFactory()
self.auth_user = AuthUserFactory()
self.get_url = web_url_for('forgot_password_get')
self.post_url = web_url_for('forgot_password_post')
self.user.verification_key_v2 = {}
self.user.save()
# log users out before they land on forgot password page
def test_forgot_password_logs_out_user(self):
# visit forgot password link while another user is logged in
res = self.app.get(self.get_url, auth=self.auth_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('forgotpassword', location)
# test that forgot password page is loaded correctly
def test_get_forgot_password(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body.decode())
assert_in('forgotPasswordForm', res.forms)
# test that existing user can receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was sent
assert_true(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is set
self.user.reload()
assert_not_equal(self.user.verification_key_v2, {})
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = 'fake' + self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that a deactivated (inactive) user cannot receive a reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_not_active_user_no_reset_password_email(self, mock_send_mail):
self.user.disable_account()
self.user.save()
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that user cannot submit forgot password request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
res = form.submit()
# check http 200 response
assert_equal(res.status_code, 200)
# check push notification
assert_in_html('Please wait', res)
assert_not_in_html('If there is an OSF account', res)
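# Editor's note: the four cases above all expect HTTP 200 from /forgotpassword;
# only the first (an existing, active user) should trigger mails.send_mail and
# set verification_key_v2, while repeated submissions hit the 'Please wait'
# throttle message.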
class TestForgotPasswordInstitution(OsfTestCase):
def setUp(self):
super(TestForgotPasswordInstitution, self).setUp()
self.user = UserFactory()
self.auth_user = AuthUserFactory()
self.get_url = web_url_for('redirect_unsupported_institution')
self.post_url = web_url_for('forgot_password_institution_post')
self.user.verification_key_v2 = {}
self.user.save()
# log users out before they land on institutional forgot password page
def test_forgot_password_logs_out_user(self):
# visit forgot password link while another user is logged in
res = self.app.get(self.get_url, auth=self.auth_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('campaign=unsupportedinstitution', location)
assert_in('logout?service=', location)
# test that institutional forgot password page redirects to CAS unsupported
# institution page
def test_get_forgot_password(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('campaign=unsupportedinstitution', location)
# test that user from disabled institution can receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_reset_password_email(self, mock_send_mail):
# submit email to institutional forgot-password page
res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
# check mail was sent
assert_true(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword-institution
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is set
self.user.reload()
assert_not_equal(self.user.verification_key_v2, {})
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.post(self.post_url, {'forgot_password-email': 'fake' + self.user.username})
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword-institution
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that a deactivated (inactive) user cannot receive an institutional reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_not_active_user_no_reset_password_email(self, mock_send_mail):
self.user.disable_account()
self.user.save()
res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword-institution
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that user cannot submit forgot password request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
# submit institutional forgot-password request in rapid succession
res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
# check http 200 response
assert_equal(res.status_code, 200)
# check push notification
assert_in_html('Please wait', res)
assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.me = AuthUserFactory()
self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())
# regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
def test_has_public_projects_and_components(self):
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.me.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
# Another user can also see my public project and component
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.user.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
def test_shows_projects_with_many_contributors(self):
# My project has many contributors
for _ in range(5):
user = UserFactory()
self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
res = self.app.get(url, auth=self.me.auth)
# I see '3 more' as a link
assert_in('3 more', res)
res = res.click('3 more')
assert_equal(res.request.path, self.project.url)
def test_has_no_public_projects_or_components_on_own_profile(self):
# User goes to their profile
url = web_url_for('profile_view_id', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
# user has no public components/projects
assert_in('You have no public projects', res)
assert_in('You have no public components', res)
def test_user_no_public_projects_or_components(self):
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# User has no public components/projects
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public projects', res)
assert_in('This user has no public components', res)
# regression test
def test_does_not_show_registrations(self):
project = ProjectFactory(creator=self.user)
component = NodeFactory(parent=project, creator=self.user, is_public=False)
# User has a registration with public components
reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
for each in reg.nodes:
each.is_public = True
each.save()
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# Registration does not appear on profile
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public components', res)
assert_not_in(reg.title, res)
assert_not_in(reg.nodes[0].title, res)
@pytest.mark.enable_bookmark_creation
class TestPreprintBannerView(OsfTestCase):
def setUp(self):
super(TestPreprintBannerView, self).setUp()
self.admin = AuthUserFactory()
self.write_contrib = AuthUserFactory()
self.read_contrib = AuthUserFactory()
self.non_contrib = AuthUserFactory()
self.provider_one = PreprintProviderFactory()
self.project_one = ProjectFactory(creator=self.admin, is_public=True)
self.project_one.add_contributor(self.write_contrib, permissions.WRITE)
self.project_one.add_contributor(self.read_contrib, permissions.READ)
self.subject_one = SubjectFactory()
self.preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True)
self.preprint.add_contributor(self.write_contrib, permissions.WRITE)
self.preprint.add_contributor(self.read_contrib, permissions.READ)
def test_public_project_published_preprint(self):
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_in('Has supplemental materials for', res.body.decode())
def test_public_project_abandoned_preprint(self):
self.preprint.machine_state = 'initial'
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_deleted_preprint(self):
self.preprint.deleted = timezone.now()
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_private_preprint(self):
self.preprint.is_public = False
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_unpublished_preprint(self):
self.preprint.is_published = False
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_pending_preprint_post_moderation(self):
self.preprint.machine_state = 'pending'
provider = PreprintProviderFactory(reviews_workflow='post-moderation')
self.preprint.provider = provider
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_in('on {}'.format(self.preprint.provider.name), res.body.decode())
assert_not_in('Pending\n', res.body.decode())
assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_in('on {}'.format(self.preprint.provider.name), res.body.decode())
assert_not_in('Pending\n', res.body.decode())
assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
def test_implicit_admins_can_see_project_status(self):
project = ProjectFactory(creator=self.admin)
component = NodeFactory(creator=self.admin, parent=project)
project.add_contributor(self.write_contrib, permissions.ADMIN)
project.save()
preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=component, is_published=True)
preprint.machine_state = 'pending'
provider = PreprintProviderFactory(reviews_workflow='post-moderation')
preprint.provider = provider
preprint.save()
url = component.web_url_for('view_project')
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('{}'.format(preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
def test_public_project_pending_preprint_pre_moderation(self):
self.preprint.machine_state = 'pending'
provider = PreprintProviderFactory(reviews_workflow='pre-moderation')
self.preprint.provider = provider
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_in('Pending\n', res.body.decode())
assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_not_in('Pending\n', res.body.decode())
assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
# Unauthenticated - preprint
res = self.app.get(url)
assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
assert_not_in('Pending\n', res.body.decode())
assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
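# Editor's note: in both moderation workflows above, the 'Pending' label and the
# moderator notice are shown only to contributors (admin/write/read);
# non-contributors and unauthenticated users see only the provider name.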
if __name__ == '__main__':
unittest.main()
|
mfraezz/osf.io
|
tests/test_webtests.py
|
Python
|
apache-2.0
| 55,101
|
[
"VisIt"
] |
8ecf48e9b4e9a091e4a114c082f639478a93bad8aed79f92566f23d5cf58f99f
|
"""Class for calibrating the color-based red-sequence model.
"""
import os
import numpy as np
import fitsio
import time
import esutil
from scipy.optimize import least_squares
from ..configuration import Configuration
from ..fitters import MedZFitter, RedSequenceFitter, RedSequenceOffDiagonalFitter, CorrectionFitter, ErrorBinFitter
from ..redsequence import RedSequenceColorPar
from ..color_background import ColorBackground
from ..galaxy import GalaxyCatalog
from ..catalog import Catalog, Entry
from ..zred_color import ZredColor
from ..utilities import make_nodes, CubicSpline, interpol, RedGalInitialColors
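# Editor's note: a minimal usage sketch for the calibrator defined below; the
# paths are placeholders and everything else is driven by the configuration yaml:
#
#     calib = RedSequenceCalibrator('calib_run.yaml', 'calib_gals.fit')
#     calib.run()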
class RedSequenceCalibrator(object):
"""
Class for calibrating the color-based red-sequence model.
Requires an input galfile that has the following fields:
z: host cluster redshift
pcol: probability of membership using color/luminosity
p: probability of membership using color/luminosity/radial filter
refmag: total magnitude in the reference band
mag: magnitude array
mag_err: magnitude error array
"""
def __init__(self, conf, galfile):
"""
Instantiate a RedSequenceCalibrator.
Parameters
----------
conf: `str` or `redmapper.Configuration`
Configuration yaml file or configuration object
galfile: `str`
Galaxy file with the required fields
"""
if not isinstance(conf, Configuration):
self.config = Configuration(conf)
else:
self.config = conf
self._galfile = galfile
def run(self, doRaise=True):
"""
Run the red-sequence calibration.
Parameters
----------
doRaise: `bool`, optional
Raise an error if background cannot be computed for any galaxies
Default is True. Can be set to False for certain testing.
"""
gals = GalaxyCatalog.from_galfile(self._galfile)
if self.config.calib_use_pcol:
use, = np.where((gals.z > self.config.zrange[0]) &
(gals.z < self.config.zrange[1]) &
(gals.pcol > self.config.calib_pcut))
else:
use, = np.where((gals.z > self.config.zrange[0]) &
(gals.z < self.config.zrange[1]) &
(gals.p > self.config.calib_pcut))
if use.size == 0:
raise RuntimeError("No good galaxies in %s!" % (self._galfile))
gals = gals[use]
nmag = self.config.nmag
ncol = nmag - 1
# Reference mag nodes for pivot
pivotnodes = make_nodes(self.config.zrange, self.config.calib_pivotmag_nodesize)
# Covmat nodes
covmatnodes = make_nodes(self.config.zrange, self.config.calib_covmat_nodesize)
# correction nodes
corrnodes = make_nodes(self.config.zrange, self.config.calib_corr_nodesize)
# correction slope nodes
corrslopenodes = make_nodes(self.config.zrange, self.config.calib_corr_slope_nodesize)
# volume factor (hard coded)
volnodes = make_nodes(self.config.zrange, 0.01)
# Start building the par dtype
dtype = [('pivotmag_z', 'f4', pivotnodes.size),
('pivotmag', 'f4', pivotnodes.size),
('minrefmag', 'f4', pivotnodes.size),
('maxrefmag', 'f4', pivotnodes.size),
('medcol', 'f4', (pivotnodes.size, ncol)),
('medcol_width', 'f4', (pivotnodes.size, ncol)),
('medcol_err_ratio', 'f4', (ncol, )),
('covmat_z', 'f4', covmatnodes.size),
('sigma', 'f4', (ncol, ncol, covmatnodes.size)),
('covmat_amp', 'f4', (ncol, ncol, covmatnodes.size)),
('covmat_slope', 'f4', (ncol, ncol, covmatnodes.size)),
('mag_err_ratio_int', 'f4', (nmag, )),
('mag_err_ratio_slope', 'f4', (nmag, )),
('mag_err_ratio_pivot', 'f4'),
('corr_z', 'f4', corrnodes.size),
('corr', 'f4', corrnodes.size),
('corr_slope_z', 'f4', corrslopenodes.size),
('corr_slope', 'f4', corrslopenodes.size),
('corr_r', 'f4', corrslopenodes.size),
('corr2', 'f4', corrnodes.size),
('corr2_slope', 'f4', corrslopenodes.size),
('corr2_r', 'f4', corrslopenodes.size),
('volume_factor_z', 'f4', volnodes.size),
('volume_factor', 'f4', volnodes.size)]
# And for each color, make the nodes
node_dict = {}
self.ztag = [None] * ncol
self.ctag = [None] * ncol
self.zstag = [None] * ncol
self.stag = [None] * ncol
for j in range(ncol):
self.ztag[j] = 'z%02d' % (j)
self.ctag[j] = 'c%02d' % (j)
self.zstag[j] = 'zs%02d' % (j)
self.stag[j] = 'slope%02d' % (j)
node_dict[self.ztag[j]] = make_nodes(self.config.zrange, self.config.calib_color_nodesizes[j],
maxnode=self.config.calib_color_maxnodes[j])
node_dict[self.zstag[j]] = make_nodes(self.config.zrange, self.config.calib_slope_nodesizes[j],
maxnode=self.config.calib_color_maxnodes[j])
dtype.extend([(self.ztag[j], 'f4', node_dict[self.ztag[j]].size),
(self.ctag[j], 'f4', node_dict[self.ztag[j]].size),
(self.zstag[j], 'f4', node_dict[self.zstag[j]].size),
(self.stag[j], 'f4', node_dict[self.zstag[j]].size)])
# Make the pars ... and fill them with the defaults
self.pars = Entry(np.zeros(1, dtype=dtype))
self.pars.pivotmag_z = pivotnodes
self.pars.covmat_z = covmatnodes
self.pars.corr_z = corrnodes
self.pars.corr_slope_z = corrslopenodes
self.pars.volume_factor_z = volnodes
self.pars.mag_err_ratio_pivot = self.config.calib_err_ratio_pivot
for j in range(ncol):
self.pars._ndarray[self.ztag[j]] = node_dict[self.ztag[j]]
self.pars._ndarray[self.zstag[j]] = node_dict[self.zstag[j]]
# And a special subset of color galaxies
if self.config.calib_use_pcol:
coluse, = np.where(gals.pcol > self.config.calib_color_pcut)
else:
coluse, = np.where(gals.p > self.config.calib_color_pcut)
colgals = gals[coluse]
# And a placeholder zredstr which allows us to do stuff
self.zredstr = RedSequenceColorPar(None, config=self.config)
# And read the color background
self.bkg = ColorBackground(self.config.bkgfile_color)
# And prepare for luptitude corrections
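# b holds the asinh (luptitude) softening parameters; b[0] == 0.0 signals that
# plain magnitudes are in use and no luptitude correction is needed. bnmgy is
# presumably the softening converted from maggies to nanomaggies for the flux
# calculations below.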
if self.config.b[0] == 0.0:
self.do_lupcorr = False
else:
self.do_lupcorr = True
self.bnmgy = self.config.b * 1e9
self.lupzp = 22.5
# Compute pivotmags
self._calc_pivotmags(colgals)
# Compute median colors
self._calc_medcols(colgals)
# Compute diagonal parameters
self._calc_diagonal_pars(gals, doRaise=doRaise)
# Compute off-diagonal parameters
self._calc_offdiagonal_pars(gals, doRaise=doRaise)
# Compute volume factor
self._calc_volume_factor(self.config.zrange[1])
# Write out the parameter file
self.save_pars(self.config.parfile, clobber=False)
# Compute zreds without corrections
# Later will want this parallelized, I think
self._calc_zreds(gals, do_correction=False)
# Compute correction (mode1)
self._calc_corrections(gals)
# Compute correction (mode2)
self._calc_corrections(gals, mode2=True)
# And re-save the parameter file
self.save_pars(self.config.parfile, clobber=True)
# Recompute zreds with corrections
# Later will want this parallelized, I think
self._calc_zreds(gals, do_correction=True)
# And want to save galaxies and zreds
zredfile = os.path.join(self.config.outpath, os.path.basename(self._galfile.rstrip('.fit') + '_zreds.fit'))
gals.to_fits_file(zredfile)
# Make diagnostic plots
self._make_diagnostic_plots(gals)
def _compute_startvals(self, nodes, z, val, xval=None, err=None, median=False, fit=False, mincomp=3):
"""
Compute the starting fit values using a simple algorithm.
Must select one (and only one) of median=True (median fit) or
fit=True (weighted mean fit).
Parameters
----------
nodes: `np.array`
Float array of redshift nodes
z: `np.array`
Float array of redshifts
val: `np.array`
Float array of values to fit (e.g. refmag, color)
xval: `np.array`, optional
X-axis value for color-magnitude relation if fitting slope.
Usually refmag.
Default is None, which means not fitting a slope.
err: `np.array`, optional
Float array of error on val. Not used if fitting median.
Default is None.
median: `bool`, optional
Perform median fit. Default is False.
fit: `bool`, optional
Perform weighted mean fit. Default is False.
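Returns
-------
If median is True: (mvals, scvals), the median of `val` and the median
absolute deviation about it at each node.
If fit is True: (cvals, svals, evals), the robust-fit intercept, the
slope (clipped to be non-positive), and the median of `err` at each
node (evals is None when err is None).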
"""
def _linfunc(p, x, y):
return (p[1] + p[0] * x) - y
if (not median and not fit) or (median and fit):
raise RuntimeError("Must select one and only one of median and fit")
if median:
mvals = np.zeros(nodes.size)
scvals = np.zeros(nodes.size)
else:
cvals = np.zeros(nodes.size)
svals = np.zeros(nodes.size)
if err is not None:
if err.size != val.size:
raise ValueError("val and err must be the same length")
# default all to 0.1
evals = np.zeros(nodes.size) + 0.1
else:
evals = None
for i in range(nodes.size):
if i == 0:
zlo = nodes[0]
else:
zlo = (nodes[i - 1] + nodes[i]) / 2.
if i == nodes.size - 1:
zhi = nodes[i]
else:
zhi = (nodes[i] + nodes[i + 1]) / 2.
u, = np.where((z > zlo) & (z < zhi))
if u.size < mincomp:
if i > 0:
if median:
mvals[i] = mvals[i - 1]
scvals[i] = scvals[i - 1]
else:
cvals[i] = cvals[i - 1]
svals[i] = svals[i - 1]
if err is not None:
evals[i] = evals[i - 1]
else:
if median:
mvals[i] = np.median(val[u])
scvals[i] = np.median(np.abs(val[u] - mvals[i]))
else:
# Robust linear fit; use a separate name to avoid shadowing the `fit` argument
lsq = least_squares(_linfunc, [0.0, 0.0], loss='soft_l1', args=(xval[u], val[u]))
cvals[i] = lsq.x[1]
svals[i] = np.clip(lsq.x[0], None, 0.0)
if err is not None:
evals[i] = np.median(err[u])
if median:
return mvals, scvals
else:
return cvals, svals, evals
def _compute_single_lupcorr(self, j, cvals, svals, gals, dmags, mags, lups, mind, sign):
"""
Compute the luptitude correction for a single color
Parameters
----------
j: `int`
Color index
cvals: `np.array`
Float array of spline values for color at pivotmag
svals: `np.array`
Float array of slope values
gals: `redmapper.GalaxyCatalog`
Galaxy catalog being fit
dmags: `np.array`
Float array of refmag - pivotmag
mags: `np.array`
2d Float array of true (model) magnitudes
lups: `np.array`
2d Float array of true (model) luptitudes
mind: `int`
magnitude index, currently being worked on.
sign: `int`, -1 or 1
Sign of color; -1 if band is redder than ref_ind,
+1 if band is bluer than ref_ind
Returns
-------
lupcorr: `np.array`
Float array of luptitude color corrections
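Notes
-----
The luptitudes computed below follow the standard asinh-magnitude form,
lup = 2.5*log10(1/b) - arcsinh(0.5*flux/b_nmgy) / (0.4*ln(10)),
where the flux is derived from the model magnitude and b_nmgy is the
softening parameter in nanomaggies. The returned correction is the
difference between the color formed from luptitudes and the color
formed from magnitudes.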
"""
spl = CubicSpline(self.pars._ndarray[self.ztag[j]], cvals)
cv = spl(gals.z)
spl = CubicSpline(self.pars._ndarray[self.zstag[j]], svals)
sv = spl(gals.z)
mags[:, mind] = mags[:, mind + sign] + sign * (cv + sv * dmags)
flux = 10.**((mags[:, mind] - self.lupzp) / (-2.5))
lups[:, mind] = 2.5 * np.log10(1.0 / self.config.b[mind]) - np.arcsinh(0.5 * flux / self.bnmgy[mind]) / (0.4 * np.log(10.0))
magcol = mags[:, j] - mags[:, j + 1]
lupcol = lups[:, j] - lups[:, j + 1]
lupcorr = lupcol - magcol
return lupcorr
def _calc_pivotmags(self, gals):
"""
Calculate the pivot magnitude parameters.
These are put into self.pars.pivotmag, self.pars.maxrefmag, and
self.pars.minrefmag
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
"""
self.config.logger.info("Calculating pivot magnitudes...")
# With binning, approximate the positions for starting the fit
# _compute_startvals already returns one starting value per node, so a
# single call is sufficient here.
pivmags, _ = self._compute_startvals(self.pars.pivotmag_z, gals.z, gals.refmag, median=True)
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, gals.refmag)
pivmags = medfitter.fit(pivmags)
self.pars.pivotmag = pivmags
# and min and max...
self.pars.minrefmag = self.zredstr.mstar(self.pars.pivotmag_z) - 2.5 * np.log10(30.0)
lval_min = np.clip(self.config.lval_reference - 0.1, 0.001, None)
self.pars.maxrefmag = self.zredstr.mstar(self.pars.pivotmag_z) - 2.5 * np.log10(lval_min)
def _calc_medcols(self, gals):
"""
Calculate the median color spline parameters.
Sets self.pars.medcol, self.pars.medcol_width
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
"""
self.config.logger.info("Calculating median colors...")
ncol = self.config.nmag - 1
galcolor = gals.galcol
for j in range(ncol):
col = galcolor[:, j]
# get the start values
mvals, scvals = self._compute_startvals(self.pars.pivotmag_z, gals.z, col, median=True)
# compute the median
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, col)
mvals = medfitter.fit(mvals)
# and the scatter
spl = CubicSpline(self.pars.pivotmag_z, mvals)
med = spl(gals.z)
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, np.abs(col - med))
scvals = medfitter.fit(scvals, min_val=0.01)
self.pars.medcol[:, j] = mvals
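# 1.4826 converts a median absolute deviation into an equivalent Gaussian sigma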
self.pars.medcol_width[:, j] = 1.4826 * scvals
if (self.config.calib_fit_err_ratio):
# Compute overall pulls...
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol[:, j])
model = spl(gals.z)
delta = gals.galcol[:, j] - model
col_err = gals.galcol_err[:, j]
# Step over values of r, look for closest to pulls of 1.0
err_ratios = np.arange(0.5, 10.0, 0.1)
sigma_pulls = np.zeros_like(err_ratios)
# Assume first bin median width is close to sig_int
sig_int = self.pars.medcol_width[0, j]
# Note: all selected galaxies enter the pull computation below; only sig_int
# is taken from the first redshift bin.
for i in range(err_ratios.size):
err = np.sqrt((err_ratios[i]*col_err)**2. + sig_int**2.)
pulls = delta/err
sigma_pulls[i] = 1.4826*np.median(np.abs(pulls - np.median(pulls)))
argmin = np.argmin(np.abs(sigma_pulls - 1.0))
self.pars.medcol_err_ratio[j] = err_ratios[argmin]
else:
self.pars.medcol_err_ratio[j] = 1.0
def _calc_diagonal_pars(self, gals, doRaise=True):
"""
Calculate the model parameters and diagonal elements of the covariance
matrix (one color at a time).
Sets self.pars.sigma, self.pars.covmat_amp, self.pars.cXX, self.pars.slopeXX
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
doRaise: `bool`, optional
Raise if there's a problem with the background? Default is True.
"""
# The main routine to compute the red sequence on the diagonal
ncol = self.config.nmag - 1
galcolor = gals.galcol
galcolor_err = gals.galcol_err
# compute the pivot mags
spl = CubicSpline(self.pars.pivotmag_z, self.pars.pivotmag)
pivotmags = spl(gals.z)
# And set the right probabilities
if self.config.calib_use_pcol:
probs = gals.pcol
else:
probs = gals.p
# Figure out the order of the colors for luptitude corrections
mags = np.zeros((gals.size, self.config.nmag))
col_indices = np.zeros(ncol, dtype=np.int32)
sign_indices = np.zeros(ncol, dtype=np.int32)
mind_indices = np.zeros(ncol, dtype=np.int32)
c = 0
for j in range(self.config.ref_ind + 1, self.config.nmag):
col_indices[c] = j - 1
sign_indices[c] = -1
mind_indices[c] = j
c += 1
for j in range(self.config.ref_ind, 0, -1):
col_indices[c] = j - 1
sign_indices[c] = 1
mind_indices[c] = j - 1
c += 1
if self.do_lupcorr:
lups = np.zeros_like(mags)
mags[:, self.config.ref_ind] = gals.mag[:, self.config.ref_ind]
flux = 10.**((mags[:, self.config.ref_ind] - self.lupzp) / (-2.5))
lups[:, self.config.ref_ind] = 2.5 * np.log10(1.0 / self.config.b[self.config.ref_ind]) - np.arcsinh(0.5 * flux / self.bnmgy[self.config.ref_ind]) / (0.4 * np.log(10.0))
# One color at a time along the diagonal
for c in range(ncol):
starttime = time.time()
# The order is given by col_indices, which ensures that we work from the
# reference mag outward
j = col_indices[c]
sign = sign_indices[c]
mind = mind_indices[c]
self.config.logger.info("Working on diagonal for color %d" % (j))
col = galcolor[:, j]
col_err = galcolor_err[:, j]
mag_err = gals.mag_err[:, j: j + 2].copy()
# Need to go through the _ndarray because ztag and zstag are strings
cvals = np.zeros(self.pars._ndarray[self.ztag[j]].size)
svals = np.zeros(self.pars._ndarray[self.zstag[j]].size)
photo_err = np.zeros_like(cvals)
# Calculate median truncation
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol[:, j])
med = spl(gals.z)
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol_width[:, j])
sc = spl(gals.z)
# What is the maximum scatter in each node?
# This is based on the median fit, which does not include photometric
# error, and should always be larger. This helps regularize the edges
# where things otherwise can run away.
scatter_max = spl(self.pars.covmat_z)
# Initial guess for scvals should be halfway between 0.01 and scatter_max
scvals = (scatter_max - 0.01) / 2.0 + 0.01
u, = np.where((galcolor[:, j] > (med - self.config.calib_color_nsig * sc)) &
(galcolor[:, j] < (med + self.config.calib_color_nsig * sc)))
trunc = self.config.calib_color_nsig * sc[u]
dmags = gals.refmag - pivotmags
# And the starting values...
# Note that this returns the slope values (svals) at the nodes from the cvals
# but these might not be the same nodes, so we have to approximate
cvals_temp, svals_temp, _ = self._compute_startvals(self.pars._ndarray[self.ztag[j]],
gals.z[u], col[u],
xval=dmags[u],
fit=True, mincomp=5)
cvals[:] = cvals_temp
inds = np.searchsorted(self.pars._ndarray[self.ztag[j]],
self.pars._ndarray[self.zstag[j]])
svals[:] = svals_temp[inds]
# And do the luptitude correction if necessary.
if self.do_lupcorr:
lupcorr = self._compute_single_lupcorr(j, cvals, svals, gals, dmags, mags, lups, mind, sign)
else:
lupcorr = np.zeros(gals.size)
dmags_err_ratio = gals.refmag - self.config.calib_err_ratio_pivot
if self.config.calib_fit_err_ratio:
# When we are not doing the first color, we have a mag error modification
# for one magnitude
if c > 0:
err_ratios = self.pars.mag_err_ratio_int[mind + sign] + self.pars.mag_err_ratio_slope[mind + sign]*dmags_err_ratio
if sign == 1:
# Apply to the redward mag_err
mag_err[:, 1] *= err_ratios
# This is the index of color to fit the error ratio
fit_err_ratio_ind = [0]
else:
# Apply to the blueward mag_err
mag_err[:, 0] *= err_ratios
fit_err_ratio_ind = [1]
# The fit start values from the neighboring color
err_ratio_pars = [self.pars.mag_err_ratio_int[mind + sign],
self.pars.mag_err_ratio_slope[mind + sign]]
else:
fit_err_ratio_ind = [0, 1]
# The fit start values from the median fit
err_ratio_pars = [self.pars.medcol_err_ratio[j], 0.0]
else:
err_ratio_pars = None
fit_err_ratio_ind = []
# We fit in stages: first the mean, then the slope, then the scatter,
# and finally all three
rsfitter = RedSequenceFitter(self.pars._ndarray[self.ztag[j]],
gals.z[u], col[u], mag_err[u, :],
dmags=dmags[u],
trunc=trunc,
slope_nodes=self.pars._ndarray[self.zstag[j]],
scatter_nodes=self.pars.covmat_z,
lupcorrs=lupcorr[u],
probs=probs[u],
bkgs=self.bkg.lookup_diagonal(j, col[u], gals.refmag[u], doRaise=doRaise),
scatter_max=scatter_max, use_scatter_prior=True,
dmags_err_ratio=dmags_err_ratio[u])
# fit the mean
cvals, = rsfitter.fit(cvals, svals, scvals, fit_mean=True, err_ratio_pars=err_ratio_pars, fit_err_ratio_ind=fit_err_ratio_ind)
# Update the lupcorr...
if self.do_lupcorr:
rsfitter._lupcorrs[:] = self._compute_single_lupcorr(j, cvals, svals, gals, dmags, mags, lups, mind, sign)[u]
# fit the slope
svals, = rsfitter.fit(cvals, svals, scvals, fit_slope=True, err_ratio_pars=err_ratio_pars, fit_err_ratio_ind=fit_err_ratio_ind)
# fit the scatter
scvals, = rsfitter.fit(cvals, svals, scvals, fit_scatter=True, err_ratio_pars=err_ratio_pars, fit_err_ratio_ind=fit_err_ratio_ind)
if self.config.calib_fit_err_ratio:
err_ratios = scvals[-2: ]
scvals = scvals[: -2]
# fit combined
cvals, svals, scvals = rsfitter.fit(cvals, svals, scvals,
fit_mean=True, fit_slope=True, fit_scatter=True, err_ratio_pars=err_ratio_pars, fit_err_ratio_ind=fit_err_ratio_ind)
if self.config.calib_fit_err_ratio:
err_ratio_int_fit = scvals[-2]
err_ratio_slope_fit = scvals[-1]
scvals = scvals[: -2]
spl = CubicSpline(self.pars._ndarray[self.ztag[j]],
cvals)
gmean = spl(gals.z)
spl = CubicSpline(self.pars._ndarray[self.zstag[j]],
svals)
gslope = spl(gals.z)
spl = CubicSpline(self.pars.covmat_z,
scvals)
gsig = np.clip(spl(gals.z), 0.001, None)
delta_col = gals.galcol[:, j] - (gmean + gslope*dmags)
err_0 = gals.mag_err[:, j].copy()
err_1 = gals.mag_err[:, j + 1].copy()
if 0 not in fit_err_ratio_ind:
# We are not fitting 0, so we already have the parameters.
err_0 *= (self.pars.mag_err_ratio_int[j] +
self.pars.mag_err_ratio_slope[j]*dmags_err_ratio)
if 1 not in fit_err_ratio_ind:
# We are not fitting 1, so we already have the parameters.
err_1 *= (self.pars.mag_err_ratio_int[j + 1] +
self.pars.mag_err_ratio_slope[j + 1]*dmags_err_ratio)
ebinfitter = ErrorBinFitter(delta_col,
dmags_err_ratio,
err_0,
err_1,
gsig**2.)
ebinpars = ebinfitter.fit([1.0, 0.0], scale_indices=fit_err_ratio_ind)
err_ratio_int = ebinpars[0]
err_ratio_slope = ebinpars[1]
if c == 0:
self.pars.mag_err_ratio_int[j] = err_ratio_int
self.pars.mag_err_ratio_slope[j] = err_ratio_slope
self.pars.mag_err_ratio_int[j + 1] = err_ratio_int
self.pars.mag_err_ratio_slope[j + 1] = err_ratio_slope
self.config.logger.info('Mag %d/%d error ratio = %.3f + %.3f*(refmag - %.2f)' %
(j, j + 1, err_ratio_int, err_ratio_slope, self.pars.mag_err_ratio_pivot))
else:
self.pars.mag_err_ratio_int[mind] = err_ratio_int
self.pars.mag_err_ratio_slope[mind] = err_ratio_slope
self.config.logger.info('Mag %d error ratio = %.3f + %.3f*(refmag - %.2f)' %
(j, err_ratio_int, err_ratio_slope, self.pars.mag_err_ratio_pivot))
else:
self.pars.mag_err_ratio_int[j] = 1.0
self.pars.mag_err_ratio_slope[j] = 0.0
self.pars.mag_err_ratio_int[j + 1] = 1.0
self.pars.mag_err_ratio_slope[j + 1] = 0.0
# And record in the parameters
self.pars._ndarray[self.ctag[j]] = cvals
self.pars._ndarray[self.stag[j]] = svals
self.pars.sigma[j, j, :] = scvals
self.pars.covmat_amp[j, j, :] = scvals**2.
# And print the time taken
self.config.logger.info('Done in %.2f seconds.' % (time.time() - starttime))
def _calc_offdiagonal_pars(self, gals, doRaise=True):
"""
Set the off-diagonal elements of the covariance matrix.
These are just set to self.config.calib_covmat_constant
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
doRaise: `bool`, optional
Raise if there's a problem with the background? Default is True.
"""
ncol = self.config.nmag - 1
for j in range(ncol):
for k in range(j + 1, ncol):
self.pars.sigma[j, k, :] = self.config.calib_covmat_constant
self.pars.sigma[k, j, :] = self.pars.sigma[j, k, :]
self.pars.covmat_amp[j, k, :] = self.config.calib_covmat_constant * self.pars.sigma[j, j, :] * self.pars.sigma[k, k, :]
self.pars.covmat_amp[k, j, :] = self.pars.covmat_amp[j, k, :]
def _calc_offdiagonal_pars_old(self, gals, doRaise=True):
"""
DEPRECATED, this doesn't work right.
Calculate the off-diagonal elements of the covariance matrix.
Sets self.pars.sigma, self.pars.covmat_amp (off-diagonal).
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
doRaise: `bool`, optional
Raise if there's a problem with the background? Default is True.
"""
# The routine to compute the off-diagonal elements
ncol = self.config.nmag - 1
galcolor = gals.galcol
galcolor_err = gals.galcol_err
# compute the pivot mags
spl = CubicSpline(self.pars.pivotmag_z, self.pars.pivotmag)
pivotmags = spl(gals.z)
# And set the right probabilities
if self.config.calib_use_pcol:
probs = gals.pcol
else:
probs = gals.p
# Compute c, slope, and median and width for all galaxies/colors
ci = np.zeros((gals.size, ncol))
si = np.zeros_like(ci)
medci = np.zeros_like(ci)
medwidthi = np.zeros_like(ci)
gsig = np.zeros_like(ci)
for j in range(ncol):
spl = CubicSpline(self.pars._ndarray[self.ztag[j]],
self.pars._ndarray[self.ctag[j]])
ci[:, j] = spl(gals.z)
spl = CubicSpline(self.pars._ndarray[self.zstag[j]],
self.pars._ndarray[self.stag[j]])
si[:, j] = spl(gals.z)
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol[:, j])
medci[:, j] = spl(gals.z)
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol_width[:, j])
medwidthi[:, j] = spl(gals.z)
spl = CubicSpline(self.pars.covmat_z, self.pars.sigma[j, j, :])
gsig[:, j] = spl(gals.z)
if self.do_lupcorr:
mags = np.zeros((gals.size, self.config.nmag))
lups = np.zeros_like(mags)
mags[:, self.config.ref_ind] = gals.refmag
for j in range(self.config.ref_ind + 1, self.config.nmag):
mags[:, j] = mags[:, j - 1] - (ci[:, j - 1] + si[:, j - 1] * (gals.refmag - pivotmags))
for j in range(self.config.ref_ind - 1, -1, -1):
mags[:, j] = mags[:, j + 1] + (ci[:, j] + si[:, j] * (gals.refmag - pivotmags))
for j in range(self.config.nmag):
flux = 10.**((mags[:, j] - self.lupzp) / (-2.5))
lups[:, j] = 2.5 * np.log10(1.0 / self.config.b[j]) - np.arcsinh(0.5 * flux / self.bnmgy[j]) / (0.4 * np.log(10.0))
magcol = mags[:, :-1] - mags[:, 1:]
lupcol = lups[:, :-1] - lups[:, 1:]
lupcorr = lupcol - magcol
else:
lupcorr = np.zeros((gals.size, ncol))
template_col = np.zeros((gals.size, ncol))
for j in range(ncol):
template_col[:, j] = ci[:, j] + si[:, j] * (gals.refmag - pivotmags) + lupcorr[:, j]
res = galcolor - template_col
# figure out order with a ranking based on the configured order
bits = 2**np.arange(ncol, dtype=np.int32)
covmat_rank = np.zeros((ncol * ncol - ncol) // 2, dtype=np.int32)
covmat_order = self.config.calib_color_order
ctr = 0
for j in range(ncol):
for k in range(j + 1, ncol):
covmat_rank[ctr] = bits[covmat_order[j]] + bits[covmat_order[k]]
ctr += 1
covmat_rank = np.sort(covmat_rank)
full_covmats = self.pars.covmat_amp.copy()
for ctr in range(covmat_rank.size):
starttime = time.time()
j = -1
k = -1
for tctr in range(ncol):
if ((covmat_rank[ctr] & bits[tctr]) > 0):
if j < 0:
j = covmat_order[tctr]
else:
k = covmat_order[tctr]
# swap if necessary
if k < j:
temp = k
k = j
j = temp
self.config.logger.info("Working on %d, %d" % (j, k))
u, = np.where((galcolor[:, j] > medci[:, j] - self.config.calib_color_nsig * medwidthi[:, j]) &
(galcolor[:, j] < medci[:, j] + self.config.calib_color_nsig * medwidthi[:, j]) &
(galcolor[:, k] > medci[:, k] - self.config.calib_color_nsig * medwidthi[:, k]) &
(galcolor[:, k] < medci[:, k] + self.config.calib_color_nsig * medwidthi[:, k]))
bvals = self.bkg.lookup_offdiag(j, k, galcolor[:, j], galcolor[:, k], gals.refmag, doRaise=doRaise)
odfitter = RedSequenceOffDiagonalFitter(self.pars.covmat_z,
gals.z[u],
res[u, j], res[u, k],
gsig[u, j], gsig[u, k],
gals.mag_err[u, :],
j, k,
probs[u],
bvals[u],
self.config.calib_covmat_prior,
min_eigenvalue=self.config.calib_covmat_min_eigenvalue)
#rvals = odfitter.fit(np.zeros(self.pars.covmat_z.size), full_covmats=full_covmats)
rvals = np.zeros(self.pars.covmat_z.size) + 0.9
self.pars.sigma[j, k, :] = rvals
self.pars.sigma[k, j, :] = rvals
self.pars.covmat_amp[j, k, :] = rvals * self.pars.sigma[j, j, :] * self.pars.sigma[k, k, :]
self.pars.covmat_amp[k, j, :] = self.pars.covmat_amp[j, k, :]
full_covmats[j, k, :] = self.pars.covmat_amp[j, k, :]
full_covmats[k, j, :] = self.pars.covmat_amp[k, j, :]
self.config.logger.info("Done in %.2f seconds." % (time.time() - starttime))
def _calc_volume_factor(self, zref):
"""
Calculate the volume factor (delta-comoving volume in redshift steps)
Sets self.pars.volume_factor
Parameters
----------
zref: `float`
Highest redshift in the model (for reference)
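Notes
-----
Dl(0, z)/(1 + z) is the (transverse) comoving distance, so the stored
factor is the ratio of the comoving-distance increment per dz at zref
to that at each volume_factor_z node; it is normalized to 1 at z = zref.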
"""
dz = 0.01
self.pars.volume_factor = ((self.config.cosmo.Dl(0.0, zref + dz) / (1. + (zref + dz)) -
self.config.cosmo.Dl(0.0, zref) / (1. + zref)) /
(self.config.cosmo.Dl(0.0, self.pars.volume_factor_z + dz) / (1. + (self.pars.volume_factor_z + dz)) -
self.config.cosmo.Dl(0.0, self.pars.volume_factor_z) / (1. + self.pars.volume_factor_z)))
def save_pars(self, filename, clobber=False):
"""
Save the parameters to a fits file.
Parameters
----------
filename: `str`
Filename to save to.
clobber: `bool`, optional
Clobber any existing file? Default is False.
"""
if self.config.calib_redgal_template is not None:
rg = RedGalInitialColors(self.config.calib_redgal_template)
zmax = rg.zmax
else:
zmax = None
hdr = fitsio.FITSHDR()
hdr['NCOL'] = self.config.nmag - 1
hdr['MSTARSUR'] = self.config.mstar_survey
hdr['MSTARBAN'] = self.config.mstar_band
hdr['LIMMAG'] = self.config.limmag_catalog
# Saved with a larger cushion than the calibration zrange; this seems to work well
hdr['ZRANGE0'] = np.clip(self.config.zrange[0] - 0.1, 0.01, None)
hdr['ZRANGE1'] = np.clip(self.config.zrange[1] + 0.25, None, zmax)
hdr['ALPHA'] = self.config.calib_lumfunc_alpha
hdr['ZBINFINE'] = self.config.zredc_binsize_fine
hdr['ZBINCOAR'] = self.config.zredc_binsize_coarse
hdr['LOWZMODE'] = 0
hdr['REF_IND'] = self.config.ref_ind
hdr['BANDS'] = ','.join(self.config.bands)
if self.config.calib_redgal_template is not None:
hdr['TEMPLATE'] = self.config.calib_redgal_template
# Only save the b values if they're > 0 (that means we have
# luptitudes).
if self.config.b[0] > 0.0:
for j, b in enumerate(self.config.b):
hdr['BVALUE%d' % (j + 1)] = b
self.pars.to_fits_file(filename, header=hdr, clobber=clobber)
def _calc_zreds(self, gals, do_correction=True):
"""
Calculate the zreds for a set of galaxies, using the newly fit model.
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog being fit
do_correction: `bool`, optional
Do redshift afterburner corrections? Default is True.
"""
# This is temporary
zredstr = RedSequenceColorPar(self.config.parfile)
zredc = ZredColor(zredstr, do_correction=do_correction)
gals.add_zred_fields(self.config.zred_nsamp)
starttime = time.time()
zredc.compute_zreds(gals)
self.config.logger.info('Computed zreds in %.2f seconds.' % (time.time() - starttime))
def _calc_corrections(self, gals, mode2=False):
"""
Calculate zred afterburner correction parameters.
Sets self.pars.corr, self.pars.corr_slope, self.pars.corr_r or
self.pars.corr2, self.pars.corr2_slope, self.pars.corr2_r
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog being fit. Must contain zred_uncorr information.
mode2: `bool`, optional
Default is False. When False, corrections are computed such that
<zred|ztrue> is unbiased. When True, corrections are computed
such that <ztrue|zred> is unbiased.
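Notes
-----
In the fit below, the correction offset (corr or corr2) is defined on the
corr_z nodes, the slope in (refmag - pivotmag) on the corr_slope_z nodes,
and the r values rescale the reported zred errors. The bkg_cvals term
appears to absorb a background/outlier variance and is not stored in the
parameter file.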
"""
# p or pcol
if self.config.calib_use_pcol:
probs = gals.pcol
else:
probs = gals.p
# Set a threshold removing 5% worst lkhd outliers
st = np.argsort(gals.lkhd)
thresh = gals.lkhd[st[int(0.05 * gals.size)]]
# This is an arbitrary 2sigma cut...
guse, = np.where((gals.lkhd > thresh) &
(np.abs(gals.z - gals.zred) < 2. * gals.zred_e))
spl = CubicSpline(self.pars.pivotmag_z, self.pars.pivotmag)
pivotmags = spl(gals.z)
w = 1. / (np.exp((thresh - gals.lkhd[guse]) / 0.2) + 1.0)
# The offset cvals
cvals = np.zeros(self.pars.corr_z.size)
# The slope svals
svals = np.zeros(self.pars.corr_slope_z.size)
# And the r value to be multiplied by error
rvals = np.ones(self.pars.corr_slope_z.size)
# And the background vals
bkg_cvals = np.zeros(self.pars.corr_slope_z.size)
cvals[:], _ = self._compute_startvals(self.pars.corr_z, gals.z, gals.z - gals.zred, median=True)
# Initial guess for bkg_cvals is trickier and not generalizable (sadly)
for i in range(self.pars.corr_slope_z.size):
if i == 0:
zlo = self.pars.corr_slope_z[0]
else:
zlo = (self.pars.corr_slope_z[i - 1] + self.pars.corr_slope_z[i]) / 2.
if i == (self.pars.corr_slope_z.size - 1):
zhi = self.pars.corr_slope_z[i]
else:
zhi = (self.pars.corr_slope_z[i] + self.pars.corr_slope_z[i + 1]) / 2.
if mode2:
u, = np.where((gals.zred[guse] > zlo) & (gals.zred[guse] < zhi))
else:
u, = np.where((gals.z[guse] > zlo) & (gals.z[guse] < zhi))
if u.size < 3:
if i > 0:
bkg_cvals[i] = bkg_cvals[i - 1]
else:
st = np.argsort(probs[guse[u]])
uu = u[st[0:u.size // 2]]
bkg_cvals[i] = np.std(gals.z[guse[uu]] - gals.zred[guse[uu]])**2.
if mode2:
self.config.logger.info("Fitting zred2 corrections...")
z = gals.zred
else:
self.config.logger.info("Fitting zred corrections...")
z = gals.z
corrfitter = CorrectionFitter(self.pars.corr_z,
z[guse],
gals.z[guse] - gals.zred[guse],
gals.zred_e[guse],
slope_nodes=self.pars.corr_slope_z,
probs=np.clip(probs[guse], None, 0.99),
dmags=gals.refmag[guse] - pivotmags[guse],
ws=w)
# self.config.calib_corr_nocorrslope
# first fit the mean
cvals, = corrfitter.fit(cvals, svals, rvals, bkg_cvals, fit_mean=True)
# fit the slope (if desired)
if not self.config.calib_corr_nocorrslope:
svals, = corrfitter.fit(cvals, svals, rvals, bkg_cvals, fit_slope=True)
# Fit r
rvals, = corrfitter.fit(cvals, svals, rvals, bkg_cvals, fit_r=True)
# Fit bkg
bkg_cvals, = corrfitter.fit(cvals, svals, rvals, bkg_cvals, fit_bkg=True)
# Combined fit
if not self.config.calib_corr_nocorrslope:
cvals, svals, rvals, bkg_cvals = corrfitter.fit(cvals, svals, rvals, bkg_cvals, fit_mean=True, fit_slope=True, fit_r=True, fit_bkg=True)
else:
cvals, rvals, bkg_cvals = corrfitter.fit(cvals, svals, rvals, bkg_cvals, fit_mean=True, fit_r=True, fit_bkg=True)
# And record the values
if mode2:
self.pars.corr2 = cvals
self.pars.corr2_slope = svals
self.pars.corr2_r = rvals
else:
self.pars.corr = cvals
self.pars.corr_slope = svals
self.pars.corr_r = rvals
def _make_diagnostic_plots(self, gals):
"""
Make diagnostic plots.
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog being fit. Must contain zred information.
"""
import matplotlib.pyplot as plt
# what plots do we want?
# We want to split this out into different modules?
# For each color, plot
# Color(z)
# Slope(z)
# scatter(z)
# And a combined
# All off-diagonal r value plots
zredstr = RedSequenceColorPar(self.config.parfile, zbinsize=0.005)
for j in range(self.config.nmag - 1):
fig = plt.figure(figsize=(10, 6))
fig.clf()
zredstr.plot_redsequence_diag(fig, j, self.config.bands)
fig.savefig(os.path.join(self.config.outpath, self.config.plotpath,
'%s_%s-%s.png' % (self.config.d.outbase,
self.config.bands[j], self.config.bands[j + 1])))
plt.close(fig)
fig = plt.figure(figsize=(10, 6))
fig.clf()
zredstr.plot_redsequence_offdiags(fig, self.config.bands)
fig.savefig(os.path.join(self.config.outpath, self.config.plotpath,
'%s_offdiags.png' % (self.config.d.outbase)))
# And two panel plot with
# left panel is offset, scatter, outliers as f(z)
# Right panel is zred vs z (whichever)
# We need to do this for both zred and zred2.
zbinsize = 0.02
pcut = 0.9
ntrial = 1000
mlim = zredstr.mstar(gals.zred) - 2.5 * np.log10(0.2)
use, = np.where((gals.p > pcut) &
(gals.refmag < mlim) &
(gals.zred < 2.0))
ugals = gals[use]
zbins = np.arange(self.config.zrange[0], self.config.zrange[1], zbinsize)
dtype = [('ztrue', 'f4'),
('zuse', 'f4'),
('delta', 'f4'),
('delta_err', 'f4'),
('delta_std', 'f4'),
('zuse_e', 'f4'),
('f_out', 'f4')]
# There are two modes to plot
for mode in range(2):
if mode == 0:
zuse = ugals.z
dzuse = ugals.zred - ugals.z
zuse_e = ugals.zred_e
xlabel = r'$z_{\mathrm{true}}$'
ylabel_left = r'$z_{\mathrm{red}} - z_{\mathrm{true}}$'
ylabel_right = r'$z_{\mathrm{red}}$'
xcol = 'ztrue'
modename = 'zred'
else:
zuse = ugals.zred2
dzuse = ugals.z - ugals.zred2
zuse_e = ugals.zred2_e
xlabel = r'$z_{\mathrm{red2}}$'
ylabel_left = r'$z_{\mathrm{true}} - z_{\mathrm{red2}}$'
ylabel_right = r'$z_{\mathrm{true}}$'
xcol = 'zuse'
modename = 'zred2'
zstr = np.zeros(zbins.size, dtype=dtype)
for i, z in enumerate(zbins):
# Get all the galaxies in the bin
u1, = np.where((zuse >= z) & (zuse < (z + zbinsize)))
if u1.size < 3:
self.config.logger.info('Warning: not enough galaxies in zbin: %.2f, %.2f' % (z, z + zbinsize))
continue
med = np.median(dzuse[u1])
gsigma = 1.4826 * np.abs(dzuse[u1] - med) / zuse_e[u1]
u2, = np.where(np.abs(gsigma) < 3.0)
if u2.size < 3:
self.config.logger.info('Warning: not enough galaxies in zbin: %.2f, %.2f' % (z, z + zbinsize))
continue
use = u1[u2]
zstr['ztrue'][i] = np.median(ugals.z[use])
zstr['zuse'][i] = np.median(zuse[use])
zstr['delta'][i] = np.median(dzuse[use])
barr = np.zeros(ntrial)
for t in range(ntrial):
r = np.random.choice(dzuse[use], size=use.size, replace=True)
barr[t] = np.median(r)
# Error on median as determined from bootstrap resampling
zstr['delta_err'][i] = np.std(barr)
# The typical error
zstr['delta_std'][i] = 1.4826 * np.median(np.abs(dzuse[use] - zstr['delta'][i]))
# And outliers ...
u4, = np.where(np.abs(dzuse[u1] - zstr['delta'][i]) > 4.0 * zstr['delta_std'][i])
zstr['f_out'][i] = float(u4.size) / float(u1.size)
zstr['zuse_e'][i] = np.median(zuse_e[use])
# Cut out bins that didn't get a fit
cut, = np.where(zstr['ztrue'] > 0.0)
zstr = zstr[cut]
# Now we can make the plots
fig = plt.figure(figsize=(10, 6))
fig.clf()
# Left panel is offset, scatter, etc.
ax = fig.add_subplot(121)
ax.errorbar(zstr[xcol], zstr['delta'], yerr=zstr['delta_err'], fmt='k^')
ax.plot(self.config.zrange, [0.0, 0.0], 'k:')
ax.plot(zstr[xcol], zstr['delta_std'], 'r-')
ax.plot(zstr[xcol], zstr['zuse_e'], 'b-')
ax.plot(zstr[xcol], zstr['f_out'], 'm-')
ax.set_xlim(self.config.zrange)
ax.set_ylim(-0.05, 0.05)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel_left)
ax = fig.add_subplot(122)
if mode == 0:
ax.hexbin(ugals.z, ugals.zred, bins='log', extent=[self.config.zrange[0], self.config.zrange[1], self.config.zrange[0], self.config.zrange[1]])
else:
ax.hexbin(ugals.zred2, ugals.z, bins='log', extent=[self.config.zrange[0], self.config.zrange[1], self.config.zrange[0], self.config.zrange[1]])
ax.plot(self.config.zrange, self.config.zrange, 'r--')
ax.set_xlim(self.config.zrange)
ax.set_ylim(self.config.zrange)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel_right)
fig.tight_layout()
fig.savefig(os.path.join(self.config.outpath, self.config.plotpath,
'%s_%s_plots.png' % (self.config.d.outbase, modename)))
plt.close(fig)
if self.config.calib_fit_err_ratio:
# We want to plot the error ratio values
fig = plt.figure(figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(111)
ax.plot(np.arange(self.config.nmag), zredstr.mag_err_ratio_intercept, 'r.')
ax.plot(np.array([-0.5, self.config.nmag + 0.5]), [1.0, 1.0], 'k--')
ax.set_xlim(-0.5, self.config.nmag - 0.5)
ax.set_ylim(0.0, np.max(zredstr.mag_err_ratio_intercept) + 1.0)
ax.set_xlabel('Magnitude Index')
ax.set_ylabel('Error Ratio')
fig.tight_layout()
fig.savefig(os.path.join(self.config.outpath, self.config.plotpath,
'%s_err_ratios.png' % (self.config.d.outbase)))
plt.close(fig)
# Always make error plots; only do modified version if we fit it.
# And we make one pull plot per color
zindex = zredstr.zindex(gals.zred)
for j in range(self.config.nmag - 1):
# Make the raw plot (with intrinsic error)
delta = gals.galcol[:, j] - (zredstr.c[zindex, j] + zredstr.slope[zindex, j]*(gals.refmag - zredstr.pivotmag[zindex]))
delta_err = np.sqrt(gals.galcol_err[:, j]**2. + zredstr.sigma[j, j, zindex]**2.)
pulls = delta/delta_err
mags = gals.mag[:, j + 1]
st = np.argsort(mags)
magrange = [mags[st[int(0.01*mags.size)]], mags[st[int(0.99*mags.size)]]]
fig = plt.figure(figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(111)
self._plot_pulls(ax, mags, pulls, self.config.bands[j + 1],
self.config.bands[j] + ' - ' + self.config.bands[j + 1],
magrange)
ax.set_title('Raw Error Ratio')
fig.tight_layout()
fig.savefig(os.path.join(self.config.outpath, self.config.plotpath,
'%s_raw_error_ratio_%s-%s.png' % (self.config.d.outbase,
self.config.bands[j],
self.config.bands[j + 1])))
plt.close(fig)
if not self.config.calib_fit_err_ratio:
continue
# Make the scaled plot (with intrinsic error)
delta = gals.galcol[:, j] - (zredstr.c[zindex, j] + zredstr.slope[zindex, j]*(gals.refmag - zredstr.pivotmag[zindex]))
err_ratios0 = zredstr.mag_err_ratio_intercept[j] + zredstr.mag_err_ratio_slope[j]*(gals.refmag - zredstr.mag_err_ratio_pivot)
err_ratios1 = zredstr.mag_err_ratio_intercept[j + 1] + zredstr.mag_err_ratio_slope[j + 1]*(gals.refmag - zredstr.mag_err_ratio_pivot)
delta_err = np.sqrt((err_ratios0*gals.mag_err[:, j])**2. +
(err_ratios1*gals.mag_err[:, j + 1])**2. +
zredstr.sigma[j, j, zindex]**2.)
pulls = delta/delta_err
mags = gals.mag[:, j + 1]
st = np.argsort(mags)
magrange = [mags[st[int(0.01*mags.size)]], mags[st[int(0.99*mags.size)]]]
fig = plt.figure(figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(111)
self._plot_pulls(ax, mags, pulls, self.config.bands[j + 1],
self.config.bands[j] + ' - ' + self.config.bands[j + 1],
magrange)
ax.set_title('Scaled Error Ratio (r_err = %.3f/%.3f, %.3f/%.3f)' %
(zredstr.mag_err_ratio_intercept[j], zredstr.mag_err_ratio_slope[j],
zredstr.mag_err_ratio_intercept[j + 1], zredstr.mag_err_ratio_slope[j + 1]))
fig.tight_layout()
fig.savefig(os.path.join(self.config.outpath, self.config.plotpath,
'%s_scaled_error_ratio_%s-%s.png' % (self.config.d.outbase,
self.config.bands[j],
self.config.bands[j + 1])))
plt.close(fig)
def _plot_pulls(self, ax, mags, pulls, magname, colname, magrange, binsize=0.5, pullcut=10.0):
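"""
Plot the distribution of pulls (delta color / total error) as a function
of magnitude. Helper for _make_diagnostic_plots: shows a hexbin of pulls
vs magnitude with the robust width (1.4826*MAD), the 5th/95th percentiles,
and the median pull per magnitude bin, plus a reference line at 1.0.
Parameters
----------
ax: `matplotlib.axes.Axes`
Axes to draw into.
mags: `np.array`
Float array of magnitudes (x axis).
pulls: `np.array`
Float array of pull values.
magname: `str`
Label for the magnitude axis.
colname: `str`
Name of the color whose pulls are plotted (used in the y label).
magrange: `list`
[min, max] magnitude range to plot.
binsize: `float`, optional
Magnitude bin size for the overplotted statistics. Default is 0.5.
pullcut: `float`, optional
Only pulls with |pull| < pullcut are shown. Default is 10.0.
"""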
gd, = np.where((np.abs(pulls) < pullcut) & (mags > magrange[0]) & (mags < magrange[1]))
ax.hexbin(mags[gd], pulls[gd], bins='log', extent=[magrange[0], magrange[1],
-pullcut, pullcut])
h, rev = esutil.stat.histogram(mags[gd], binsize=binsize, rev=True)
binmags = np.zeros(h.size)
sigs = np.zeros(h.size)
lo = np.zeros(h.size)
hi = np.zeros(h.size)
med = np.zeros(h.size)
u, = np.where(h > 0)
for i, ind in enumerate(u):
i1a = rev[rev[ind]: rev[ind + 1]]
sigs[i] = 1.4826*np.median(np.abs(pulls[gd[i1a]] - np.median(pulls[gd[i1a]])))
st = np.argsort(pulls[gd[i1a]])
lo[i] = pulls[gd[i1a[st[int(0.05*i1a.size)]]]]
hi[i] = pulls[gd[i1a[st[int(0.95*i1a.size)]]]]
med[i] = pulls[gd[i1a[st[int(0.50*i1a.size)]]]]
binmags[i] = np.median(mags[gd[i1a]])
ok, = np.where(sigs > 0.0)
ax.plot(binmags[ok], sigs[ok], 'r-', label='Width of Pulls')
ax.plot(binmags[ok], lo[ok], 'k--', label='5/95th percentiles')
ax.plot(binmags[ok], hi[ok], 'k--')
ax.plot(binmags[ok], med[ok], 'r--', label='Median Pull')
ax.plot(magrange, [1.0, 1.0], 'k:')
ax.set_xlabel(magname)
ax.set_ylabel('delta ' + colname)
|
erykoff/redmapper
|
redmapper/calibration/redsequencecal.py
|
Python
|
apache-2.0
| 55,079
|
[
"Galaxy"
] |
26722be2854a7ae3554243a7d40395c93d3f040442c8b554f3eb72011c9f99fa
|
#!/usr/bin/env python
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list=[[traindat,testdat,1.9],[traindat,testdat,1.7]]
def kernel_io (train_fname=traindat,test_fname=testdat,width=1.9):
from shogun import CSVFile
from tempfile import NamedTemporaryFile
import shogun as sg
feats_train=sg.features(CSVFile(train_fname))
feats_test=sg.features(CSVFile(test_fname))
kernel=sg.kernel("GaussianKernel", log_width=width)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
tmp_train_csv = NamedTemporaryFile(suffix='train.csv')
f=CSVFile(tmp_train_csv.name, "w")
kernel.save(f)
del f
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
tmp_test_csv = NamedTemporaryFile(suffix='test.csv')
f=CSVFile(tmp_test_csv.name,"w")
kernel.save(f)
del f
return km_train, km_test, kernel
if __name__=='__main__':
print('Gaussian')
kernel_io(*parameter_list[0])
|
lambday/shogun
|
examples/undocumented/python/kernel_io.py
|
Python
|
bsd-3-clause
| 955
|
[
"Gaussian"
] |
dda3db11e39a3cd188ab302b721b86a94f99b901ab1b4c5eeadb7355aeb80ffc
|
from __future__ import absolute_import
import numpy as np
from mdtraj.testing import eq
from numpy.testing import assert_approx_equal
from numpy.testing import assert_array_almost_equal
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA as PCAr
from msmbuilder.example_datasets import AlanineDipeptide
from ..cluster import KCenters
from ..decomposition import (FactorAnalysis, FastICA, KernelTICA,
MiniBatchSparsePCA, PCA, SparsePCA, tICA)
from ..decomposition.kernel_approximation import LandmarkNystroem
from ..featurizer import DihedralFeaturizer
random = np.random.RandomState(42)
trajs = [random.randn(10, 3) for _ in range(5)]
def test_tica_fit_transform():
X = random.randn(10, 3)
tica = tICA(n_components=2, lag_time=1)
y2 = tica.fit_transform([np.copy(X)])[0]
def test_tica_singular_1():
tica = tICA(n_components=1)
# make some data that has one column repeated twice
X = random.randn(100, 2)
X = np.hstack((X, X[:, 0, np.newaxis]))
tica.fit([X])
assert tica.components_.dtype == np.float64
assert tica.eigenvalues_.dtype == np.float64
def test_tica_singular_2():
tica = tICA(n_components=1)
# make some data that has one column of all zeros
X = random.randn(100, 2)
X = np.hstack((X, np.zeros((100, 1))))
tica.fit([X])
assert tica.components_.dtype == np.float64
assert tica.eigenvalues_.dtype == np.float64
def test_tica_shape():
model = tICA(n_components=3).fit([random.randn(100, 10)])
eq(model.eigenvalues_.shape, (3,))
eq(model.eigenvectors_.shape, (10, 3))
eq(model.components_.shape, (3, 10))
def test_tica_score_1():
X = random.randn(100, 5)
for n in range(1, 5):
tica = tICA(n_components=n, shrinkage=0)
tica.fit([X])
assert_approx_equal(
tica.score([X]),
tica.eigenvalues_.sum())
assert_approx_equal(tica.score([X]), tica.score_)
def test_tica_score_2():
X = random.randn(100, 5)
Y = random.randn(100, 5)
model = tICA(shrinkage=0.0, n_components=2).fit([X])
s1 = model.score([Y])
s2 = tICA(shrinkage=0.0).fit(model.transform([Y])).eigenvalues_.sum()
eq(s1, s2)
def test_tica_multiple_components():
X = random.randn(100, 5)
tica = tICA(n_components=1, shrinkage=0)
tica.fit([X])
Y1 = tica.transform([X])[0]
tica.n_components = 4
Y4 = tica.transform([X])[0]
tica.n_components = 3
Y3 = tica.transform([X])[0]
assert Y1.shape == (100, 1)
assert Y4.shape == (100, 4)
assert Y3.shape == (100, 3)
eq(Y1.flatten(), Y3[:, 0])
eq(Y3, Y4[:, :3])
def test_tica_kinetic_mapping():
X = random.randn(10, 3)
tica1 = tICA(n_components=2, lag_time=1)
tica2 = tICA(n_components=2, lag_time=1, kinetic_mapping=True)
y1 = tica1.fit_transform([np.copy(X)])[0]
y2 = tica2.fit_transform([np.copy(X)])[0]
assert eq(y2, y1 * tica1.eigenvalues_)
def test_tica_commute_mapping():
X = random.randn(10, 3)
tica1 = tICA(n_components=2, lag_time=1)
tica2 = tICA(n_components=2, lag_time=1, commute_mapping=True)
y1 = tica1.fit_transform([np.copy(X)])[0]
y2 = tica2.fit_transform([np.copy(X)])[0]
regularized_timescales = 0.5 * tica2.timescales_ *\
np.tanh( np.pi *((tica2.timescales_ - tica2.lag_time)
/tica2.lag_time) + 1)
assert eq(y2, y1 * np.sqrt(regularized_timescales/2))
def test_pca_vs_sklearn():
# Compare msmbuilder.pca with sklearn.decomposition
pcar = PCAr()
pcar.fit(np.concatenate(trajs))
pca = PCA()
pca.fit(trajs)
y_ref1 = pcar.transform(trajs[0])
y1 = pca.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
np.testing.assert_array_almost_equal(pca.components_, pcar.components_)
np.testing.assert_array_almost_equal(pca.explained_variance_,
pcar.explained_variance_)
np.testing.assert_array_almost_equal(pca.mean_, pcar.mean_)
np.testing.assert_array_almost_equal(pca.n_components_, pcar.n_components_)
np.testing.assert_array_almost_equal(pca.noise_variance_,
pcar.noise_variance_)
def test_pca_pipeline():
# Test that PCA it works in a msmbuilder pipeline
p = Pipeline([('pca', PCA()), ('cluster', KCenters())])
p.fit(trajs)
def test_pca_generator():
# Check to see if it works with a generator
traj_dict = dict((i, t) for i, t in enumerate(trajs))
pcar = PCAr()
pcar.fit(np.concatenate(trajs))
pca = PCA()
# on python 3, dict.values() returns a generator
pca.fit(traj_dict.values())
y_ref1 = pcar.transform(trajs[0])
y1 = pca.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
np.testing.assert_array_almost_equal(pca.components_, pcar.components_)
np.testing.assert_array_almost_equal(pca.explained_variance_,
pcar.explained_variance_)
np.testing.assert_array_almost_equal(pca.mean_, pcar.mean_)
np.testing.assert_array_almost_equal(pca.n_components_, pcar.n_components_)
np.testing.assert_array_almost_equal(pca.noise_variance_,
pcar.noise_variance_)
def test_sparsepca():
pca = SparsePCA()
pca.fit_transform(trajs)
pca.summarize()
def test_minibatchsparsepca():
pca = MiniBatchSparsePCA()
pca.fit_transform(trajs)
pca.summarize()
def test_fastica():
ica = FastICA()
ica.fit_transform(trajs)
ica.summarize()
def test_factoranalysis():
fa = FactorAnalysis()
fa.fit_transform(trajs)
fa.summarize()
def test_ktica_compare_to_tica():
trajectories = AlanineDipeptide().get_cached().trajectories
featurizer = DihedralFeaturizer(sincos=True)
features = featurizer.transform(trajectories[0:1])
features = [features[0][::10]]
tica = tICA(lag_time=1, n_components=2)
ktica = KernelTICA(lag_time=1, kernel='linear', n_components=2,
random_state=42)
tica_out = tica.fit_transform(features)[0]
ktica_out = ktica.fit_transform(features)[0]
assert_array_almost_equal(ktica_out, tica_out, decimal=1)
def test_ktica_compare_to_pipeline():
X = random.randn(100, 5)
ktica = KernelTICA(kernel='rbf', lag_time=5, n_components=1,
random_state=42)
y1 = ktica.fit_transform([X])[0]
u = np.arange(X.shape[0])[5::1]
v = np.arange(X.shape[0])[::1][:u.shape[0]]
lndmrks = X[np.unique((u, v))]
assert_array_almost_equal(lndmrks, ktica.landmarks, decimal=3)
nystroem = LandmarkNystroem(kernel='rbf', landmarks=lndmrks,
random_state=42)
tica = tICA(lag_time=5, n_components=1)
y2_1 = nystroem.fit_transform([X])
y2_2 = tica.fit_transform(y2_1)[0]
assert_array_almost_equal(y1, y2_2)
|
mpharrigan/mixtape
|
msmbuilder/tests/test_decomposition.py
|
Python
|
lgpl-2.1
| 6,977
|
[
"MDTraj"
] |
8314bc4fd095998ce992312d32e1eeb8c7570ed8ba6a6f9eb47a2af288b4b110
|
import os, sys, shutil, re, glob, time, commands
_part_filename = '._%s%03d'
_part_filename_wildcard = '._*[0-9][0-9][0-9]'
_registered_command_line_options = [
('--help',
'Print all options to the doconce program.'),
('--debug',
"""Write a debugging file _doconce_debugging.log with lots
of intermediate results"""),
('--no_abort',
'Do not abort the execution if syntax errors are found.'),
('--verbose=',
"""Write progress of intermediate steps if they take longer than X seconds.
0: X=15
1: X=5
2: X=0.5"""),
('--syntax_check=',
"""Values: on/off. Turns on/off fix of illegal constructions and the syntax check
(may be time consuming for large books)."""),
('--skip_inline_comments',
'Remove all inline comments of the form [ID: comment].'),
('--exercise_numbering=',
"""absolute: exercises numbered as 1, 2, ... (default)
chapter: exercises numbered as 1.1, 1.2, ... , 3.1, 3.2, ..., B.1, B.2, etc.
with a chapter or appendix prefix."""),
('--exercises_in_zip',
'Place each exercises as an individual DocOnce file in a zip archive.'),
('--exercises_in_zip_filename=',
"""Filenames of individual exercises in zip archive.
logical: use the (first) logical filename specified by file=...
number: use either absolute exercise number or chapter.localnumber."""),
('--encoding=',
'Specify encoding (e.g., latin1 or utf-8).'),
('--no_ampersand_quote', 'Turn off special treatment of ampersand (&). Needed, e.g., when native latex code for tables are inserted in the document.'),
('--no_mako',
'Do not run the Mako preprocessor program.'),
('--no_preprocess',
'Do not run the Preprocess preprocessor program.'),
('--mako_strict_undefined',
'Make Mako report on undefined variables.'),
('--no_header_footer',
'Do not include header and footer in (LaTeX and HTML) documents.'),
('--no_emoji', 'Remove all emojis.'),
('--runestone',
'Make a RunestoneInteractive version of a Sphinx document.'),
('--max_bc_linelength=',
"""Strip lines in !bc environments that are longer than specified
(to prevent too long lines). Default: None (no length restriction)."""),
('--replace_ref_by_latex_auxno=',
"""Replace all ref{...} by hardcoded numbers from a latex .aux file.
Makes it possible for a notebook or html page to refer to a latex textbook.
Recommended syntax: see (ref{my:eq1}) in cite{MyBook}, or see
Section ref{my:sec2} in cite{MyBook}."""),
('--keep_pygments_html_bg',
"""Do not allow change of background in code blocks in HTML."""),
('--minted_latex_style=',
"""Specify the minted style to be used for typesetting code in LaTeX.
See pygmentize -L styles for legal names."""),
('--pygments_html_style=',
"""Specify the minted/pygments style to be used for typesetting code
in HTML.
Default: default (other values: monokai, manni, rrt, perldoc,
borland, colorful, murphy, trac, tango, fruity, autumn, emacs,
vim, pastie, friendly, native, see pygmentize -L styles).
none, no, off: turn off pygments to typeset computer code in HTML,
use plain <pre> tags.
highlight.js: use highlight.js syntax highlighting, not pygments."""),
('--pygments_html_linenos',
"""Turn on line numbers in pygmentized computer code in HTML.
(In LaTeX line numbers can be added via doconce subst or
doconce replace such that the verbatim environments get
the linenos=true parameter.)"""),
('--xhtml', 'Use BeautifulSoup to try to produce XHTML output. It inserts end tags (e.g. </p>) and guesses where to do it.'),
('--html_output=',
'Alternative basename of files associated with the HTML format.'),
('--html_style=', """Name of theme for HTML style:
plain, blueish, blueish2, bloodish, tactile-black, tactile-red, rossant
solarized, solarized2_light, solarized2_dark,
bootstrap, bootswatch,
bootstrap_X, X=bloodish, blue, bluegray, brown, cbc, FlatUI, red,
bootswatch_X, X=cerulean, cosmo, flatly, journal, lumen, readable,
simplex, spacelab, united, yeti
(dark:) amelia, cyborg, darkly, slate, spruce,
superhero (demos at bootswatch.com)"""),
('--html_template=',
"""Specify an HTML template with header/footer in which the doconce
document is embedded. (Often preferred to run with --no_title)"""),
('--no_title', 'Comment out TITLE, AUTHOR, DATE.\nOften used with HTML templates.'),
('--html_code_style=',
"""off, inherit, or transparent: enable normal inline verbatim font
where foreground and background color are inherited from the
surroundings (e.g., to avoid the red Bootstrap color).
Default: on (use the css-specified typesetting of <pre> tags).
NOTE: the naming "html_code_style" is not optimal: it has nothing
to do with code block style, but rather with the <code> tag for inline verbatim text
in the context of bootstrap css styles.
"""),
('--html_pre_style=',
"""off, inherit, or transparent: let code blocks inside <pre> tags have
foreground and background color inherited from the surroundings.
Default: on (use the css-specified typesetting of <pre> tags).
This option is most relevant for Bootstrap styles to
avoid white background in code blocks inside colorful admons.
"""),
('--html_toc_depth=',
"""No of levels in the table of contents in HTML output. Default: 2 (includes subsections but not subsubsections)."""),
('--html_toc_indent=',
"""No of spaces for indentation of subsections in the table of
contents in HTML output. Default: 3 (0 gives toc as nested list
in Bootstrap-based styles)."""),
('--html_body_font=',
"""Specify HTML font for text body. =? lists available fonts."""),
('--html_heading_font=',
"""Specify HTML font for headings. =? lists available fonts."""),
('--html_video_autoplay=',
"""True for autoplay when HTML is loaded, otherwise False (default)."""),
('--html_admon=',
"""\
Type of admonition and color:
colors, gray, yellow, apricot, lyx, paragraph.
For html_style=bootstrap*,bootswatch*,
the two legal values are bootstrap_panel, bootstrap_alert."""),
('--html_admon_shadow',
'Add a shadow effect to HTML admon boxes (gray, yellow, apricot).'),
('--html_admon_bg_color=',
'Background color of admon in HTML.'),
('--html_admon_bd_color=',
'Boundary color of admon in HTML.'),
('--css=',
"""Specify a .css style file for HTML output.
If the file does not exist, the default or specified style
(--html_style=) is written to it."""),
('--html_box_shadow',
'Add a shadow effect in HTML box environments.'),
('--html_share=',
"""Specify URL and there will be Facebook, Twitter, etc. buttons
at the end of the HTML document.
--html_share=http://mysite.com/specials shares on email, Facebook, Google+,
LinkedIn, Twitter, and enables a print button too.
--html_share=http://mysite.com/specials,twitter,facebook shares on
Twitter and Facebook only. Sites are separated by comma. The following
names are allowed: email, facebook, google+, linkedin, twitter, print."""),
('--html_exercise_icon=',
"""Specify a question icon (as a filename in the bundled/html_images
directory in the doconce repo) for being inserted to the right in exercises.
default: turn on predefined question icons according to the chosen style.
none: no icons (this is the default value)."""),
('--html_exercise_icon_width=',
"""Width of the icon image in pixels (must be used with --html_exercise_icon)."""),
('--html_raw_github_url=', """URLs to files hosted on the doconce github account.
Internet Explorer (and perhaps other browsers) will not show raw.github.com
files. Instead one should use rawgit.com. For development of HTML sites
in Safari and Chrome one can use rawgit.com.
Values of --html_raw_github_url=:
safe or cdn.rawgit: use this for ready-made sites with potentially some traffic.
The URL becomes https://cdn.rawgit.com/hplgit/doconce/...
test or rawgit: use this for test purposes and development with low traffic.
The URL becomes https://rawgit.com/hplgit/doconce/...
github or raw.github: the URL becomes https://raw.github.com and may fail to
load properly.
githubusercontent or raw.githubusercontent: The URL becomes
https://raw.githubusercontent.com and may fail to load properly.
"""),
('--html_DOCTYPE', """Insert <!DOCTYPE HTML> in the top of the HTML file.
This is required for Internet Explorer and Mozilla.
However, some of the CSS files used by DocOnce may not load properly if
they are not well formed. That is why no doctype is default in the
generated HTML files."""),
('--html_links_in_new_window',
"""Open HTML links in a new window/tab."""),
('--html_quiz_button_text=',
"""Text on buttons for collapsing/expanding answers and explanations
in quizzes (with bootstrap styles).
Default: Empty (just pencil glyphion)."""),
('--html_bootstrap_navbar=',
'Turns the Bootstrap navigation bar on/off. Default: on.'),
('--html_bootstrap_jumbotron=',
"""Turns the Bootstrap jumbotron intro on/off and governs the
size of the document title. Default: on. Other values: h2, off
(h2 gives h2 heading instead of h1, off gives no jumbotron)."""),
('--html_figure_hrule=', """Set horizontal rule(s) above and/or below a figure.
none, off: no rules
top: rule at top (default)
bottom: rule at bottom
top+bottom: rule at top and bottom"""),
('--device=',
"""Set device to paper, screen, or other (paper impacts LaTeX output)."""),
('--number_all_equations',
"""Switch latex environments such that all equations get a number."""),
('--denumber_all_equations',
"""Switch latex environments such no equations get a number (useful for removing equation labels in slides). Error messages are issued about references to numbered equations in the text."""),
('--latex_style=',
"""LaTeX style package used for the document.
std: standard LaTeX article or book style,
Springer_lncse: Springer's Lecture Notes in Computational Science and
Engineering (LNCSE) style,
Springer_llncs: Springer's Lecture Notes in Computer Science style,
Springer_T2: Springer's T2 book style,
Springer_collection: Springer's style for chapters in LNCSE proceedings,
Koma_Script: Koma Script style,
siamltex: SIAM's standard LaTeX style for papers,
siamltexmm: SIAM's extended (blue) multimedia style for papers."""),
('--latex_font=',
"""LaTeX font choice: helvetica, palatino, std (Computer Modern, default)."""),
('--latex_code_style=', """Typesetting of code blocks.
pyg: use pygments (minted), style is set with --minted_latex_style=
lst: use lstlistings
vrb: use Verbatim (default)
Specifications across languages:
pyg-blue1
lst, lst-yellowgray[style=redblue]
vrb[frame=lines,framesep=2.5mm,framerule=0.7pt]
Detailed specification for each language:
default:vrb-red1[frame=lines]@pycod:lst[style=redblue]@pypro:lst-blue1[style=default]@sys:vrb[frame=lines,label=\\fbox{{\\tiny Terminal}},framesep=2.5mm,framerule=0.7pt]
Here, Verbatim[frame=lines] is used for all code environments, except
pycod, pypro and sys, which have their own specifications.
pycod: lst package with redblue style (and white background)
pypro: lst package with default style and blue1 background
style, sys: Verbatim with the specified arguments and white background.
(Note: @ is delimiter for the language specifications, syntax is
envir:package-background[style parameters]@)
"""),
('--latex_code_leftmargin=', 'Sets the left margin in code blocks. Default: 7 (mm).'),
('--latex_code_bg=', 'Background color code blocks. Default: white.'),
('--latex_code_bg_vpad', 'Vertical padding of background. Only has an effect for vrb/pyg-bgcolor styles (not lst!).'),
('--latex_code_lststyles=', """Filename with LaTeX definitions of lst styles."""),
('--latex_bibstyle=',
'LaTeX bibliography style. Default: plain.'),
('--section_numbering=',
'Turn section numbering on/off. Default: off for all formats except latex and pdflatex (on for those).'),
('--latex_table_format=',
'Default: quote. Other values: left, center, footnotesize, tiny.'),
('--latex_title_layout=',
"""Layout of the title, authors, and date:
std: traditional LaTeX layout,
titlepage: separate page,
doconce_heading (default): authors with "footnotes" for institutions,
beamer: layout for beamer slides."""),
('--latex_title_reference=', """latex code placed in a footnote for the title,
typically used for acknowledging publisher/source of original
version of the document."""),
('--latex_encoding=', 'Encoding for \\usepackage[encoding]{inputenc}.\nValues: utf8 (default) or latin1.'),
('--latex_packages=',
"""Comma-separated list of latex packages to be included in \\usepackage commands.."""),
('--latex_papersize=',
"""Geometry of page size: a6, a4, std (default)."""),
('--latex_list_of_exercises=',
"""LaTeX typesetting of list of exercises:
loe: special, separate list of exercises,
toc: exercises included as part of the table of contents,
none (default): no list of exercises."""),
('--latex_movie=',
"""Specify package for handling movie/video content.
Default: href (hyperlink to movie file).
Other options: media9, movie15, multimedia (Beamer's \\movie command)."""),
('--latex_movie_controls=',
'Specify control panel for movies. Default: on. Other options: off.'),
('--latex_external_movie_viewer',
'Allow external movie viewer for movie15 package.'),
('--latex_fancy_header',
"""Typesetting of headers on each page:
If article: section name to the left and page number to the right
on even page numbers, the other way around on odd page numbers.
If book: section name to the left and page number to the right
on even page numbers, chapter name to the right and page number to
the left on odd page numbers."""),
('--latex_section_headings=',
"""Typesetting of title/section/subsection headings:
std (default): standard LaTeX,
blue: gray blue color,
strongblue: stronger blue color,
gray: white text on gray background, fit to heading width,
gray-wide: white text on gray background, wide as the textwidth."""),
('--latex_colored_table_rows=',
"""Colors on every two line in tables: no (default), gray, blue."""),
('--latex_line_numbers',
"""Include line numbers for the running text (only active if there
are inline comments)."""),
('--latex_todonotes',
"""Use the todonotes package to typeset inline comments.
Gives colored bubbles in the margin for small inline comments and
in the text for larger comments."""),
('--latex_double_spacing',
"""Sets the LaTeX linespacing to 1.5 (only active if there are
inline comments)."""),
('--latex_labels_in_margin',
"""Print equation, section and other LaTeX labels in the margin."""),
('--latex_index_in_margin',
'Place entries in the index also in the margin.'),
('--latex_preamble=',
"""User-provided LaTeX preamble file, either complete or additions
to the doconce-generated preamble."""),
('--latex_no_program_footnotelink',
"""If --device=paper, this option removes footnotes with links to
computer programs."""),
('--latex_admon=',
"""Type of admonition in LaTeX:
colors1:
(inspired by the NumPy User Guide) applies different colors
for the different admons with an embedded icon,
colors2:
like `colors1` but the text is wrapped around the icon,
mdfbox:
rounded boxes with an optional title and no icon (default),
graybox2:
box with square corners, gray background, and narrower
than mdfbox; if it contains code, it reduces to something like mdfbox
(mdframed based); for A4 format, the summary admon is
only half of the text width, with text wrapped around
(effective for proposals and articles),
grayicon:
box with gray icons and a default light gray background,
yellowicon:
box with yellow icons and a default light yellow background,
paragraph: plain paragraph with boldface heading.
Note: the colors in mdfbox and other boxes can be customized.
"""),
('--latex_admon_color=',
"""The color to be used as background in admonitions.
A single value applies to all admons:
Either rgb tuple or saturated color a la yellow!5:
--latex_admon_color=0.1,0.1,0.4
'--latex_admon_color=yellow!5'
Note the quotes, needed for bash, in the latter example.
Multiple values can be assigned, one for each admon (all admons must
be specified):
'--latex_admon_color=warning:darkgreen!40!white;notice:darkgray!20!white;summary:tucorange!20!white;question:red!50!white;block:darkgreen!40!white'
If --latex_admon=mdfbox, the specification above with color1!X!color2
will automatically trigger 2*X as the background color of the frametitle.
There are predefined multiple values, e.g.,
--latex_admon_color=colors1
gives red warnings, blue notices, orange questions, green summaries, and
yellow blocks, automatically adjusted with darker frametitles.
If --latex_admon=mdfbox, the background of the title and
the color of the box border can also be customized by
direct editing. For example, a dark blue border and light
blue title background is obtained by editing the .tex file as
doconce replace 'linecolor=black,' 'linecolor=darkblue,' mydoc.tex
doconce subst 'frametitlebackgroundcolor=.*?,' 'frametitlebackgroundcolor=blue!5,' mydoc.tex
Actually, this particular (and common) edit is automatically done by the option
--latex_admon_color=bluestyle
--latex_admon_color=yellowstyle
(the latter has color yellow!5 instead and yellow!20 for the border)
"""),
('--latex_admon_title_no_period',
"""By default, a period is added to title admons that do not have a period, question mark, or similar. This option prevents adding a period such that the title acts like a heading."""),
('--latex_admon_envir_map=',
"""Mapping of code envirs to new envir names inside admons, e.g.,
to get a different code typesetting inside admons. This is useful
if admons have a special color and the color background of code
blocks does not fit well with the color background inside admons.
Then it is natural to use a different verbatim code style inside
admons.
If specifying a number, say 2, as in --latex_admon_envir_map=2,
an envir like pycod gets the number appended: pycod2. One can
then in --latex_code_style= or in doconce ptex2tex or ptex2tex
specify the typesetting of pycod2 environments.
Otherwise the specification must be a mapping for each envir
that should be changed inside the admons:
--latex_admon_envir_map=pycod-pycod_yellow,fpro-fpro2
(from-to,from-to,... syntax)."""),
('--latex_subex_header_postfix=',
"""Default: ).
Gives headers a), b), etc. Can be set to period, colon, etc."""),
('--xelatex', 'Use xelatex instead of latex/pdflatex.'),
('--latex_double_hyphen',
"""Replace single dash - by double dash -- in LaTeX output.
Somewhat intelligent, but may give unwanted edits. Use with great care!"""),
('--latex_elsevier_journal=',
"""Sets the journal name for the --latex_style=elsevier style.
Default: none (no journal name)."""),
('--ipynb_version=', 'ipynb version 3 (default) or 4.'),
('--ipynb_split_pyshell=', """Split interactive sessions into multiple cells after each output.
Applies to pyshell and ipy code environments.
on, True, yes: split (default).
off, False, no: do not split.
Note that pyshell-t and ipy-t environments just display the session,
while default pyshell and ipy remove all output (all output from print
statements will come after the entire session).
"""),
('--ipynb_cite=', """Typesetting of bibliography.
plain: simple native typesetting (same as pandoc) (default)
latex: ipynb support for latex-style bibliographies (not mature)."""),
('--ipynb_admon=',
"""\
Typesetting of admonitions (hint, remarks, box, notice, summary,
warning, question, block - quotes are typeset as quotes).
quote: as Markdown quote (default) with gray line on the left.
paragraph: just the content with the title as paragraph heading.
hrule: title with horizontal rule above and below, then text and
horizontal rule."""),
('--ipynb_figure=', """\
How to typeset figures in ipynb:
md (plain Markdown syntax),
imgtag (<img src="..." width=...> tag, default)
Image (python cell with Image object)."""),
('--ipynb_movie=', """\
How to typeset movies in ipynb:
md (plain Markdown syntax, default)
HTML: python cell with notebook `HTML` object containing the raw HTML code
that is used in the DocOnce HTML format
ipynb: python cell with notebook `HTML` object with simple/standard
ipynb HTML code for showing a YouTube or local video with a <video>
tag."""),
('--verbose',
'Write out all OS commands run by doconce.'),
('--examples_as_exercises',
"""Treat examples of the form "==== Example: ..."
as exercises (typeset in exercise environments)."""),
('--solutions_at_end',
'Place solutions to exercises at the end of the document.'),
('--without_solutions',
'Leave out solution environments from exercises.'),
('--without_answers',
'Leave out answer environments from exercises.'),
('--without_hints',
'Leave out hints from exercises.'),
('--wordpress',
'Make HTML output for wordpress.com pages.'),
('--tables2csv',
"""Write each table to a CSV file table_X.csv,
where X is the table number (autonumbered according to
appearance in the DocOnce source file)."""),
('--sections_up',
"""Upgrade all sections: sections to chapters, subsections
to sections, etc."""),
('--sections_down',
"""Downgrade all sections: chapters to sections, sections
to subsections, etc."""),
('--os_prompt=',
"""Terminal prompt in output from running OS commands (the
@@@OSCMD instruction). None or empty: no prompt, just the command;
nocmd: no command, just the output. Default is "Terminal>"."""),
    ('--code_skip_until=', '@@@CODE import: skip lines in files up to (and including) specified line.'),
('--code_prefix=',
'Prefix all @@@CODE imports with some path.'),
('--figure_prefix=',
'Prefix all figure filenames with, e.g., an URL.'),
('--movie_prefix=',
'Prefix all movie filenames with, e.g., an URL.'),
('--no_mp4_webm_ogg_alternatives',
"""Use just the specified (.mp4, .webm, .ogg) movie file;
do not allow alternatives in HTML5 video tag.
Used if just the specified movie format should be played."""),
('--handout',
'Makes slides output suited for printing.'),
('--urlcheck',
'Check that all URLs referred to in the document are valid.'),
('--labelcheck=',
'Check that all ref{X} has a corresponding label{X}. Fake examples will fail this check and so will generalized references.\nTurn on when useful. Values: off (default), on.'),
('--short_title=',
"Short version of the document's title."),
('--markdown',
'Allow Markdown (and some Extended Markdown) syntax as input.'),
('--md2do_output=',
"""Dump to file the DocOnce code arising from converting from
Markdown. Default value is None (no dump).
Any filename can be specified: --md2do_output=myfile.do.txt"""),
('--github_md',
'Turn on github-flavored-markdown dialect of the pandoc translator'),
('--strapdown',
"""Wrap Markdown output in HTML header/footer such that the
output file (renamed as .html) can automatically be rendered as
an HTML via strapdownjs.com technology. Combine with --github_md
for richer output. Styles are set with --bootswatch_theme=cyborg
(for instance)."""),
('--bootswatch_theme=', 'Bootswatch theme for use with --strapdown option.'),
('--strict_markdown_output', 'Ensure strict/basic Markdown as output.'),
('--multimarkdown_output', 'Allow MultiMarkdown as output.'),
('--quiz_question_prefix=', """\
Prefix/title before question in quizzes. Default: "Question:".
Can also be set in square brackets for each individual question.
("Q: [] What is 1+1?"
results in no prefix/title before the question "What is 1+1?".)"""),
('--quiz_choice_prefix=', """\
Prefix/title before choices in quizzes.
Default for HTML: "Choice", resulting in numbered choices
"Choice 1:", "Choice 2:", etc.
A value with colon, period, or question mark (e.g., "Answer:")
leaves out the numbering.
Default for latex/pdflatex: letter or letter+checkbox.
Other values: number, number+checkbox, number+circle, letter+circle,
letter.
The checkbox or circle is always omitted if answers or solutions are
included (i.e., if none of the --without_answers and
--without_solutions is set).
The choice prefix can also be set in square brackets for each
individual choice.
("Cr: [] Two"
results in no prefix/title before the answer "Two".)
"""),
('--quiz_horizontal_rule=',
'on (default): <hr> before and after quiz in HTML. off: no <hr>.'),
('--quiz_explanations=',
"""on/off
(some output formats do not support explanations with figures,
math and/or code; this option turns all explanations off)."""),
('--rst_uio',
'Univ. of Oslo version of rst files for their Vortex system.'),
('--rst_mathjax',
'Use raw HTML with MathJax for LaTeX mathematics in rst files.'),
('--sphinx_keep_splits',
"""Respect user's !split commands. Default: Override user's !split
and insert new !split before all topmost sections. This is what
makes sense in a Sphinx Table of Contents if one wants to split
the document into multiple parts."""),
('--oneline_paragraphs',
'Combine paragraphs to one line (does not work well).'),
]
_legal_command_line_options = \
[opt for opt, help in _registered_command_line_options]
def get_legal_command_line_options():
"""Return list of legal command-line options."""
return _legal_command_line_options
def help_format():
print """
doconce format X doconcefile
where X can be any of the formats
html, latex, pdflatex, rst, sphinx, plain, gwiki, mwiki, cwiki,
pandoc, epytext.
"""
for opt, help in _registered_command_line_options:
if opt.endswith('='):
opt += '...'
print '\n%s\n\n%s\n' % (opt, help)
# Import options from config file instead of the command line
try:
import doconce_config
# Above module must do from doconce.doconce_config_default import *
except ImportError:
# No doconce_config module, rely on this package's default
import doconce_config_default as doconce_config
# Challenge: want different doconce_config files: just
# use different dirs and have one local in each
# or have system wide directories that one adjusts in PYTHONPATH
def option(name, default=None):
"""
Return value of command-line option with the given name.
If name ends with a = (as in ``--name=value``), return the value,
otherwise return True or False whether the option ``--name``
    is found or not. The value of `default` is returned
in case the option was not found.
"""
# Note: Do not use fancy command-line parsers as much functionality
# is dependent on command-line info (preprocessor options for instance)
    # that is not compatible with simple options (--name).
option_name = '--' + name
if not option_name in _legal_command_line_options:
print 'test for illegal option:', option_name
_abort()
# Check if a command-line option has dash instead of underscore,
# which is a common mistake
for arg in sys.argv[1:]:
if arg.startswith('--'):
if '=' in arg:
arg = arg.split('=')[0] + '='
if arg not in _legal_command_line_options and \
('--' + arg[2:].replace('-', '_')) in _legal_command_line_options:
print 'found option %s, should be %s' % \
(arg, '--' + arg[2:].replace('-', '_'))
_abort()
value = None # initialization
# Check if name is in configuration file (doconce_config)
# and get a default value from there
name_dash2underscore = name.replace('-', '_')
if hasattr(doconce_config, name_dash2underscore):
value = getattr(doconce_config, name_dash2underscore)
# Let the user's default value override that in the config file
if default is not None:
value = default
# Finally, let the command line override everything
if option_name.endswith('='):
for arg in sys.argv[1:]:
if arg.startswith(option_name):
parts = arg.split('=')
opt = parts[0]
value = '='.join(parts[1:])
break
elif option_name in sys.argv:
value = True
return value
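# Editor's note: the function below is only an illustrative sketch, not part
# of the doconce API. It shows how option() above is typically queried by the
# format drivers; the two option names are real registered options, but the
# default values are arbitrary.
def _example_option_usage():
    """Illustrate querying a value option and a flag option."""
    bibstyle = option('latex_bibstyle=', default='plain')  # value option
    verbose = option('verbose')                            # flag option
    return bibstyle, verbose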
def check_command_line_options(option_start):
# Error handling: check if all command-line options are of known types
for arg in sys.argv[option_start:]:
arg_user = arg
if '=' in arg:
arg = arg.split('=')[0] + '='
if arg[:2] == '--':
if not arg in _legal_command_line_options:
print '*** warning: unrecognized command-line option'
print ' ', arg_user
def misc_option(name, default=None):
"""
As option, but for options related to other doconce programs
than doconce format.
"""
option_name = '--' + name
value = default
if option_name.endswith('='):
for arg in sys.argv[1:]:
if arg.startswith(option_name):
parts = arg.split('=')
opt = parts[0]
value = '='.join(parts[1:])
break
elif option_name in sys.argv:
value = True
return value
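# Editor's note: illustrative sketch only (not part of doconce). Unlike
# option(), misc_option() above does not validate the name against the
# registered options, so helper programs can use ad hoc names; the name
# 'some_flag' below is hypothetical.
def _example_misc_option_usage():
    """Return the value of a hypothetical --some_flag=... option."""
    return misc_option('some_flag=', default='off')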
def _abort():
if '--no_abort' in sys.argv:
        print 'abort avoided because of --no_abort'
else:
        print 'Abort! (add --no_abort on the command line to avoid aborting)'
sys.exit(1)
def system(cmd, abort_on_failure=True, verbose=False, failure_info=''):
"""
Run OS command cmd.
If abort_on_failure: abort when cmd gives failure and print
command and failure_info (to explain what the command does).
If verbose: print cmd.
"""
if verbose or '--verbose' in sys.argv:
print 'running', cmd
failure = os.system(cmd)
if failure:
print 'could not run', cmd, failure_info
if abort_on_failure:
_abort()
return failure
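# Editor's note: illustrative sketch only (not part of doconce). It shows the
# intended calling pattern for system() above; the command assumes a
# Unix-like shell and is otherwise arbitrary.
def _example_system_usage():
    """Run a harmless command without aborting on failure."""
    failure = system('ls > /dev/null', abort_on_failure=False,
                     failure_info='(listing the current directory)')
    return failure == 0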
def remove_verbatim_blocks(text, format):
if format in ("latex", "pdflatex"):
envirs = r'([Vv]erbatim|minted|lstlisting|latex|tex)'
pattern = r'^\\begin\{%s\}.*^\\end\{%s\}' % (envirs, envirs)
text = re.sub(pattern, '', text, flags=re.MULTILINE|re.DOTALL)
# Do not remove comments too (they are actively used when searching
# text returned from this function)
##text = re.sub(r'(?<=[^!?@|\\])%.*', '', text)
return text
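# Editor's note: illustrative sketch only (not part of doconce). It feeds a
# tiny latex snippet to remove_verbatim_blocks() above; the Verbatim block is
# stripped while the surrounding text survives.
def _example_remove_verbatim_blocks():
    """Return 'Before. ... After.' with the Verbatim environment removed."""
    text = ('Before.\n'
            '\\begin{Verbatim}\n'
            'x = 1\n'
            '\\end{Verbatim}\n'
            'After.\n')
    return remove_verbatim_blocks(text, 'latex')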
def recommended_html_styles_and_pygments_styles():
"""
List good combinations of HTML slide styles and
pygments styles for typesetting code.
"""
combinations = {
'html': {
'blueish': ['default'],
'bloodish': ['default'],
'solarized': ['perldoc'],
'solarized2': ['perldoc'],
'solarized3': ['perldoc'],
'solarized3_dark': ['native'],
},
'deck': {
'neon': ['fruity', 'native'],
'sandstone.aurora': ['fruity'],
'sandstone.dark': ['native', 'fruity'],
'sandstone.mdn': ['fruity'],
'sandstone.mightly': ['default', 'autumn', 'manni', 'emacs'],
'beamer': ['autumn', 'perldoc', 'manni', 'default', 'emacs'],
'mnml': ['default', 'autumn', 'manni', 'emacs'],
'sandstone.firefox': ['default', 'manni', 'autumn', 'emacs'],
'sandstone.default': ['perldoc', 'autumn', 'manni', 'default'],
'sandstone.light': ['emacs', 'autumn'], # purple
'swiss': ['autumn', 'default', 'perldoc', 'manni', 'emacs'],
'web-2.0': ['autumn', 'default', 'perldoc', 'emacs'],
'cbc': ['default', 'autumn'],
},
'reveal': {
'beige': ['perldoc',],
'beigesmall': ['perldoc',],
'solarized': ['perldoc',],
'serif': ['perldoc'],
'simple': ['autumn', 'default', 'perldoc'],
'blood': ['monokai', 'native'],
'sky': ['default'],
'moon': ['fruity', 'native'],
'night': ['fruity', 'native'],
'darkgray': ['native', 'monokai'],
'cbc': ['default', 'autumn'],
'simula': ['autumn', 'default'],
},
'csss': {
'csss_default': ['monokai'],
},
'dzslides': {
'dzslides_default': ['autumn', 'default'],
},
'html5slides': {
'template-default': ['autumn', 'default'],
'template-io2011': ['autumn', 'default'],
},
'remark': {
'light': ['autumn', 'default'],
'dark': ['native', 'monokai'],
},
}
return combinations
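# Editor's note: illustrative sketch only (not part of doconce). It looks up
# the first recommended pygments style for a slide type/theme pair in the
# table returned above; 'reveal' and 'darkgray' are just example keys.
def _example_recommended_style(slide_tp='reveal', theme='darkgray'):
    """Return the first recommended pygments style, or None if unknown."""
    combinations = recommended_html_styles_and_pygments_styles()
    styles = combinations.get(slide_tp, {}).get(theme, [])
    return styles[0] if styles else None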
# -------------- functions used by the doconce program -------------
def remove_inline_comments():
try:
filename = sys.argv[1]
except IndexError:
print 'Usage: doconce remove_inline_comments myfile.do.txt'
_abort()
if not os.path.isfile(filename):
print '*** error: file %s does not exist!' % filename
sys.exit(1)
shutil.copy(filename, filename + '.old~~')
f = open(filename, 'r')
filestr = f.read()
f.close()
import doconce
filestr = doconce.subst_away_inline_comments(filestr)
f = open(filename, 'w')
f.write(filestr)
f.close()
print 'inline comments removed in', filename
def apply_inline_edits():
try:
filename = sys.argv[1]
except IndexError:
        print 'Usage: doconce apply_inline_edits myfile.do.txt'
_abort()
if not os.path.isfile(filename):
print '*** error: file %s does not exist!' % filename
sys.exit(1)
shutil.copy(filename, filename + '.old~~')
f = open(filename, 'r')
filestr = f.read()
f.close()
# pattern is taken as INLINE_TAGS['inlinecomment'] but with
# modified names and comments patterns.
# 1. Replacements
pattern = r'''\[(?P<name>[A-Za-z0-9 '+-]+?):(?P<space>\s+)(?P<subst>[^\]]+?) -> (?P<replacement>.+?)\]'''
filestr = re.sub(pattern, r'\g<replacement>', filestr, flags=re.DOTALL)
# 2. Deletes
pattern = r'''\[del:\s+(.*?)\]'''
filestr = re.sub(pattern, '', filestr, flags=re.DOTALL)
# 3. Adds
pattern = r'''\[add:\s+(.*?)\]'''
filestr = re.sub(pattern, r'\g<1>', filestr, flags=re.DOTALL)
f = open(filename, 'w')
f.write(filestr)
f.close()
    print 'inline edits applied in', filename
def latin2html():
"""
Substitute latin characters by their equivalent HTML encoding
in an HTML file. See doconce.html.latin2html for more
documentation.
"""
from doconce.html import latin2html
import os, shutil, sys
for filename in sys.argv[1:]:
if not os.path.isfile(filename):
print '*** error: file %s does not exist!' % filename
continue
oldfilename = filename + '.old~~'
shutil.copy(filename, oldfilename)
        print 'transforming latin characters to HTML encoding in', filename
f = open(oldfilename, 'r')
try:
text = f.read()
newtext = latin2html(text)
f.close()
f = open(filename, 'w')
f.write(newtext)
f.close()
except Exception, e:
print e.__class__.__name__, ':', e,
# replace is taken from scitools
def _usage_find_nonascii_chars():
    print 'Usage: doconce find_nonascii_chars file1 file2 ...'
def find_nonascii_chars():
if len(sys.argv) <= 1:
        _usage_find_nonascii_chars()
sys.exit(0)
filenames = wildcard_notation(sys.argv[1:])
for filename in filenames:
if os.path.isfile(filename):
with open(filename, 'r') as f:
text = f.read()
else:
            print 'File %s not found' % filename
sys.exit(1)
for i, c in enumerate(text):
if ord(c) > 127:
print 'non-ascii character', c, ' (ord=%d)' % ord(c)
print 'appearing in the text from %s:' % filename
print text[i-40:i], '--> %s <--' % c, text[i:i+40]
def gwiki_figsubst():
try:
gwikifile = sys.argv[1]
URLstem = sys.argv[2]
except IndexError:
print 'Usage: %s wikifile URL-stem' % sys.argv[0]
print 'Ex: %s somefile.gwiki http://code.google.com/p/myproject/trunk/doc/somedir' % sys.argv[0]
_abort()
if not os.path.isfile(gwikifile):
print '*** error: file %s does not exist!' % gwikifile
sys.exit(1)
# first grep out all filenames with local path:
shutil.copy(gwikifile, gwikifile + '.old~~')
f = open(gwikifile, 'r')
fstr = f.read()
f.close()
pattern = r'\(the URL of the image file (.+?) must be inserted here\)'
#figfiles = re.findall(pattern, fstr)
replacement = r'%s/\g<1>' % URLstem
fstr, n = re.subn(pattern, replacement, fstr)
pattern = re.compile(r'<wiki:comment>\s+Put the figure file .*?</wiki:comment>', re.DOTALL)
fstr, n2 = pattern.subn('', fstr)
f = open(gwikifile, 'w')
f.write(fstr)
f.close()
print 'Replaced %d figure references in' % n, gwikifile
if n != n2:
        print 'Something strange: %d fig references and %d comments... Bug.' % \
(n, n2)
# subst is taken from scitools
def _usage_subst():
print 'Usage: doconce subst [-s -m -x --restore] pattern '\
'replacement file1 file2 file3 ...'
print '--restore brings back the backup files'
print '-s is the re.DOTALL or re.S modifier'
print '-m is the re.MULTILINE or re.M modifier'
    print '-x is the re.VERBOSE or re.X modifier'
def _scitools_subst(patterns, replacements, filenames,
pattern_matching_modifiers=0):
"""
Replace a set of patterns by a set of replacement strings (regular
expressions) in a series of files.
The function essentially performs::
for filename in filenames:
file_string = open(filename, 'r').read()
for pattern, replacement in zip(patterns, replacements):
file_string = re.sub(pattern, replacement, file_string)
A copy of the original file is taken, with extension `.old~~`.
"""
# if some arguments are strings, convert them to lists:
if isinstance(patterns, basestring):
patterns = [patterns]
if isinstance(replacements, basestring):
replacements = [replacements]
if isinstance(filenames, basestring):
filenames = [filenames]
# pre-compile patterns:
cpatterns = [re.compile(pattern, pattern_matching_modifiers) \
for pattern in patterns]
modified_files = dict([(p,[]) for p in patterns]) # init
messages = [] # for return info
for filename in filenames:
if not os.path.isfile(filename):
print '*** error: file %s does not exist!' % filename
continue
        f = open(filename, 'r')
filestr = f.read()
f.close()
for pattern, cpattern, replacement in \
zip(patterns, cpatterns, replacements):
if cpattern.search(filestr):
filestr = cpattern.sub(replacement, filestr)
shutil.copy2(filename, filename + '.old~~') # backup
f = open(filename, 'w')
f.write(filestr)
f.close()
modified_files[pattern].append(filename)
# make a readable return string with substitution info:
for pattern in sorted(modified_files):
if modified_files[pattern]:
replacement = replacements[patterns.index(pattern)]
if replacement == '':
replacement = '<empty string>'
messages.append('%s replaced by %s in %s' % \
(pattern, replacement,
', '.join(modified_files[pattern])))
return ', '.join(messages) if messages else 'no substitutions'
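# Editor's note: illustrative sketch only (not part of doconce). It exercises
# _scitools_subst() above on a throw-away temporary file so that no real
# document (and no permanent .old~~ backup) is left behind.
def _example_scitools_subst():
    """Substitute a word in a temporary file and return the info message."""
    import tempfile
    fd, tmpname = tempfile.mkstemp(suffix='.txt')
    os.close(fd)
    f = open(tmpname, 'w')
    f.write('alpha beta alpha\n')
    f.close()
    msg = _scitools_subst(r'alpha', 'gamma', tmpname)
    for name in (tmpname, tmpname + '.old~~'):
        if os.path.isfile(name):
            os.remove(name)
    return msg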
def wildcard_notation(files):
"""
On Unix, a command-line argument like *.py is expanded
by the shell. This is not done on Windows, where we must
use glob.glob inside Python. This function provides a
uniform solution.
"""
if isinstance(files, basestring):
files = [files] # ensure list when single filename is given
if sys.platform[:3] == 'win':
import glob, operator
filelist = [glob.glob(arg) for arg in files]
files = reduce(operator.add, filelist) # flatten
return files
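# Editor's note: illustrative sketch only (not part of doconce). On Unix the
# shell expands wildcards before Python sees them, so wildcard_notation()
# above returns the list unchanged there; on Windows it glob-expands it.
def _example_wildcard_notation():
    """Return the (possibly glob-expanded) list for a single pattern."""
    return wildcard_notation(['*.do.txt'])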
def subst():
if len(sys.argv) < 3:
_usage_subst()
sys.exit(0)
from getopt import getopt
optlist, args = getopt(sys.argv[1:], 'smx', ['restore'])
if not args:
print 'no filename(s) given'
sys.exit(1)
restore = False
pmm = 0 # pattern matching modifiers (re.compile flags)
for opt, value in optlist:
if opt in ('-s',):
if not pmm: pmm = re.DOTALL
else: pmm = pmm|re.DOTALL
if opt in ('-m',):
if not pmm: pmm = re.MULTILINE
else: pmm = pmm|re.MULTILINE
if opt in ('-x',):
if not pmm: pmm = re.VERBOSE
else: pmm = pmm|re.VERBOSE
if opt in ('--restore',):
restore = True
if restore:
for oldfile in args:
newfile = re.sub(r'\.old~~$', '', oldfile)
if not os.path.isfile(oldfile):
print '%s is not a file!' % oldfile; continue
os.rename(oldfile, newfile)
print 'restoring %s as %s' % (oldfile,newfile)
else:
pattern = args[0]; replacement = args[1]
s = _scitools_subst(pattern, replacement,
wildcard_notation(args[2:]), pmm)
print s # print info about substitutions
# replace is taken from scitools
def _usage_replace():
print 'Usage: doconce replace from-text to-text file1 file2 ...'
def replace():
if len(sys.argv) < 4:
_usage_replace()
sys.exit(0)
from_text = sys.argv[1]
to_text = sys.argv[2]
filenames = wildcard_notation(sys.argv[3:])
for filename in filenames:
if not os.path.isfile(filename):
print '*** error: file %s does not exist!' % filename
continue
f = open(filename, 'r')
text = f.read()
f.close()
if from_text in text:
backup_filename = filename + '.old~~'
shutil.copy(filename, backup_filename)
print 'replacing %s by %s in' % (from_text, to_text), filename
text = text.replace(from_text, to_text)
f = open(filename, 'w')
f.write(text)
f.close()
def _usage_replace_from_file():
print 'Usage: doconce replace_from_file file-with-from-to file1 file2 ...'
print '\nThe file must contain two columns with the from and to parts'
print 'for each substitution. Comment lines starting with # are allowed.'
print 'The output from doconce list_labels has a form suitable for'
print 'being extended with a second column with new labels and run'
print 'with this command to clean up label names.'
def replace_from_file():
"""
Replace one set of words by another set of words in a series
of files. The set of words are stored in a file (given on
the command line). The data format of the file is
word replacement-word
word
# possible comment line, recognized by starting with #
word
word replacement-word
That is, there are either one or two words on each line. In case
of two words, the first is to be replaced by the second.
(This format fits well with the output of list_labels.)
"""
if len(sys.argv) < 3:
_usage_replace_from_file()
sys.exit(0)
fromto_file = sys.argv[1]
f = open(fromto_file, 'r')
fromto_lines = f.readlines()
f.close()
filenames = wildcard_notation(sys.argv[2:])
for filename in filenames:
f = open(filename, 'r')
text = f.read()
f.close()
replacements = False
for line in fromto_lines:
if line.startswith('#'):
continue
words = line.split()
if len(words) == 2:
from_text, to_text = words
if from_text in text:
backup_filename = filename + '.old~~'
shutil.copy(filename, backup_filename)
print 'replacing %s by %s in' % (from_text, to_text), filename
text = text.replace(from_text, to_text)
replacements = True
if replacements:
f = open(filename, 'w')
f.write(text)
f.close()
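# Editor's note: illustrative sketch only (not part of doconce). The string
# below shows the two-column file format that replace_from_file() above
# expects; the label names are made up.
def _example_fromto_file_content():
    """Return an example from-to mapping file for replace_from_file."""
    return """\
# lines with two words: replace the first word by the second
sec:intro      sec:introduction
eq:navier      eq:navier_stokes
# lines with one word are ignored (nothing to replace them with)
fig:mesh
"""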
def _usage_expand_mako():
    print 'Usage: doconce expand_mako mako_code_file.txt funcname mydoc.do.txt'
# This replacement function for re.sub must be global since expand_mako,
# where it is used, has an exec statement
def expand_mako():
if len(sys.argv) < 4:
_usage_expand_mako()
sys.exit(0)
mako_filename = sys.argv[1]
funcname = sys.argv[2]
filenames = wildcard_notation(sys.argv[3:])
# Get mako function code
f = open(mako_filename, 'r')
mako_text = f.read()
f.close()
func_lines = []
inside_func = False
for line in mako_text.splitlines():
if re.search(r'^\s*def\s+%s' % funcname, line): # starts with funcname?
inside_func = True
func_lines.append(line)
elif inside_func:
if line == '' or line[0] == ' ': # indented line?
func_lines.append(line)
else:
inside_func = False
funcname_text = '\n'.join(func_lines)
print 'Extracted function %s from %s:\n' % (funcname, mako_filename), funcname_text
print func_lines
try:
exec(funcname_text)
except Exception as e:
print '*** error: could not turn function code into a Python function'
print e
_abort()
# Note: if funcname has FORMAT tests the exec will fail, but
# one can make an alternative version of funcname in another file
# where one returns preprocess # #if FORMAT in ... statements
# in the returned text.
# Substitute ${funcname(..., ..., ...)}
pattern = r'(\$\{(%(funcname)s\s*\(.+?\))\})' % vars()
for filename in filenames:
# Just the filestem without .do.txt is allowed
if not filename.endswith('.do.txt'):
filename += '.do.txt'
if not os.path.isfile(filename):
print '*** error: file %s does not exist!' % filename
continue
f = open(filename, 'r')
text = f.read()
f.close()
m = re.search(pattern, text, flags=re.DOTALL)
if m:
backup_filename = filename + '.old~~'
shutil.copy(filename, backup_filename)
print 'expanding mako function %s in' % funcname, filename
calls = re.findall(pattern, text, flags=re.DOTALL)
for mako_call, python_call in calls:
try:
replacement = eval(python_call)
except Exception as e:
print '*** error: could not run call\n%s' % python_call
_abort()
text = text.replace(mako_call, replacement)
f = open(filename, 'w')
f.write(text)
f.close()
def _usage_linkchecker():
print 'Usage: doconce linkchecker file1.html|file1.do.txt|tmp_mako__file1.do.txt ...'
print 'Check if URLs or links to local files in DocOnce or HTML files are valid.'
def linkchecker():
if len(sys.argv) <= 1:
_usage_linkchecker()
sys.exit(0)
from common import is_file_or_url
prefix = '(file:///|https?://|ftp://)'
pattern_html = r'href="(%s.+?)"' % prefix
pattern_do = r'''"[^"]+?" ?:\s*"(%s.+?)"''' % prefix
missing = []
for filename in sys.argv[1:]:
ext = os.path.splitext(filename)[1]
if not ext in ('.html', '.htm', '.txt'):
print '*** error: %s is not a DocOnce or HTML file' % filename
continue
f = open(filename, 'r')
text = f.read()
f.close()
if filename.endswith('.do.txt'):
pattern = pattern_do
else:
            pattern = pattern_html
links = re.findall(pattern, text, flags=re.IGNORECASE)
missing.append([filename, []])
for link in links:
check = is_file_or_url(link, msg=None)
if check in ('file', 'url'):
print '%s:' % filename, link, 'exists as', check
else:
print '%s:' % filename, link, 'WAS NOT FOUND'
missing[-1][1].append(link)
for filename, missing_links in missing:
if missing_links:
print '\n\n*** missing links in %s:\n%s' % \
(filename, '\n'.join(['"%s"' % link
for link in missing_links]))
def _dofix_localURLs(filename, exclude_adr):
if os.path.splitext(filename)[1] != '.rst':
print 'Wrong filename extension in "%s" - must be a .rst file' \
% filename
_abort()
f = open(filename, 'r')
text = f.read()
f.close()
"""
# This is for doconce format:
link1 = r'''"(?P<link>[^"]+?)" ?:\s*"(?P<url>([^"]+?\.html?|[^"]+?\.txt|[^"]+?\.pdf|[^"]+?\.f|[^"]+?\.c|[^"]+?\.cpp|[^"]+?\.cxx|[^"]+?\.py|[^"]+?\.java|[^"]+?\.pl))"'''
link2 = r'("URL"|"url"|URL|url) ?:\s*"(?P<url>.+?)"'
groups1 = [(link, url) for link, url, url in re.findall(link1, text)]
print groups1
print groups2
"""
link_pattern = r'<([A-Za-z0-9/._-]+?)>`_'
links = re.findall(link_pattern, text)
num_fixed_links = 0
for link in links:
        if link in exclude_adr:
            print 'not modifying', link
            continue
if link.endswith('htm') or link.endswith('html'):
print 'Note: %s\n is an HTML file that may link to other files.\n This may require copying many files! Better: link to _static directly in the doconce document.' % link
continue
if not (link.startswith('http') or link.startswith('file:/') or \
link.startswith('_static')):
if os.path.isfile(link):
if not os.path.isdir('_static'):
os.mkdir('_static')
newlink = os.path.join('_static', os.path.basename(link))
text = text.replace('<%s>' % link, '<%s>' % newlink)
print 'fixing link to %s as link to %s' % \
(link, newlink)
print ' copying %s to _static' % os.path.basename(link)
shutil.copy(link, newlink)
if link.endswith('htm') or link.endswith('html'):
print 'Note: %s\n is an HTML file that may link to other files.\n This may require copying many files! Better: link to _static directly in the doconce document.' % link
num_fixed_links += 1
if num_fixed_links > 0:
os.rename(filename, filename + 'old~~')
f = open(filename, 'w')
f.write(text)
f.close()
return num_fixed_links
def _usage_sphinxfix_localURLs():
print """\
Usage: doconce sphinxfix_localURLs file1.rst file2.rst ... -not adr1 adr2 ...
Each link to a local file, e.g., "link": "src/dir1/myfile.txt",
is replaced by a link to the file placed in _static:
"link": "_static/myfile.txt". The file myfile.txt is copied
from src/dir1 to _static. The user must later copy all _static/*
files to the _static subdirectory in the sphinx directory.
Note that local links to files in _static are not modified.
The modification of links is not always wanted. The -not adr1 adr2 makes
it possible to exclude modification of a set of addresses adr1, adr2, ...
Example: doconce sphinxfix_localURLs file1.rst file2.rst \
-not src/dir1/mymod1.py src/dir2/index.html
The old files are available as file1.rst.old~~, file2.rst.old~~ etc.
Note that local links to HTML files which are linked to other local HTML
documents (say a Sphinx document) demand all relevant files to be
copied to _static. In such cases it is best to physically place
the HTML documents in _static and let the DocOnce document link
directly to _static.
In general, it is better to link to _static from the DocOnce document
rather than relying on the fixes in this script...
"""
def sphinxfix_localURLs():
if len(sys.argv) < 2:
_usage_sphinxfix_localURLs()
sys.exit(0)
# Find addresses to exclude
idx = -1 # index in sys.argv for the -not option
for i, arg in enumerate(sys.argv[1:]):
if arg.endswith('-not'):
idx = i+1
exclude_adr = sys.argv[idx+1:] if idx > 0 else []
if idx > 0:
del sys.argv[idx:]
for filename in sys.argv[1:]:
if os.path.dirname(filename) != '':
print 'doconce sphinxfix_localURLs must be run from the same directory as %s is located in' % filename
num_fixed_links = _dofix_localURLs(filename, exclude_adr)
if num_fixed_links > 0:
print "\nYou must copy _static/* to the sphinx directory's _static directory"
def _usage_latex_exercise_toc():
print 'Usage: doconce latex_exercise_toc myfile.do.txt ["List of exercises"]'
print """
Can insert
# Short: My own short title
in the text of an exercise and this defines a short version of the
title of the exercise to be used in the toc table.
This is convenient when the automatic truncation of (long) titles
fails (happens if truncated in the middle of mathematical $...$
constructions). Any short title is appearing in the table exactly
how it is written, so this is also a method to avoid truncating
a title.
"""
def latex_exercise_toc():
if len(sys.argv) < 2:
_usage_latex_exercise_toc()
sys.exit(0)
dofile = sys.argv[1]
if dofile.endswith('.do.txt'):
dofile = dofile[:-7]
exerfile = '.' + dofile + '.exerinfo'
if not os.path.isfile(exerfile):
print 'no file %s with exercises from %s found' % (exerfile, dofile)
return
f = open(exerfile, 'r')
exer = eval(f.read())
f.close()
try:
heading = sys.argv[2]
except IndexError:
# Build default heading from types of environments found
types_of_exer = set()
for ex in exer:
if ex['type'] != 'Example':
types_of_exer.add(ex['type'])
types_of_exer = list(types_of_exer)
types_of_exer = ['%ss' % tp for tp in types_of_exer] # plural
types_of_exer = [tp for tp in sorted(types_of_exer)] # alphabetic order
if len(types_of_exer) == 1:
types_of_exer = types_of_exer[0]
elif len(types_of_exer) == 2:
types_of_exer = ' and '.join(types_of_exer)
elif len(types_of_exer) > 2:
types_of_exer[-1] = 'and ' + types_of_exer[-1]
types_of_exer = ', '.join(types_of_exer)
heading = "List of %s" % types_of_exer
latex = r"""
\clearpage %% pagebreak before list of exercises
\subsection*{%s}
\\begin{tabular}{lrll}
""" % heading
max_title_length = 45
for ex in exer:
if ex['type'] == 'Example':
continue
title = ex['title']
# Short title?
short = ''
for line in ex['text'].splitlines():
m = re.search(r'#\s*[Ss]hort:\s*(.+)', line)
if m:
short = m.group(1).strip()
title = short
break
if not short:
# Truncate long titles
if len(title) > max_title_length:
words = title.split()
title = []
for word in words:
title.append(word)
if len(' '.join(title)) > max_title_length - 5:
title.append('...')
break
title = ' '.join(title)
title = title.replace('\\', '\\\\') # re.sub later swallows \
latex += ex['type'] + ' & ' + str(ex['no']) + ' & ' + title
if ex['label']:
latex += r' & p.~\pageref{%s}' % ex['label']
else:
# Leave pageref empty
latex += ' &'
latex += ' \\\\\\\\' + '\n'
# (need 8 \ for \\ to survive because re.sub below eats them)
latex += r"""\end{tabular}
% --- end of table of exercises
\clearpage % pagebreak after list of exercises
"""
ptexfile = dofile + '.p.tex'
f = open(ptexfile, 'r')
shutil.copy(ptexfile, ptexfile + '.old~~')
filestr = f.read()
f.close()
if r'\tableofcontents' in filestr:
# Insert table of exercises on the next line
filestr = re.sub(r'(tableofcontents.*$)', '\g<1>\n' + latex,
filestr, flags=re.MULTILINE)
f = open(ptexfile, 'w')
f.write(filestr)
print 'table of exercises inserted in', ptexfile
f.close()
else:
print '*** error: cannot insert table of exercises because there is no'
print ' table of contents requested in the', dofile, 'document'
def _usage_combine_images():
print """\
Usage: doconce combine_images [pdf|png] [-4] image1 image2 ... output_file
Applies montage if not PDF or EPS images, else
pdftk, pdfnup and pdfcrop.
Images are combined with two each row, by default, but
doconce combine_images -3 ... gives 3 images in each row.
The first command-line argument can be a file extension and
the filenames can then be given without extension:
doconce combine_images pdf -2 u1 u2 u12
"""
def combine_images():
if len(sys.argv) < 3:
_usage_combine_images()
sys.exit(0)
if sys.argv[1] in ('pdf', 'png', 'jpg', 'eps', 'ps', 'jpeg', 'tif', 'tiff'):
extension = sys.argv[1]
del sys.argv[1]
else:
extension = None
if sys.argv[1].startswith('-'):
num_columns = int(sys.argv[1][1:])
del sys.argv[1]
else:
num_columns = 2
    bitmap_formats = '.png', '.tif', '.tiff', '.gif', '.jpeg', '.jpg'
imagefiles = sys.argv[1:-1]
# See if files have extension
for i in range(len(imagefiles)):
basename, ext = os.path.splitext(imagefiles[i])
if not ext and extension is not None:
imagefiles[i] = imagefiles[i] + '.' + extension
for name in imagefiles:
if not os.path.isfile(name):
print '*** error: file "%s" is non-existing' % name
_abort()
output_file = sys.argv[-1]
basename, ext = os.path.splitext(output_file)
if not ext and extension is not None:
output_file += '.' + extension
ext = [os.path.splitext(f)[1] for f in imagefiles]
montage = False
# If one of the formats in formats: montage = True
for format in bitmap_formats:
if format in ext:
montage = True
cmds = []
if montage:
cmds.append('montage -background white -geometry 100%% -tile %dx %s %s' % (num_columns, ' '.join(imagefiles), output_file))
cmds.append('convert -trim %s %s' % (output_file, output_file))
else:
# Assume all are .pdf or .eps
# Convert EPS to PDF
for i in range(len(imagefiles)):
f = imagefiles[i]
if '.eps' in f:
cmds.append('ps2pdf -DEPSCrop %s' % f)
imagefiles[i] = f.replace('.eps', '.pdf')
# Combine PDF images
num_rows = int(round(len(imagefiles)/float(num_columns)))
cmds.append('pdftk %s output tmp.pdf' % ' '.join(imagefiles))
cmds.append('pdfnup --nup %dx%d --outfile tmp.pdf tmp.pdf' % (num_columns, num_rows))
cmds.append('pdfcrop tmp.pdf %s' % output_file)
cmds.append('rm -f tmp.pdf')
print
for cmd in cmds:
system(cmd, verbose=True)
print 'output in', output_file
def _usage_expand_commands():
print 'Usage: doconce expand_commands file1 file2 ...'
print """
A file .expand_commands may define _replace and _regex_subst lists
for str.replace and re.sub substitutions (respectively) to be applied
to file1 file2 ...
By default we use some common LaTeX math abbreviations:
_replace = [
(r'\bals', r'\begin{align*}'), # must appear before \bal
(r'\eals', r'\end{align*}'),
(r'\bal', r'\begin{align}'),
(r'\eal', r'\end{align}'),
(r'\beq', r'\begin{equation}'),
(r'\eeq', r'\end{equation}'),
]
_regex_subst = []
"""
def expand_commands():
if len(sys.argv) < 2:
_usage_expand_commands()
sys.exit(0)
# Default set of str.replace and re.sub substitutions
_replace = [
(r'\bals', r'\begin{align*}'), # must appear before \bal
(r'\eals', r'\end{align*}'),
(r'\bal', r'\begin{align}'),
(r'\eal', r'\end{align}'),
(r'\beq', r'\begin{equation}'),
(r'\eeq', r'\end{equation}'),
]
# These \ep subst don't work properly
_regex_subst = [
(r'^\ep\n', r'\\thinspace .\n', re.MULTILINE),
(r'\ep\n', r' \\thinspace .\n'),
(r'\ep\s*\\\]', r' \\thinspace . \]'),
(r'\ep\s*\\e', r' \\thinspace . \e'),
(r' \\thinspace', 'thinspace'),
]
_regex_subst = []
    replace = []
    regex_subst = []
    expand_commands_file = '.expand_commands'
    if os.path.isfile(expand_commands_file):
        # The file may redefine _replace and _regex_subst (see usage above)
        execfile(expand_commands_file)
# Add standard definitions (above)
replace += _replace
regex_subst += _regex_subst
filenames = sys.argv[1:]
for filename in filenames:
changed = False
f = open(filename, 'r')
text = f.read()
f.close()
for from_, to_ in replace:
if from_ in text:
text = text.replace(from_, to_)
print 'replacing %s by %s in %s' % (from_, to_, filename)
changed = True
for item in regex_subst:
if len(item) == 2:
from_, to_ = item
if re.search(from_, text):
text = re.sub(from_, to_, text)
print 'substituting %s by %s in %s' % (from_, to_, filename)
changed = True
elif len(item) == 3:
                from_, to_, modifier = item
if re.search(from_, text, flags=modifier):
text = re.sub(from_, to_, text, flags=modifier)
print 'substituting %s by %s in %s' % (from_, to_, filename)
changed = True
if changed:
shutil.copy(filename, filename + '.old~~')
f = open(filename, 'w')
f.write(text)
f.close()
def copy_latex_packages(packages):
"""
Copy less well-known latex packages to the current directory
if the packages are not found on the (Unix) system.
"""
datafile = latexstyle_files # global variable (latex_styles.zip)
missing_files = []
import commands
for style in packages:
stem, ext = os.path.splitext(style)
if ext == '':
style += '.sty'
failure, output = commands.getstatusoutput('kpsewhich %s' % style)
if output == '':
missing_files.append(style)
if missing_files:
# Copy zipfile with styles to current dir
print '*** missing style files:'
print ' ', ', '.join(missing_files)
import doconce
doconce_dir = os.path.dirname(doconce.__file__)
doconce_datafile = os.path.join(doconce_dir, datafile)
shutil.copy(doconce_datafile, os.curdir)
import zipfile
for filename in missing_files:
# Extract file from zip archive
if not os.path.isfile(filename):
try:
zipfile.ZipFile(datafile).extract(filename)
msg = 'extracted'
except:
msg = 'could not extract'
print '%s %s (from %s in the doconce installation)' % \
(msg, filename, latexstyle_files)
if os.path.isfile(datafile):
os.remove(datafile)
def _usage_ptex2tex():
print r"""\
Usage: doconce ptex2tex [file | file.p.tex] [-Dvar1=val1 ...] \
[cod=\begin{quote}\begin{verbatim}@\end{verbatim}\end{quote} \
pypro=Verbatim fcod=minted ccod=ans cpppro=anslistings:nt]
or
doconce ptex2tex file -Dvar1=val1 ... envir=ans:nt
or
doconce ptex2tex file "sys=\begin{Verbatim}[frame=lines,label=\fbox{{\tiny Terminal}},framesep=2.5mm,framerule=0.7pt]@\end{Verbatim}" envir=minted --minted_leftmargin=2
(recall quotes in arguments with backslash), or
doconce ptex2tex file envir=Verbatim
Here the Verbatim (from fancyvrb) is used for all environments, with
some options added (base linestretch 0.85 and font size 9pt).
The last command is equivalent to the default
doconce ptex2tex
Note that specifications of how "!bc environment" is to be typeset
in latex is done by environment=begin@end, where begin is the latex
begin command, end is the latex end command, and the two must
be separated by the at sign (@). Writing just environment=package implies
the latex commands \begin{package} and \end{package}.
Choosing environment=minted gives the minted environment with
the specified language inserted. Similarly, environment=ans,
environment=ans:nt, environment=anslistings, or environment=anslistings:nt
imply the anslistings package with the right environment
(\begin{c++:nt} for instance for !bc cppcod or !bc cpppro,
environment=ans:nt - :nt means no title over the code box).
If environment is simply the string "envir", the value applies to all
registered environments. Specifying (e.g.) sys=... and then envir=ans,
will substitute the sys environment by the specified syntax and all
other environments will apply the latex construct from anslistings.sty.
"""
def ptex2tex():
if len(sys.argv) <= 1:
_usage_ptex2tex()
sys.exit(0)
filename = sys.argv[1]
if filename.endswith('.p.tex'):
filename = filename[:-6]
if not os.path.isfile(filename + '.p.tex'):
print 'no file %s' % (filename + '.p.tex')
_abort()
f = open(filename + '.p.tex', 'r')
ptex2tex_filestr = f.read()
f.close()
# All envirs in the .ptex2tex.cfg file as of June 2012.
# (Recall that the longest names must come first so that they
# are substituted first, e.g., \bcc after \bccod)
envirs = 'pro pypro cypro cpppro cpro fpro plpro shpro mpro cod pycod cycod cppcod ccod fcod plcod shcod mcod rst cppans pyans fans bashans swigans uflans sni dat dsni sys slin ipy pyshell rpy plin ver warn rule summ ccq cc ccl txt'.split()
envirs += ['htmlcod', 'htmlpro', 'html',
'rbpro', 'rbcod', 'rb',
'xmlpro', 'xmlcod', 'xml',
'latexpro', 'latexcod', 'latex']
    # envirs is no longer used - we just read what the user has in the file
# Accept all envirs in envir2pygments, plus all
# registered lexers in pygments
from common import get_legal_pygments_lexers
ptex2tex_begin_pattern = r'^\\b([a-z0-9+_]+)$'
user_envirs = re.findall(ptex2tex_begin_pattern, ptex2tex_filestr,
flags=re.MULTILINE)
# Process command-line options
preprocess_options = [] # -Dvariable or -Dvariable=value
envir_user_spec = [] # user's specified environments
for arg in sys.argv[2:]:
if arg.startswith('-D') or arg.startswith('-U'):
preprocess_options.append(arg)
elif '=' in arg:
# envir
items = arg.split('=')
envir, value = items[0], '='.join(items[1:])
if '@' in value:
begin, end = value.split('@')
if envir == 'envir':
# User specifies all ptex2tex environments at once
# as "envir=begin@end"
for e in user_envirs:
envir_user_spec.append((e, begin, end))
else:
envir_user_spec.append((envir, begin, end))
else:
# Fix value=minted and value=ans*:
# they need the language explicitly
if value == 'minted':
envir2pygments = dict(
pyshell='python',
py='python', cy='cython', f='fortran',
c='c', cpp='c++', sh='bash', rst='rst',
m ='matlab', pl='perl', swig='c++',
latex='latex', html='html', js='js',
java='java',
xml='xml', rb='ruby', sys='console',
dat='text', txt='text', csv='text',
ipy='ipy', do='doconce',
# pyopt and pysc are treated in latex.py
)
# Find substitutes for ipy and doconce if these lexers
# are not installed
# (third-party repos, does not come with pygments, but
# warnings have been issued by doconce format, with
# URLs to where the code can be obtained)
from pygments.lexers import get_lexer_by_name
try:
get_lexer_by_name('ipy')
except:
envir2pygments['ipy'] = 'python'
try:
get_lexer_by_name('doconce')
except:
envir2pygments['do'] = 'text'
legal_lexers = get_legal_pygments_lexers()
for user_envir in user_envirs:
if user_envir in envir2pygments:
pass
elif user_envir in legal_lexers:
envir2pygments[user_envir] = user_envir
if envir == 'envir':
leftmargin = '7' # mm
for arg in sys.argv[1:]:
if arg.startswith('--minted_leftmargin='):
leftmargin = arg.split('=')[1]
for lang in envir2pygments:
# mathescape can be used with minted and lstlisting
# see http://tex.stackexchange.com/questions/149710/how-to-write-math-symbols-in-a-verbatim, minted can only have math in comments within the code
# but mathescape make problems with bash and $#
# (can perhaps be fixed with escapechar=... but
# I haven't found out)
if lang != 'sh':
begin = '\\' + 'begin{minted}[fontsize=\\fontsize{9pt}{9pt},linenos=false,mathescape,baselinestretch=1.0,fontfamily=tt,xleftmargin=%smm]{' % leftmargin + envir2pygments[lang] + '}'
else:
begin = '\\' + 'begin{minted}[fontsize=\\fontsize{9pt}{9pt},linenos=false,baselinestretch=1.0,fontfamily=tt,xleftmargin=%smm]{' % leftmargin + envir2pygments[lang] + '}'
end = '\\' + 'end{minted}'
envir_user_spec.append((lang, begin, end))
else:
for lang in envir2pygments:
if envir.startswith(lang + 'cod') or \
envir.startswith(lang + 'pro'):
begin = '\\' + 'begin{' + value + '}{' \
+ envir2pygments[lang] + '}'
end = '\\' + 'end{' + value + '}'
envir_user_spec.append((envir, begin, end))
elif value.startswith('ans'):
# Mapping from code envirs to valid anslistings names
envir2listings = dict(
pyshell='python',
py='python', cy='python', f='fortran',
cpp='c++', sh='bash', swig='swigcode',
ufl='uflcode', m='matlab', c='c++',
latex='latexcode', xml='xml',
pyopt='python', ipy='python')
if envir == 'envir':
for lang in envir2listings:
language = envir2listings[lang]
if value.endswith(':nt'):
language += ':nt'
begin = '\\' + 'begin{' + language + '}'
end = '\\' + 'end{' + language + '}'
envir_user_spec.append((lang, begin, end))
else:
for lang in envir2listings:
if envir.startswith(lang + 'cod') or \
envir.startswith(lang + 'pro'):
lang = envir2listings[lang]
if value.endswith(':nt'):
lang += ':nt'
begin = '\\' + 'begin{' + lang + '}'
end = '\\' + 'end{' + lang + '}'
envir_user_spec.append((envir, begin, end))
else:
# value is not minted or ans*
options = ''
if value == 'Verbatim':
# provide lots of options
options = r'[numbers=none,fontsize=\fontsize{9pt}{9pt},baselinestretch=0.95,xleftmargin=0mm]'
elif value == 'Verbatim-0.85':
# provide lots of options
options = r'[numbers=none,fontsize=\fontsize{9pt}{9pt},baselinestretch=0.85,xleftmargin=0mm]'
elif value == 'Verbatim-indent':
options = r'[numbers=none,fontsize=\fontsize{9pt}{9pt},baselinestretch=0.95,xleftmargin=8mm]'
begin = '\\' + 'begin{' + value + '}' + options
end = '\\' + 'end{' + value + '}'
if envir == 'envir':
for e in user_envirs:
envir_user_spec.append((e, begin, end))
else:
envir_user_spec.append((envir, begin, end))
# Find which environments that will be defined and which
# latex packages that must be included.
ans = ['c++', 'c', 'fortran', 'python', 'cython', 'xml',
'bash', 'swigcode', 'uflcode', 'matlab', 'progoutput',
'latexcode', 'anycode']
ans = ans + [i+':nt' for i in ans]
package2envir = dict(fancyvrb='Verbatim', anslistings=ans, minted='minted')
latexenvir2package = {}
for package in package2envir:
if isinstance(package2envir[package], list):
for latexenvir in package2envir[package]:
latexenvir2package[latexenvir] = package
else: # str
latexenvir2package[package2envir[package]] = package
#print 'envir_user_spec:' #
#import pprint; pprint.pprint(envir_user_spec)
#print 'latex2envir2package:'; pprint.pprint(latexenvir2package)
# Run through user's specifications and grab latexenvir from
# end = \end{latexenvir}, find corresponding package and add to set
packages = set()
for envir, begin, end in envir_user_spec:
m = re.search(r'\\end\{(.+?)\}', end)
if m:
latexenvir = m.group(1)
if latexenvir in latexenvir2package:
packages.add(latexenvir2package[latexenvir])
else:
print 'No package known for latex environment "%s" ' % latexenvir
packages = list(packages)
# fancyvrb is needed for \code{...} -> \Verb!...! translation
if not 'fancyvrb' in packages:
packages.append('fancyvrb')
#print 'packages:'; pprint.pprint(packages)
# Run preprocess
if not preprocess_options:
if 'minted' in packages:
preprocess_options += ['-DMINTED']
if '-DMINTED' in preprocess_options and 'minted' in packages:
packages.remove('minted') # nicer with just one \usepackage{minted}
output_filename = filename + '.tex'
cmd = 'preprocess %s %s > %s' % \
(' '.join(preprocess_options),
filename + '.p.tex',
output_filename)
system(cmd, failure_info="""
preprocess failed or is not installed;
download preprocess from http://code.google.com/p/preprocess""")
# Mimic ptex2tex by replacing all code environments by
# a plain verbatim command
f = open(output_filename, 'r')
filestr = f.read()
f.close()
# Replace the environments specified by the user
from latex import fix_latex_command_regex
for envir, begin, end in envir_user_spec:
for postfix in ['cod', 'pro', '']:
ptex2tex_begin = '\\' + 'b' + envir + postfix
ptex2tex_end = '\\' + 'e' + envir + postfix
begin_pattern = r'^\%s$' % ptex2tex_begin
end_pattern = r'^\%s$' % ptex2tex_end
if re.search(fix_latex_command_regex(begin_pattern),
filestr, flags=re.MULTILINE):
filestr = re.sub(
begin_pattern,
fix_latex_command_regex(begin, application='replacement'),
filestr, flags=re.MULTILINE)
filestr = re.sub(
end_pattern,
fix_latex_command_regex(end, application='replacement'),
filestr, flags=re.MULTILINE)
print '%s (!bc %s) -> %s\n' % (ptex2tex_begin, envir, begin)
# Replace other environments by a default choice
begin = r"""\begin{Verbatim}[numbers=none,fontsize=\fontsize{9pt}{9pt},baselinestretch=0.95]"""
end = r"""\end{Verbatim}"""
#begin = r"""\begin{quote}\begin{verbatim}"""
#end = r"""\end{verbatim}\end{quote}"""
for envir in user_envirs:
ptex2tex_begin = '\\' + 'b' + envir
ptex2tex_end = '\\' + 'e' + envir
begin_pattern = r'^\%s$' % ptex2tex_begin
end_pattern = r'^\%s$' % ptex2tex_end
if re.search(fix_latex_command_regex(begin_pattern),
filestr, flags=re.MULTILINE):
filestr = re.sub(
begin_pattern,
fix_latex_command_regex(begin, application='replacement'),
filestr, flags=re.MULTILINE)
filestr = re.sub(
end_pattern,
fix_latex_command_regex(end, application='replacement'),
filestr, flags=re.MULTILINE)
print '%s (!bc %s) -> %s ("%s" is unsupported so we use Verbatim)\n' % (ptex2tex_begin, envir, begin, envir)
# Make sure we include the necessary verbatim packages
if packages:
filestr = filestr.replace(r'\usepackage{ptex2tex}',
r'\usepackage{%s} %% packages needed for verbatim environments' %
(','.join(packages)))
else:
filestr = filestr.replace(r'\usepackage{ptex2tex}', '')
# Copy less well-known latex packages to the current directory
stylefiles = [name for name in ['minted', 'anslistings', 'fancyvrb']
if name in packages]
# preprocess is run so we can check which less known packages
# that are required
less_known_packages = ['mdframed', 'titlesec',] # more?
#stylefiles += less_known_packages
copy_latex_packages(stylefiles)
if 'minted' in packages:
failure, output = commands.getstatusoutput('pygmentize')
if failure:
print 'You have requested the minted latex style, but this'
print 'requires the pygments package to be installed. On Debian/Ubuntu: run'
print 'Terminal> sudo apt-get install python-pygments'
print 'Or'
print 'Terminal> hg clone http://bitbucket.org/birkenfeld/pygments-main pygments'
print 'Terminal> cd pygments; sudo python setup.py install'
_abort()
filestr = replace_code_command(filestr)
f = open(output_filename, 'w')
f.write(filestr)
f.close()
print 'output in', output_filename
def replace_code_command(filestr):
"""Replace \code{...} by \Verb!...! or \textttt{...}."""
# Remove one newline (two implies far too long inline verbatim)
pattern = re.compile(r'\\code\{([^\n}]*?)\n(.*?)\}', re.DOTALL)
# (this pattern does not handle \code{...} with internal } AND \n!)
filestr = pattern.sub(r'\code{\g<1> \g<2>}', filestr)
verb_command = 'Verb' # requires fancyvrb package, otherwise use std 'verb'
verb_delimiter = '!'
alt_verb_delimiters = '?', '@', '|' # can't use ~,%,#,$,^,&,* in latex headings
cpattern = re.compile(r"""\\code\{(.*?)\}([ \n,.;:?!)"'-])""", re.DOTALL)
# Check if the verbatim text contains verb_delimiter and make
# special solutions for these first
alt_verb_delimiter = None
for verbatim, dummy in cpattern.findall(filestr):
if verb_delimiter in verbatim:
for delimiter in alt_verb_delimiters:
if delimiter not in verbatim:
alt_verb_delimiter = delimiter
break
if alt_verb_delimiter is None:
alt_verb_delimiter = alt_verb_delimiters[0]
print """
*** warning: inline verbatim "%s"
contains all delimiters %s that the LaTeX
command \\Verb can make use of - be prepared for strange output that
requires manual editing (or use doconce replace/subst) of
\\Verb%s%s%s
or move this line verbatim expression to a code block !bc ... !ec.
""" % (verbatim, [verb_delimiter] + list(alt_verb_delimiter),
alt_verb_delimiter, verbatim, alt_verb_delimiter)
# Here one can have a problem in that verbatim contains
# special regex chars such as $, \, ., etc. Use re.escape
pattern = re.escape(r'\code{%s}' % verbatim) + r"""([ \n,.;:?!)"'-])"""
replacement = r'\\%s%s%s%s\g<1>' % \
(verb_command, alt_verb_delimiter, verbatim, alt_verb_delimiter)
            # Note: A previous occurrence in cpattern.findall may have performed
# this substitution
filestr = re.sub(pattern, replacement, filestr, flags=re.DOTALL)
# Exceptional cases are dealt with, proceed with the standard case
filestr = cpattern.sub(r'\\%s%s\g<1>%s\g<2>' %
(verb_command, verb_delimiter, verb_delimiter),
filestr)
'''
# If fontsize is part of the \Verb command (which is not wise, since
# explicit fontsize is not suitable for section headings),
# first handle combination of \protect and \code
fontsize = 10 # should be configurable from the command line
cpattern = re.compile(r"""\\protect\s*\\code\{(.*?)\}([ \n,.;:?!)"'-])""", re.DOTALL)
filestr = cpattern.sub(r'{\\fontsize{%spt}{%spt}\protect\\%s!\g<1>!}\g<2>' %
(fontsize, fontsize, verb_command), filestr)
# Handle ordinary \code
cpattern = re.compile(r"""\\code\{(.*?)\}([ \n,.;:?!)"'-])""", re.DOTALL)
filestr = cpattern.sub(r'{\\fontsize{%spt}{%spt}\\%s!\g<1>!}\g<2>' %
(fontsize, fontsize, verb_command), filestr)
'''
# \Verb!...! does not cause linebreak in latex, therefore shift to \texttt{}
# where possible since this will reduce overfull hboxes
filestr = re.sub(r'\\protect\s*\\Verb!([^{}_$\^#%&\\]+?)!',
r'\\texttt{\g<1>}', filestr)
filestr = re.sub(r'\\Verb!([^{}_$\^#%&\\]+?)!',
r'\\texttt{\g<1>}', filestr)
return filestr
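# Editor's note: illustrative sketch only (not part of doconce). It runs
# replace_code_command() above on a one-line snippet; simple inline code ends
# up as \texttt{...} since it contains no characters that require \Verb.
def _example_replace_code_command():
    r"""Return the snippet with \code{a+b} typeset as \texttt{a+b}."""
    snippet = r'The call \code{a+b} is evaluated.' + '\n'
    return replace_code_command(snippet)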
def _usage_grab():
print 'Usage: doconce grab --from[-] from-text [--to[-] to-text] file'
def grab():
"""
Grab a portion of text from a file, starting with from-text
(included if specified as --from, not included if specified
via --from-) up to the first occurrence of to-text (--to implies
that the last line is included, --to- excludes the last line).
If --to[-] is not specified, all text up to the end of the file
is returned.
from-text and to-text are specified as regular expressions.
"""
if len(sys.argv) < 4:
_usage_grab()
sys.exit(0)
filename = sys.argv[-1]
if not sys.argv[1].startswith('--from'):
print 'missing --from fromtext or --from- fromtext option on the command line'
_abort()
from_included = sys.argv[1] == '--from'
from_text = sys.argv[2]
# Treat --to
# impossible text (has newlines) that will never be found
# is used as to-text if this is not specified
impossible_text = '@\n\n@'
try:
to_included = sys.argv[3] == '--to'
to_text = sys.argv[4]
except IndexError:
to_included = True
to_text = impossible_text
from_found = False
to_found = False
copy = False
lines = [] # grabbed lines
for line in open(filename, 'r'):
m_from = re.search(from_text, line)
m_to = re.search(to_text, line)
if m_from and not from_found:
copy = True
from_found = True
if from_included:
lines.append(line)
elif m_to:
copy = False
to_found = True
if to_included:
lines.append(line)
elif copy:
lines.append(line)
if not from_found:
print 'Could not find match for from regex "%s"' % from_text
sys.exit(1)
if not to_found and to_text != impossible_text:
print 'Could not find match for to regex "%s"' % to_text
sys.exit(1)
print ''.join(lines).rstrip()
def remove_text(filestr, from_text, from_included, to_text, to_included):
"""
Remove a portion of text from the string filestr.
See remove() for explanation of arguments.
"""
impossible_text = '@\n\n@' # must be compatible with remove()
from_found = False
to_found = False
remove = False
lines = [] # survived lines
for line in filestr.splitlines():
m_from = re.search(from_text, line)
m_to = re.search(to_text, line)
if m_from:
remove = True
from_found = True
if not from_included:
lines.append(line)
elif m_to:
remove = False
to_found = True
if not to_included:
lines.append(line)
elif not remove:
lines.append(line)
return '\n'.join(lines).rstrip() + '\n', from_found, to_found
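# Minimal sketch of the semantics of remove_text (illustrative strings only):
#   >>> s = 'keep\n--- begin short answer in exercise\nhidden\n--- end short answer in exercise\nkeep too\n'
#   >>> remove_text(s, '--- begin short answer in exercise', True,
#   ...             '--- end short answer in exercise', True)
#   ('keep\nkeep too\n', True, True)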
def _usage_remove():
print 'Usage: doconce remove --from[-] from-text [--to[-] to-text] file'
def remove():
"""
Remove a portion of text from a file, starting with from-text
(included if specified as --from, not included if specified
via --from-) up to the first occurrence of to-text (--to implies
that the last line is included, --to- excludes the last line).
If --to[-] is not specified, all text from from-text to the end of
the file is removed.
from-text and to-text are specified as regular expressions.
"""
if len(sys.argv) < 4:
_usage_remove()
sys.exit(0)
filename = sys.argv[-1]
f = open(filename, 'r')
filestr = f.read()
f.close()
if not sys.argv[1].startswith('--from'):
print 'missing --from fromtext or --from- fromtext option on the command line'
sys.exit(1)
from_included = sys.argv[1] == '--from'
from_text = sys.argv[2]
# Treat --to
# impossible text (has newlines) that will never be found
# is used as to-text if this is not specified
impossible_text = '@\n\n@'
try:
to_included = sys.argv[3] == '--to'
to_text = sys.argv[4]
except IndexError:
to_included = True
to_text = impossible_text
filestr, from_found, to_found = remove_text(
filestr, from_text, from_included, to_text, to_included)
if not from_found:
print 'Could not find match for from regex "%s"' % from_text
sys.exit(1)
if not to_found and to_text != impossible_text:
print 'Could not find match for to regex "%s"' % to_text
sys.exit(1)
os.rename(filename, filename + '.old~~')
f = open(filename, 'w')
f.write(filestr)
f.close()
def _usage_remove_exercise_answers():
print 'Usage: doconce remove_exercise_answers file_in_some_format'
def remove_exercise_answers():
if len(sys.argv) < 2:
_usage_remove_exercise_answers()
sys.exit(0)
filename = sys.argv[1]
f = open(filename, 'r')
filestr = f.read()
f.close()
envirs = ['solution of exercise', 'short answer in exercise']
from_texts = [r'--- begin ' + envir for envir in envirs]
to_texts = [r'--- end ' + envir for envir in envirs]
for from_text, to_text in zip(from_texts, to_texts):
filestr, from_found, to_found = remove_text(
filestr, from_text, True, to_text, True)
if from_found and to_found:
pass
else:
print 'no answers/solutions to exercises found in', filename
os.rename(filename, filename + '.old~~')
f = open(filename, 'w')
f.write(filestr)
f.close()
def clean():
"""
Remove all DocOnce-generated files and the Trash dir if it exists.
Place new removed files in Trash.
For example, if ``d1.do.txt`` and ``d2.do.txt`` are found,
all files ``d1.*`` and ``d2.*`` are deleted, except when ``*``
is ``.do.txt`` or ``.sh``. The subdirectories ``sphinx-*``,
``sphinx_*``, ``html_images``, ``latex_figs``, and
``standalone_exercises`` are also removed,
as well as all ``*~`` and ``tmp*`` files and all files made from
splitting (split_html, split_rst).
"""
if os.path.isdir('Trash'):
print 'Removing Trash directory'
shutil.rmtree('Trash')
removed = []
trash_files = ['_doconce_debugging.log', '__tmp.do.txt', 'texput.log']
# "secret" files (.trash$hash)
trash_files += glob.glob('.trash[a-f]*') + glob.glob('._.trash[a-f]*')
for trash_file in trash_files:
if os.path.isfile(trash_file):
removed.append(trash_file)
doconce_files = glob.glob('*.do.txt')
for dof in doconce_files:
namestem = dof[:-7]
generated_files = glob.glob(namestem + '.*')
extensions_to_keep = '.sh', '.do.txt'
#print 'generated_files:', namestem + '.*', generated_files
for ext in extensions_to_keep:
filename = namestem + ext
if os.path.isfile(filename):
generated_files.remove(filename)
for f in generated_files:
removed.append(f)
removed.extend(glob.glob('*~') + glob.glob('.*~') + glob.glob('tmp*') +
glob.glob(_part_filename_wildcard + '.html') +
glob.glob(_part_filename_wildcard + '.rst') +
glob.glob('.*.exerinfo') +
glob.glob('.*.quiz*') +
glob.glob('.*_html_file_collection'))
directories = ['html_images', 'latex_figs', 'standalone_exercises'] \
+ glob.glob('sphinx-*') + \
glob.glob('sphinx_*')
for d in directories:
if os.path.isdir(d):
removed.append(d)
if removed:
print 'Remove:', ' '.join(removed), '(-> Trash)'
os.mkdir('Trash')
for f in removed:
try:
shutil.move(f, 'Trash')
except shutil.Error, e:
if 'already exists' in str(e):
pass
else:
print 'Move problems with', f, e
if os.path.isdir(f):
shutil.rmtree(f)
def _usage_guess_encoding():
print 'Usage: doconce guess_encoding filename'
def _encoding_guesser(filename, verbose=False):
"""Try to guess the encoding of a file."""
f = open(filename, 'r')
text = f.read()
f.close()
encodings = ['ascii', 'us-ascii', 'iso-8859-1', 'iso-8859-2',
'iso-8859-3', 'iso-8859-4', 'cp37', 'cp930', 'cp1047',
'utf-8', 'utf-16', 'windows-1250', 'windows-1252',]
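# Note: any byte string decodes successfully as iso-8859-1, so candidates
# listed after it (e.g. utf-8) are only reached if an earlier candidate
# raises; the returned guess is a rough heuristic, not a certainty.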
for encoding in encodings:
try:
if verbose:
print 'Trying encoding', encoding, 'with unicode(text, encoding)'
unicode(text, encoding, "strict")
except Exception, e:
if verbose:
print 'failed:', e
else:
break
return encoding
def guess_encoding():
if len(sys.argv) != 2:
_usage_guess_encoding()
sys.exit(0)
filename = sys.argv[1]
print _encoding_guesser(filename, verbose=False)
def _usage_change_encoding():
print 'Usage: doconce change_encoding from-encoding to-encoding file1 file2 ...'
print 'Example: doconce change_encoding utf-8 latin1 myfile.do.txt'
def _change_encoding_unix(filename, from_enc, to_enc):
backupfile = filename + '.old~~'
if sys.platform == 'linux2':
cmd = 'iconv -f %s -t %s %s --output %s' % \
(from_enc, to_enc, backupfile, filename)
elif sys.platform == 'darwin':
cmd = 'iconv -f %s -t %s %s > %s' % \
(from_enc, to_enc, backupfile, filename)
else:
print 'changing encoding is not implemented on Windows machines'
_abort()
os.rename(filename, backupfile)
failure = system(cmd, abort_on_failure=False)
if failure:
# Restore file
shutil.copy(backupfile, filename)
os.remove(backupfile)
def _change_encoding_python(filename, from_enc, to_enc):
f = codecs.open(filename, 'r', from_enc)
text = f.read()
f.close()
f = codecs.open(filename, 'w', to_enc)
f.write(text)
f.close()
def change_encoding():
if len(sys.argv) < 4:
_usage_change_encoding()
sys.exit(0)
from_encoding = sys.argv[1]
to_encoding = sys.argv[2]
filenames = wildcard_notation(sys.argv[3:])
for filename in filenames:
_change_encoding_unix(filename, from_encoding, to_encoding)
# Perhaps better alternative with pure Python:
#_change_encoding_python(filename, from_encoding, to_encoding)
html_images = 'html_images.zip'
reveal_files = 'reveal.js.zip'
csss_files = 'csss.zip'
deck_files = 'deck.js.zip'
latexstyle_files = 'latex_styles.zip'
def html_imagefile(imagename):
filename = os.path.join(html_images[:-4], imagename + '.png')
return filename
def copy_datafiles(datafile):
"""
Get a doconce datafile, ``files.zip`` or ``files.tar.gz``, to
the current directory and pack it out unless the subdirectory
``files`` (with all the files) already exists.
"""
if datafile.endswith('.zip'):
subdir = datafile[:-4]
import zipfile
uncompressor = zipfile.ZipFile
elif datafile.endswith('.tar.gz'):
subdir = datafile[:-7]
import tarfile
uncompressor = tarfile.TarFile
if not os.path.isdir(subdir):
import doconce
doconce_dir = os.path.dirname(doconce.__file__)
doconce_datafile = os.path.join(doconce_dir, datafile)
shutil.copy(doconce_datafile, os.curdir)
uncompressor(datafile).extractall()
print 'made subdirectory', subdir
os.remove(datafile)
return True
else:
return False
def _usage_html_colorbullets():
print 'Usage: doconce html_colorbullets mydoc.html'
def html_colorbullets():
# A much better implementation, avoiding tables, is given
# here: http://www.eng.buffalo.edu/webguide/Bullet_Lists.html
"""
Replace unordered lists by a table, in order to replace
``<li>`` tags (and the default bullets) by
images of balls with colors.
"""
if len(sys.argv) <= 1:
_usage_html_colorbullets()
sys.exit(0)
red_bullet = 'bullet_red2.png'
green_bullet = 'bullet_green2.png'
#red_bullet = 'bullet_red1.png'
#green_bullet = 'bullet_green1.png'
filenames = sys.argv[1:]
for filename in filenames:
f = open(filename, 'r')
text = f.read()
f.close()
#if '<li>' in text:
# copy_datafiles(html_images) # copy html_images subdir if needed
lines = text.splitlines()
f = open(filename, 'w')
level = 0
for line in lines:
linel = line.lower()
if '<ul>' in linel:
level += 1
line = '<p><table border="0">\n'
if '</ul>' in linel:
line = '</td></tr></table>\n'
level -= 1
if '<li>' in linel:
line = line.replace('<li>', """</tr><p><tr><td valign='top'><img src="BULLET"></td><td>""")
if level == 1:
#image_filename = html_imagefile(red_bullet)
image_filename = 'http://hplgit.github.io/doconce/bundled/html_images/' + red_bullet
elif level >= 2:
#image_filename = html_imagefile(green_bullet)
image_filename = 'http://hplgit.github.io/doconce/bundled/html_images/' + green_bullet
line = line.replace('BULLET', image_filename)
f.write(line + '\n')
f.close()
def _usage_split_html():
print """\
Usage: doconce split_html mydoc.html --method=... --nav_button=name --pagination --reference="..." --font_size=slides
--method=split|space8|hrule|colorline specifies pagebreak
physical split with a new page (--method=split) or
just N blank lines (--method=spaceN) or a horizontal
rule (--method=hrule) with blank lines above and below, or
a colored line (--method=colorline).
Default is --method=split.
--nav_button=name sets the type of navigation button (next, previous):
text, gray1 (default), gray2, bigblue, blue, green.
See https://raw.github.com/hplgit/doconce/master/doc/src/manual/fig/nav_buttons.png
for examples on these types (from left to right).
A value like --nav_button=gray2,top gives buttons only at the top of the page,
gray2,top+bottom gives buttons at the top and bottom (default), while
gray2,bottom gives buttons only at the bottom.
If the "doconce format html" command used bootstrap styles (with
--html_style=bootstrap*|bootswatch*), set just --nav_button=top or
bottom (default) or top+bottom.
--pagination means that one can click on page numbers at the bottom
if a bootstrap theme is used in the document.
--font_size= is used to increase the font size for slides.
--font_size=slides gives 140% font size in the body text.
--font_size=180 gives 180% font size in the body text.
--reference=... is used to insert a reference for acknowledging where
the source of the text is published, typically the reference of a
book if the document is the HTML version of a chapter in the book.
Example:
--reference="This text is taken from Appendix H.2 in the book <em>A Primer on Scientific Programming with Python</em> by H. P. Langtangen, 4th edition, Springer, 2014."
"""
def split_html():
"""
Split html file into parts. Use !split command as separator between
parts.
"""
if len(sys.argv) <= 1:
_usage_split_html()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.html'):
basename = filename
filename += '.html'
else:
basename = filename[:-5]
method = misc_option('method=', 'split')
# Note: can only do tablify and support slidecell specifications
# if --method=split (tablify requires the file split into parts)
if method != 'split':
# Load text
f = open(filename, 'r')
filestr = f.read()
f.close()
if method.startswith('space'):
if len(method) > len('space'):
num_lines = int(method[5:])
else:
num_lines = 8
filestr = filestr.replace(
'<!-- !split -->',
'<!-- !split -->' + '<br>'*num_lines)
elif method in ('hr', 'hrule'):
filestr = filestr.replace(
'<!-- !split -->',
'<!-- !split -->' + '<br><br><br><hr><br><br><br>')
elif method in ('colorline',):
filestr = filestr.replace(
'<!-- !split -->',
'<!-- !split -->' + '<br><br><br><br><br><br><img src="%s"><br><br>'
% 'http://hplgit.github.io/doconce/bundled/html_images/colorline.png')
else:
header, parts, footer = get_header_parts_footer(filename, "html")
parts = tablify(parts, "html")
files = doconce_split_html(header, parts, footer, basename, filename)
print '%s now links to the generated files' % filename
print ', '.join(files)
if method != 'split':
# Remove notes
filestr = re.sub(r'^<!-- !bnotes.+?^<!-- !enotes -->', '',
filestr, flags=re.MULTILINE|re.DOTALL)
'''
# Fix font size for solarized *slides* - won't do this so it affects
# all kind of documents. And if method != 'split', we have one
# file and can crank up the font in the browser once and for all.
if re.search(r"""<link href=["']http.+?solarized.*?\.css""", filestr):
filestr = filestr.replace(r'<style type="text/css">',
"""<style type="text/css">
body, td {font-size: 140%;}
h1 {font-size: 200%;}
h2 {font-size: 180%;}
""")
'''
f = open(filename, 'w')
f.write(filestr)
f.close()
if '<!-- !bslidecell' in filestr:
print '*** warning: !bslidecell-!eslidecell constructions are'
print ' ignored unless --method=split is specified'
print ' (--method=spaceX|hr|hrule|colorline all ignores cells)'
def _usage_slides_html():
print """
Usage: doconce slides_html mydoc.html slide_type --html_slide_theme=themename --html_footer_logo=name --nav_button=name --font_size=slides
slide_type: reveal deck csss dzslides
note: reveal and deck slide styles are doconce variants, different from the
original styles
(note: remark style is not generated by slides_html, but by slides_markdown)
alternative: doconce slides_html mydoc.html all (generate all types of slides)
themename is the reveal or deck theme:
reveal.js: beige, beigesmall, solarized, serif, simple, blood, sky,
moon, night, darkgray, cbc, simula
deck.js: neon, sandstone.aurora, sandstone.dark, sandstone.mdn,
sandstone.mightly, sandstone.firefox, sandstone.default,
sandstone.light, beamer, mnml, swiss, web-2.0, cbc
(The generated HTML file contains a comment with link tags for the
the various stylesheets for the various available themes.)
--html_footer_logo=name sets the footer logo to be used, name is
a full URL to the logo image file,
or cbc_footer, cbc_symbol, simula_footer, simula_symbol,
uio_footer, uio_symbol (for which the full path is automatically created)
--nav_button=name sets the type of navigation button (next, previous):
text, gray1 (default), gray2, bigblue, blue, green.
See https://raw.github.com/hplgit/doconce/master/doc/src/manual/fig/nav_buttons.png
for examples on these types (from left to right).
A value like gray2,top gives buttons only at the top of the page,
gray2,top+bottom gives buttons at the top and bottom (default), while
gray2,bottom gives buttons only at the bottom.
If the "doconce format html" command used bootstrap styles (with
--html_style=bootstrap*|bootswatch*), set just --nav_button=top or
bottom (default) or top+bottom.
--font_size= is used to increase the font size for slides.
--font_size=slides gives 140% font size in the body text.
--font_size=180 gives 180% font size in the body text.
--pagination means that one can click on page numbers if a bootstrap
theme is used in the document.
Note: if slide_tp is doconce, the doconce split_html command is
more versatile than slides_html since it allows the --method
argument, which can be used for physical splits (as in slides_html)
or "split" via just space or rules for separating the parts in
one (big) file.
"""
def slides_html():
"""
Split html file into slides and typeset slides using
various tools. Use !split command as slide separator.
"""
# Overview: http://www.impressivewebs.com/html-slidedeck-toolkits/
# Overview: http://www.sitepoint.com/5-free-html5-presentation-systems/
# x http://leaverou.github.com/CSSS/
# x http://lab.hakim.se/reveal-js/ (easy and fancy)
# x http://paulrouget.com/dzslides/ (easy and fancy, Keynote like)
# http://imakewebthings.com/deck.js/ (also easy)
# http://code.google.com/p/html5slides/ (also easy)
# http://slides.seld.be/?file=2010-05-30+Example.html#1 (also easy)
# http://www.w3.org/Talks/Tools/Slidy2/#(1) (also easy)
# http://johnpolacek.github.com/scrolldeck.js/ (inspired by reveal.js)
# http://meyerweb.com/eric/tools/s5/ (easy)
# https://github.com/mbostock/stack (very easy)
# https://github.com/markdalgleish/fathom
# http://shama.github.com/jmpress.js/#/home # jQuery version of impress
# https://github.com/bartaz/impress.js/
# Fancy and instructive demo:
# http://yihui.name/slides/2011-r-dev-lessons.html
# (view the source code)
# pandoc can make dzslides and embeds all javascript (no other files needed)
# pandoc -s -S -i -t dzslides --mathjax my.md -o my.html
if len(sys.argv) <= 2:
_usage_slides_html()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.html'):
filename += '.html'
if not os.path.isfile(filename):
print 'doconce file in html format, %s, does not exist' % filename
_abort()
basename = os.path.basename(filename)
filestem = os.path.splitext(basename)[0]
slide_type = sys.argv[2]
for arg in sys.argv[1:]:
if arg.startswith('--method='):
opt = arg.split('=')[1]
if opt != 'split':
print '*** error: slides_html cannot accept --method=%s' % opt
print ' (the slides will always be split)'
print ' use split_html with --method=...'
_abort()
# Treat the special case of generating a script for generating
# all the different slide versions that are supported
if slide_type == 'all':
#from doconce.misc import recommended_html_styles_and_pygments_styles
r = recommended_html_styles_and_pygments_styles()
f = open('tmp_slides_html_all.sh', 'w')
f.write('#!/bin/sh\n\n')
f.write('doconce format html %s SLIDE_TYPE=dummy SLIDE_THEME=dummy\ndoconce slides_html %s doconce\n\n' %
(filestem, filestem))
for sl_tp in r:
for style in r[sl_tp]:
pygm_style = r[sl_tp][style][0]
if sl_tp == 'html':
if style.startswith('solarized'):
f.write('doconce format html %s SLIDE_TYPE=%s SLIDE_THEME=%s --html_style=%s --html_output=%s_html_%s\ndoconce slides_html %s_html_%s doconce --nav_button=gray2,bottom --font_size=slides\n\n' % (filestem, sl_tp, style, style, filestem, style, filestem, style))
if style == 'solarized3':
f.write('doconce format html %s SLIDE_TYPE=%s SLIDE_THEME=%s --html_style=%s --html_output=%s_html_%s_space\ndoconce split_html %s_html_solarized3_space --method=space10\n\n' % (filestem, sl_tp, style, style, filestem, style, filestem))
else:
method = 'colorline' if style == 'blueish' else 'space8'
f.write('doconce format html %s --pygments_html_style=%s --keep_pygments_html_bg SLIDE_TYPE=%s SLIDE_THEME=%s --html_style=%s --html_output=%s_html_%s\ndoconce split_html %s_html_%s --method=%s # one long file\n\n' % (filestem, pygm_style, sl_tp, style, style, filestem, style, filestem, style, method))
else:
f.write('doconce format html %s --pygments_html_style=%s --keep_pygments_html_bg SLIDE_TYPE=%s SLIDE_THEME=%s\ndoconce slides_html %s %s --html_slide_theme=%s\ncp %s.html %s_%s_%s.html\n\n' % (filestem, pygm_style, sl_tp, style, filestem, sl_tp, style, filestem, filestem, sl_tp, style.replace('.', '_')))
f.write('echo "Here are the slide shows:"\n/bin/ls %s_*_*.html\n' % filestem)
print 'run\n sh tmp_slides_html_all.sh\nto generate the slides'
#print 'names:', ' '.join(glob.glob('%s_*_*.html' % filestem))
return
# --- Create a slide presentation from the HTML file ---
header, parts, footer = get_header_parts_footer(filename, "html")
parts = tablify(parts, "html")
filestr = None
if slide_type == 'doconce':
doconce_split_html(header, parts, footer, filestem, filename, slides=True)
elif slide_type in ('reveal', 'csss', 'dzslides', 'deck', 'html5slides'):
filestr = generate_html5_slides(header, parts, footer,
basename, filename, slide_type)
else:
print 'unknown slide type "%s"' % slide_type
if filestr is not None:
# Make whitespace nicer (clean up code)
from html import html_remove_whitespace
filestr = html_remove_whitespace(filestr)
# More fixes for html5 slides
filestr = re.sub(r'<section>\s+(?=<h[12])', r'<section>\n', filestr)
filestr = re.sub(r'<p>\n</section>', '</section>', filestr)
filestr = re.sub(r'\s+</section>', '\n</section>', filestr)
f = open(filename, 'w')
f.write(filestr)
f.close()
print 'slides written to', filename
def tablify(parts, format="html"):
"""
Detect !bslidecell XY and !eslidecell environments and typeset
elements of a part (slide page) as a table.
"""
begin_comment, end_comment = _format_comments(format)
for i in range(len(parts)):
part = ''.join(parts[i])
if '%s !bslidecell' % begin_comment in part:
pattern = r'%s !bslidecell +(\d\d) *([.0-9 ]*?)%s\s+(.+?)%s !eslidecell *%s' % (begin_comment, end_comment, begin_comment, end_comment)
pattern00 = r'%s !bslidecell +00 *[.0-9 ]*?%s\s+(.+?)%s !eslidecell *%s' % (begin_comment, end_comment, begin_comment, end_comment)
cpattern = re.compile(pattern, re.DOTALL)
cells = cpattern.findall(part)
#print 'CELLS:'; import pprint; pprint.pprint(cells)
data = []
row_max = 0
col_max = 0
for pos, width, entry in cells:
try:
width = float(width)
except:
width = None
ypos = int(pos[0])
xpos = int(pos[1])
if ypos > row_max:
row_max += 1
if xpos > col_max:
col_max += 1
data.append([(ypos, xpos), entry, width])
table = [[None]*(col_max+1) for j in range(row_max+1)]
for r in range(len(table)):
for s in range(len(table[r])):
table[r][s] = ['', None]
#print 'data:', data
for pos, body, width in data:
table[pos[0]][pos[1]] = [body, width]
#print 'table 1:'; import pprint; pprint.pprint(table)
# Check consistency of widths
for r, row in enumerate(table):
widths = []
has_width = False
for column, width in row:
if width is not None:
has_width = True
widths.append(width)
if has_width:
if len(row) != len(widths):
# Can accept if only two columns
if len(row) == 2 and len(widths) == 1:
# Find the missing one
if table[r][0][1] is None:
table[r][0][1] = 1 - widths[0]
elif table[r][1][1] is None:
table[r][1][1] = 1 - widths[0]
else:
print '*** error: must specify width of all columns in slidecell table!'
print ' ',
for s, c in enumerate(row):
column, width = c
print ' %d%d: ' % (r, s),
if width is None:
print 'no width',
else:
print '%g' % width,
_abort()
else:
width = 1./len(row)
for s, c in enumerate(row):
table[r][s][1] = width
#print 'table 2:'; import pprint; pprint.pprint(table)
if format == 'html':
# typeset table in html
tbl = '\n<table border="0">\n'
for row in table:
tbl += '<tr>\n'
for column, width in row:
tbl += '<td class="padding">\n%s</td>\n' % (column)
# This is an attempt to control the width of columns,
# but it does not work well.
#tbl += '<td class="padding"><div style="width: %d%%"> %s </div></td>\n' % (int(100*width), column)
tbl += '</tr>\n'
tbl += '</table>\n'
# Put the whole table where cell 00 was defined
cpattern00 = re.compile(pattern00, re.DOTALL)
#part = cpattern00.sub(tbl, part) # does not preserve math \
part = cpattern00.sub('XXXYYY@#$', part) # some ID and then replace
part = part.replace('XXXYYY@#$', tbl) # since replace handles \
# Let the other cells be empty
part = cpattern.sub('', part)
#print 'part:'; pprint.pprint(part)
part = [line + '\n' for line in part.splitlines()]
parts[i] = part
elif format.endswith('latex'):
# typeset table in beamer latex
tbl = ''
for row in table:
tbl += r'\begin{columns}' + '\n'
for column, width in row:
if width is None:
raise ValueError('Bug: width is None')
tbl += r'\column{%g\textwidth}' % width + \
'\n%s\n' % column
tbl += r'\end{columns}' + '\n'
tbl += '\n'
# Put the whole table where cell 00 was defined
cpattern00 = re.compile(pattern00, re.DOTALL)
#part = cpattern00.sub(tbl, part) # does not preserve math \
part = cpattern00.sub('XXXYYY@#$', part) # some ID and then replace
part = part.replace('XXXYYY@#$', tbl) # since replace handles \
# Let the other cells be empty
part = cpattern.sub('', part)
#print 'part:'; pprint.pprint(part)
part = [line + '\n' for line in part.splitlines()]
parts[i] = part
return parts
def _format_comments(format='html'):
if format == 'html':
return '<!--', '-->'
elif format == 'latex':
return '%', ''
elif format == 'rst' or format == 'sphinx':
return '..', ''
else:
return None, None
def get_header_parts_footer(filename, format='html'):
"""Return list of lines for header, parts split by !split, and footer."""
from doconce import main_content_char
header = []
footer = []
parts = [[]]
if format in ('latex', 'pdflatex', 'html'):
loc = 'header'
else:
loc = 'body' # no header
begin_comment, end_comment = _format_comments(format)
f = open(filename, 'r')
for line in f:
if re.search(r'^%s %s+ main content %s+ ?%s' %
(begin_comment, main_content_char,
main_content_char, end_comment), line):
loc = 'body'
if re.search(r'^%s !split.*?%s' % (begin_comment, end_comment), line):
parts.append([])
if re.search(r'^%s %s+ end of main content %s+ ?%s' %
(begin_comment, main_content_char,
main_content_char, end_comment), line):
loc = 'footer'
if loc == 'header':
header.append(line)
elif loc == 'body':
parts[-1].append(line)
elif loc == 'footer':
footer.append(line)
f.close()
return header, parts, footer
def doconce_split_html(header, parts, footer, basename, filename, slides=False):
"""Native doconce style splitting of HTML file into parts."""
import html
header_str = '\n'.join(header)
bootstrap = '<!-- Bootstrap style: ' in header_str or \
bool(re.search(r'<link .*href=.+?boots(trap|watch).*\.css', header_str))
if bootstrap:
local_navigation_pics = False # navigation is in the template
def bootstrap_navigation(pn, prev_part_filename, next_part_filename):
text = '<!-- navigation buttons at the bottom of the page -->'
if '--pagination' in sys.argv:
# Use Bootstrap pagination
text += '\n<ul class="pagination">\n'
if pn > 0:
text += '<li><a href="%s">«</a></li>\n' % prev_part_filename
max_pagination_pages = 16
#max_pagination_pages = 4 # for debugging
if len(parts) <= max_pagination_pages/2:
# Show all pages
for i in range(len(parts)):
if i == pn:
text += ' <li class="active"><a href="%s">%d</a></li>\n' % (_part_filename % (basename, i) + '.html', i+1)
else:
text += ' <li><a href="%s">%d</a></li>\n' % (_part_filename % (basename, i) + '.html', i+1)
else:
# Show first, last, and pages around the current one
if pn >= max_pagination_pages/2 + 2:
i = 0
text += ' <li><a href="%s">%d</a></li>\n' % (_part_filename % (basename, i) + '.html', i+1)
text += ' <li><a href="">...</a></li>\n'
start = max(0, pn-(max_pagination_pages/2))
stop = min(len(parts), pn+max_pagination_pages/2+2)
if start == 1:
# Special case, add page 1
text += ' <li><a href="%s">%d</a></li>\n' % (_part_filename % (basename, 0) + '.html', 0+1)
for i in range(start, stop):
if i == pn:
text += ' <li class="active"><a href="%s">%d</a></li>\n' % (_part_filename % (basename, i) + '.html', i+1)
else:
text += ' <li><a href="%s">%d</a></li>\n' % (_part_filename % (basename, i) + '.html', i+1)
if pn <= (len(parts) - (max_pagination_pages/2 + 3)):
text += ' <li><a href="">...</a></li>\n'
i = len(parts)-1
text += ' <li><a href="%s">%d</a></li>\n' % (_part_filename % (basename, i) + '.html', i+1)
if pn < len(parts)-1:
text += ' <li><a href="%s">»</a></li>\n' % next_part_filename
text += '</ul>\n'
else:
# Use plain next and prev buttons with arrows, but
# Bootstrap style
text += '\n<ul class="pager">\n'
if pn > 0:
text += """\
<li class="previous">
<a href="%s">← Prev</a>
</li>
""" % prev_part_filename
if pn < len(parts)-1:
text += """\
<li class="next">
<a href="%s">Next →</a>
</li>
""" % next_part_filename
text += '</ul>\n'
return text
else:
local_navigation_pics = False # avoid copying images to subdir...
nav_button = 'gray1'
for arg in sys.argv:
if arg.startswith('--nav_button='):
nav_button = arg.split('=')[1]
break
if bootstrap:
nav_button_pos = 'bottom'
if nav_button in ('top', 'top+bottom'):
nav_button_pos = nav_button
else:
nav_button_pos = 'top+bottom'
if ',' in nav_button:
nav_button, nav_button_pos = nav_button.split(',')
# Values after comma: top, bottom, top+bottom
# Map nav_button name to actual image file in bundled/html_images
prev_button = next_button = ''
if nav_button == 'gray1':
prev_button = 'prev1'
next_button = 'next1'
elif nav_button == 'gray2':
prev_button = 'prev2'
next_button = 'next2'
elif nav_button == 'bigblue':
prev_button = 'prev3'
next_button = 'next3'
elif nav_button == 'blue':
prev_button = 'prev4'
next_button = 'next4'
elif nav_button == 'green':
prev_button = 'Knob_Left'
next_button = 'Knob_Forward'
elif nav_button in ('text', 'top', 'bottom', 'top+bottom'):
pass
else:
print '*** warning: --nav_button=%s is an illegal value, text,top+bottom is used' % nav_button
nav_button = 'text'
nav_button_pos = 'top+bottom'
header_part_line = '' # 'colorline'
if local_navigation_pics:
copy_datafiles(html_images) # copy html_images subdir if needed
if prev_button:
prev_button_filename = html_imagefile(prev_button)
next_button_filename = html_imagefile(next_button)
html.add_to_file_collection(prev_button_filename, filename, 'a')
html.add_to_file_collection(next_button_filename, filename, 'a')
else:
if prev_button:
prev_button_filename = 'http://hplgit.github.io/doconce/bundled/html_images/%s.png' % prev_button
next_button_filename = 'http://hplgit.github.io/doconce/bundled/html_images/%s.png' % next_button
# Fix internal links to point to the right splitted file
name_pattern = r' id="([^"]+?)">'
parts_name = [re.findall(name_pattern, ''.join(part)) for part in parts]
parts_name.append(re.findall(name_pattern, ''.join(header)))
parts_name.append(re.findall(name_pattern, ''.join(footer)))
href_pattern = r'<a href="#([^"]+?)"'
parts_href = [re.findall(href_pattern, ''.join(part)) for part in parts]
parts_href.append(re.findall(href_pattern, ''.join(header)))
parts_href.append(re.findall(href_pattern, ''.join(footer)))
# id="..." can also define anchors (e.g., in bootstrap styles)
id_pattern = r' id="([^"]+?)"'
all_ids = [re.findall(id_pattern, ''.join(part)) for part in parts] + \
[re.findall(id_pattern, ''.join(header)),
re.findall(id_pattern, ''.join(footer))]
# Flatten
ids = []
for sublist in all_ids:
for id in sublist:
ids.append(id)
parts_name2part = {} # map a name to where it is defined
for i in range(len(parts_name)):
for name in parts_name[i]:
parts_name2part[name] = i
import pprint
# Substitute hrefs in each part, plus header and footer
for i in range(len(parts_href)):
for name in parts_href[i]:
n = parts_name2part.get(name, None) #part where this name is defined
if n is None and name not in ids:
print '*** error: <a href="#%s" has no corresponding anchor (<a name= or some id=)' % name
print '    Reasons: 1) wrong reference, 2) no BIBFILE, 3) bug in DocOnce.'
_abort()
continue # go to next if abort is turned off
if n is not None and n != i:
# Reference to label in another part, except the header
# and footer (which is included in all parts)
name_def_filename = _part_filename % (basename, n) + '.html'
if i < len(parts):
part = parts[i]
elif i == len(parts):
part = header
elif i == len(parts)+1:
part = footer
text = ''.join(part).replace(
'<a href="#%s"' % name,
'<a href="%s#%s"' % (name_def_filename, name))
# Side effect: will substitute in header and footer
# when it should not. This is fixed when the whole
# file is constructed.
if i < len(parts):
parts[i] = text.splitlines(True)
elif i == len(parts):
header = text.splitlines(True)
elif i == len(parts)+1:
footer = text.splitlines(True)
# Treat \eqref and labels: MathJax does not support references
# to eq. labels in other files.
# Also, skip equation references to external documents.
label_pattern = r'\label\{(.+?)\}' # label in latex equations
parts_label = [re.findall(label_pattern, ''.join(part)) for part in parts]
eqref_pattern = r'\eqref\{(.+?)\}'
ref_pattern = r'ref(ch)?\[([^\]]*?)\]\[([^\]]*?)\]\[([^\]]*?)\]'
parts_eqref = [re.findall(eqref_pattern,
re.sub(ref_pattern, '', ''.join(part)))
for part in parts]
parts_label2part = {} # map an eq. label to where it is defined
for i in range(len(parts_label)):
for label in parts_label[i]:
parts_label2part[label] = i
# Check if there are eqrefs to undefined labels
if misc_option('labelcheck=', 'off') == 'on':
undefined_labels = []
for i in range(len(parts_eqref)):
for label in parts_eqref[i]:
if label not in parts_label2part:
undefined_labels.append(label)
if undefined_labels:
for label in undefined_labels:
print '*** error: equation ref (ref{%s}) but no label{%s}' % (label, label)
print '*** error: found references to undefined equation labels'
print ' (use generalized references ref[][][] if labels are'
print ' defined outside this doconce document)'
_abort()
# Substitute eqrefs in each part.
# MathJax cannot refer to labels in other HTML files.
# We generate tag number for each label, in the right numbering
# and use tags to refer to equations.
# Info on http://stackoverflow.com/questions/16339000/how-to-refer-to-an-equation-in-a-different-page-with-mathjax
# Tags are numbered globally
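# For example (illustrative label and file names): if \label{eq:flux} is the
# third label in the whole document and is defined in part 2, the equation
# gets \tag{3} there, and an \eqref{eq:flux} in another part is rewritten to
# <a href="._mydoc002.html#mjx-eqn-3">(3)</a>, while a reference in the same
# part becomes <a href="#mjx-eqn-3">(3)</a>.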
labels = [] # Hold all labels in a list (not list of list as parts_label)
for i in parts_label:
labels += i
label2tag = {}
for i in range(len(labels)):
label2tag[labels[i]] = i+1
# Go from AMS to non equationNumering in MathJax since we do not
# want any equation without label to have numbers (instead we
# control all numbers here by inserting \tag)
for i in range(len(header)):
if 'autoNumber: "AMS"' in header[i]:
header[i] = header[i].replace('autoNumber: "AMS"', 'autoNumber: "none"')
break
# Insert tags in each part
for i in range(len(parts)):
text = ''.join(parts[i])
if r'\label{' in text:
labels = re.findall(label_pattern, text)
for label in labels:
text = text.replace(
r'\label{%s}' % label,
r'\tag{%s}' % (label2tag[label]))
parts[i] = text.splitlines(True)
# Substitute all \eqrefs (can only have tags, not labels for
# right navigation to an equation)
for i in range(len(parts_eqref)):
for label in parts_eqref[i]:
if not label in parts_label2part:
continue
n = parts_label2part[label] # part where this label is defined
if i < len(parts):
part = parts[i]
text = ''.join(part)
if n != i:
# Reference to equation with label in another file
#print '*** warning: \\eqref{%s} to label in another HTML file will appear as (eq)' % (label)
label_def_filename = _part_filename % (basename, n) + '.html'
text = text.replace(
r'\eqref{%s}' % label,
'<a href="%s#mjx-eqn-%s">(%s)</a>' %
(label_def_filename, label2tag[label], label2tag[label]))
else:
text = text.replace(
r'\eqref{%s}' % label,
'<a href="#mjx-eqn-%s">(%s)</a>' %
(label2tag[label], label2tag[label]))
if i < len(parts):
parts[i] = text.splitlines(True)
generated_files = []
for pn, part in enumerate(parts):
header_copy = header[:]
if bootstrap:
# Highlight the first section in this part in the navigation in the header
m = re.search(r'<h(1|2|3).*?>(.+?)<', ''.join(part))
if m:
first_header = m.group(2).strip()
for k in range(len(header_copy)):
if 'navigation toc:' in header[k]:
m2 = re.search(r'<li><a href="(.+?)">', header[k])
if m2:
if m2.group(1) == first_header:
header_copy[k] = header[k].replace(
'<li>', '<li class="active">')
else:
print '*** error: doconce bug: wrong syntax in navigation toc for bootstrap styles'
_abort()
lines = header_copy[:]
lines.append('<a name="part%04d"></a>\n' % pn)
# Decoration line?
if header_part_line and not bootstrap:
if local_navigation_pics:
header_part_line_filename = html_imagefile(header_part_line)
else:
header_part_line_filename = 'http://hplgit.github.io/doconce/bundled/html_images/%s.png' % header_part_line
lines.append("""
<p><br><img src="%s"><p><br><p>
""" % header_part_line_filename)
part_filename = _part_filename % (basename, pn) + '.html'
prev_part_filename = _part_filename % (basename, pn-1) + '.html'
next_part_filename = _part_filename % (basename, pn+1) + '.html'
generated_files.append(part_filename)
if bootstrap:
# Make navigation arrows
prev_ = next_ = ''
buttons = bootstrap_navigation(pn, prev_part_filename, next_part_filename)
# Add jumbotron button reference on first page
if pn == 0:
for i in range(len(part)):
if part[i].startswith('<!-- potential-jumbotron-button -->'):
part[i] = part[i].replace(
'<!-- potential-jumbotron-button -->',
'\n\n<p><a href="%s" class="btn btn-primary btn-lg">Read »</a></p>\n\n' % next_part_filename)
break
if 'top' in nav_button_pos:
lines += buttons.splitlines(True)
elif 'top' in nav_button_pos:
# Simple navigation buttons at the top and bottom of the page
# (only at bottom if the function argument slides is True)
if not slides:
lines.append('<p>\n<!-- begin top navigation -->\n') # for easy removal
# Need a table for navigation pics, otherwise they cannot
# be on the same line
lines.append('<table style="width: 100%"><tr><td>\n')
if pn > 0:
if nav_button == 'text':
lines.append("""\
<div style="text-align: left;"><a href="%s">« Previous</a></div>
""" % (prev_part_filename))
else:
lines.append("""\
<div style="text-align: left;"><a href="%s"><img src="%s" border=0 alt="« Previous"></a></div>
""" % (prev_part_filename, prev_button_filename))
lines.append('</td><td>\n')
if pn < len(parts)-1:
if nav_button == 'text':
lines.append("""\
<div style="text-align: right;"><a href="%s">Next »</a></div>
""" % (next_part_filename))
else:
lines.append("""\
<div style="text-align: right;"><a href="%s"><img src="%s" border=0 alt="Next »"></a></div>
""" % (next_part_filename, next_button_filename))
lines.append('</td></tr></table>\n')
lines.append('<!-- end top navigation -->\n</p>\n\n')
lines.append('<p>\n')
# Main body of text
lines += part
# Navigation in the bottom of the page
lines.append('<p>\n')
if bootstrap:
if 'bottom' in nav_button_pos:
lines += buttons.splitlines(True)
lines += footer
elif 'bottom' in nav_button_pos:
lines.append('<!-- begin bottom navigation -->\n')
lines.append('<table style="width: 100%"><tr><td>\n')
if pn > 0:
if nav_button == 'text':
lines.append("""\
<div style="text-align: left;"><a href="%s">« Previous</a></div>
""" % (prev_part_filename))
else:
lines.append("""\
<div style="text-align: left;"><a href="%s"><img src="%s" border=0 alt="« Previous"></a></div>
""" % (prev_part_filename, prev_button_filename))
lines.append('</td><td>\n')
if pn < len(parts)-1:
if nav_button == 'text':
lines.append("""\
<div style="text-align: right;"><a href="%s">Next »</a></div>
""" % (next_part_filename))
else:
lines.append("""\
<div style="text-align: right;"><a href="%s"><img src="%s" border=0 alt="Next »"></a></div>
""" % (next_part_filename, next_button_filename))
lines.append('</td></tr></table>\n')
lines.append('<!-- end bottom navigation -->\n</p>\n\n')
lines += footer
html.add_to_file_collection(part_filename, filename, 'a')
part_text = ''.join(lines)
# Remove references with this file as prefix in href
# (some Bootstrap functionality does not work without this fix,
# and in general we should strip local references anyway)
part_text = part_text.replace('<a href="%s#' % part_filename,
'<a href="#')
# Insert reference to published version of document?
ackn = misc_option('reference=', None)
if ackn is not None:
ackn1 = '<p style="font-size:80%%">%s</p>' % ackn
ackn2 = '<div style="font-size:80%%">%s</div>' % ackn
ackn3 = '<center style="font-size:80%%">%s</center>' % ackn
if pn >= 1:
# Place the acknowledgment/reference at the top, right after
# the (only) !split command in each file
part_text = part_text.replace(
'<!-- !split -->', '<!-- !split -->\n%s' % ackn1)
elif pn == 0:
# Include in front page if jumbotron button
pattern = r'<p><a href=".+?" class="btn btn-primary btn-lg">Read »</a></p>'
m = re.search(pattern, part_text)
if m: # jumbotron button?
button = m.group()
part_text = part_text.replace(
button, '\n<p>' + ackn2 + '</p>\n' + button)
else:
# Put text after navigation
part_text = part_text.replace(
'<!-- end bottom navigation -->\n</p>\n',
'<!-- end bottom navigation -->\n</p>%s\n' % ackn1)
# Remove notes
part_text = re.sub(r'^<!-- !bnotes.+?^<!-- !enotes -->', '',
part_text, flags=re.MULTILINE|re.DOTALL)
# Fix font size for *slides*
font_size = misc_option('font_size=', 'standard')
if font_size == 'slides' or font_size.isdigit():
if font_size.isdigit():
font_size = int(font_size)
else:
font_size = 140
part_text = part_text.replace(r'<style type="text/css">',
"""<style type="text/css">
body, td {font-size: %d%%;}
h1 {font-size: 200%%;}
h2 {font-size: 180%%;}
""" % (font_size))
# Write part to ._*.html file
f = open(part_filename, 'w')
f.write(part_text)
f.close()
# Make sure main html file equals the first part
if pn == 0:
shutil.copy(part_filename, filename)
return generated_files
def generate_html5_slides(header, parts, footer, basename, filename,
slide_tp='reveal'):
if slide_tp not in ['dzslides', 'html5slides']:
copy_datafiles(eval(slide_tp + '_files')) # copy to subdir if needed
slide_syntax = dict(
reveal=dict(
subdir='reveal.js',
default_theme='beige',
main_style='reveal',
slide_envir_begin='<section>',
slide_envir_end='</section>',
pop=('fragment', 'li'),
notes='<aside class="notes">\n<!-- click "s" to activate -->\n\\g<1>\n</aside>\n',
head_header="""
<!-- reveal.js: http://lab.hakim.se/reveal-js/ -->
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
<meta name="apple-mobile-web-app-capable" content="yes" />
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
<link rel="stylesheet" href="reveal.js/css/%(main_style)s.css">
<link rel="stylesheet" href="reveal.js/css/theme/%(theme)s.css" id="theme">
<!--
<link rel="stylesheet" href="reveal.js/css/reveal.css">
<link rel="stylesheet" href="reveal.js/css/theme/beige.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/beigesmall.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/solarized.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/serif.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/night.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/moon.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/simple.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/sky.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/darkgray.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/default.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/cbc.css" id="theme">
<link rel="stylesheet" href="reveal.js/css/theme/simula.css" id="theme">
-->
<!-- For syntax highlighting -->
<link rel="stylesheet" href="reveal.js/lib/css/zenburn.css">
<!-- If the query includes 'print-pdf', use the PDF print sheet -->
<script>
document.write( '<link rel="stylesheet" href="reveal.js/css/print/' + ( window.location.search.match( /print-pdf/gi ) ? 'pdf' : 'paper' ) + '.css" type="text/css" media="print">' );
</script>
<style type="text/css">
hr { border: 0; width: 80%%; border-bottom: 1px solid #aaa}
p.caption { width: 80%%; font-size: 60%%; font-style: italic; text-align: left; }
hr.figure { border: 0; width: 80%%; border-bottom: 1px solid #aaa}
.reveal .alert-text-small { font-size: 80%%; }
.reveal .alert-text-large { font-size: 130%%; }
.reveal .alert-text-normal { font-size: 90%%; }
.reveal .alert {
padding:8px 35px 8px 14px; margin-bottom:18px;
text-shadow:0 1px 0 rgba(255,255,255,0.5);
border:5px solid #bababa;
-webkit-border-radius: 14px; -moz-border-radius: 14px;
border-radius: 14px;
background-position: 10px 10px;
background-repeat: no-repeat;
background-size: 38px;
padding-left: 30px; /* 55px; if icon */
}
.reveal .alert-block {padding-top:14px; padding-bottom:14px}
.reveal .alert-block > p, .alert-block > ul {margin-bottom:1em}
/*.reveal .alert li {margin-top: 1em}*/
.reveal .alert-block p+p {margin-top:5px}
/*.reveal .alert-notice { background-image: url(http://hplgit.github.io/doconce/bundled/html_images/small_gray_notice.png); }
.reveal .alert-summary { background-image:url(http://hplgit.github.io/doconce/bundled/html_images/small_gray_summary.png); }
.reveal .alert-warning { background-image: url(http://hplgit.github.io/doconce/bundled/html_images/small_gray_warning.png); }
.reveal .alert-question {background-image:url(http://hplgit.github.io/doconce/bundled/html_images/small_gray_question.png); } */
</style>
""",
body_header="""\
<body>
<div class="reveal">
<!-- Any section element inside the <div class="slides"> container
is displayed as a slide -->
<div class="slides">
""",
footer="""
</div> <!-- class="slides" -->
</div> <!-- class="reveal" -->
<script src="reveal.js/lib/js/head.min.js"></script>
<script src="reveal.js/js/reveal.min.js"></script>
<script>
// Full list of configuration options available here:
// https://github.com/hakimel/reveal.js#configuration
Reveal.initialize({
// Display navigation controls in the bottom right corner
controls: true,
// Display progress bar (below the horiz. slider)
progress: true,
// Display the page number of the current slide
slideNumber: true,
// Push each slide change to the browser history
history: false,
// Enable keyboard shortcuts for navigation
keyboard: true,
// Enable the slide overview mode
overview: true,
// Vertical centering of slides
//center: true,
center: false,
// Enables touch navigation on devices with touch input
touch: true,
// Loop the presentation
loop: false,
// Change the presentation direction to be RTL
rtl: false,
// Turns fragments on and off globally
fragments: true,
// Flags if the presentation is running in an embedded mode,
// i.e. contained within a limited portion of the screen
embedded: false,
// Number of milliseconds between automatically proceeding to the
// next slide, disabled when set to 0, this value can be overwritten
// by using a data-autoslide attribute on your slides
autoSlide: 0,
// Stop auto-sliding after user input
autoSlideStoppable: true,
// Enable slide navigation via mouse wheel
mouseWheel: false,
// Hides the address bar on mobile devices
hideAddressBar: true,
// Opens links in an iframe preview overlay
previewLinks: false,
// Transition style
transition: 'default', // default/cube/page/concave/zoom/linear/fade/none
// Transition speed
transitionSpeed: 'default', // default/fast/slow
// Transition style for full page slide backgrounds
backgroundTransition: 'default', // default/none/slide/concave/convex/zoom
// Number of slides away from the current that are visible
viewDistance: 3,
// Parallax background image
//parallaxBackgroundImage: '', // e.g. "'https://s3.amazonaws.com/hakim-static/reveal-js/reveal-parallax-1.jpg'"
// Parallax background size
//parallaxBackgroundSize: '' // CSS syntax, e.g. "2100px 900px"
theme: Reveal.getQueryHash().theme, // available themes are in reveal.js/css/theme
transition: Reveal.getQueryHash().transition || 'default', // default/cube/page/concave/zoom/linear/none
});
Reveal.initialize({
dependencies: [
// Cross-browser shim that fully implements classList - https://github.com/eligrey/classList.js/
{ src: 'reveal.js/lib/js/classList.js', condition: function() { return !document.body.classList; } },
// Interpret Markdown in <section> elements
{ src: 'reveal.js/plugin/markdown/marked.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
{ src: 'reveal.js/plugin/markdown/markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
// Syntax highlight for <code> elements
{ src: 'reveal.js/plugin/highlight/highlight.js', async: true, callback: function() { hljs.initHighlightingOnLoad(); } },
// Zoom in and out with Alt+click
{ src: 'reveal.js/plugin/zoom-js/zoom.js', async: true, condition: function() { return !!document.body.classList; } },
// Speaker notes
{ src: 'reveal.js/plugin/notes/notes.js', async: true, condition: function() { return !!document.body.classList; } },
// Remote control your reveal.js presentation using a touch device
//{ src: 'reveal.js/plugin/remotes/remotes.js', async: true, condition: function() { return !!document.body.classList; } },
// MathJax
//{ src: 'reveal.js/plugin/math/math.js', async: true }
]
});
Reveal.initialize({
// The "normal" size of the presentation, aspect ratio will be preserved
// when the presentation is scaled to fit different resolutions. Can be
// specified using percentage units.
width: 1170, // original: 960,
height: 700,
// Factor of the display size that should remain empty around the content
margin: 0.1,
// Bounds for smallest/largest possible scale to apply to content
minScale: 0.2,
maxScale: 1.0
});
</script>
<!-- begin footer logo
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 0px">
<img src="somelogo.png">
</div>
end footer logo -->
""",
theme=None,
title=None,
),
csss=dict(
subdir='csss',
default_theme='csss_default',
slide_envir_begin='<section class="slide">',
slide_envir_end='</section>',
pop=('delayed', 'li'),
notes='<p class="presenter-notes">\n<!-- press "Ctrl+P" or "Shift+P" to activate -->\n\\g<1>\n</p>\n',
head_header="""
<!-- CSSS: http://leaverou.github.com/CSSS/ -->
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
<link href="csss/slideshow.css" rel="stylesheet" />
<link href="csss/theme.css" rel="stylesheet" />
<link href="csss/talk.css" rel="stylesheet" />
<script src="csss/prefixfree.min.js"></script>
""",
body_header="""\
<body data-duration="10">
""",
footer="""
<script src="csss/slideshow.js"></script>
<script src="csss/plugins/css-edit.js"></script>
<script src="csss/plugins/css-snippets.js"></script>
<script src="csss/plugins/css-controls.js"></script>
<script src="csss/plugins/code-highlight.js"></script>
<script>
var slideshow = new SlideShow();
var snippets = document.querySelectorAll('.snippet');
for(var i=0; i<snippets.length; i++) {
new CSSSnippet(snippets[i]);
}
var cssControls = document.querySelectorAll('.css-control');
for(var i=0; i<cssControls.length; i++) {
new CSSControl(cssControls[i]);
}
</script>
""",
theme=None,
title=None,
),
dzslides=dict(
subdir=None,
default_theme='dzslides_default', # just one theme in dzslides
slide_envir_begin='<section>',
slide_envir_end='</section>',
#notes='<div role="note">\n\g<1>\n</div>',
pop=('incremental', 'ul', 'ol'),
notes='<details>\n<!-- use onstage shell to activate: invoke http://hplgit.github.io/doconce/bundled/dzslides/shells/onstage.html -->\n\\g<1>\n</details>\n',
#notes='<div role="note">\n<!-- use onstage shell to activate: invoke http://hplgit.github.io/doconce/bundled/dzslides/shells/onstage.html -->\n\\g<1>\n</div>\n',
head_header="""
<!-- dzslides: http://paulrouget.com/dzslides/ -->
<!-- One section is one slide -->
""",
body_header="""\
<body>
""",
footer="""
<!-- Define the style of your presentation -->
<!--
Style by Hans Petter Langtangen hpl@simula.no:
a slight modification of the original dzslides style,
basically smaller fonts and left-adjusted titles.
-->
<!-- Maybe a font from http://www.google.com/webfonts ? -->
<link href='http://fonts.googleapis.com/css?family=Oswald' rel='stylesheet'>
<style>
html, .view body { background-color: black; counter-reset: slideidx; }
body, .view section { background-color: white; border-radius: 12px }
/* A section is a slide. It's size is 800x600, and this will never change */
section, .view head > title {
/* The font from Google */
font-family: 'Oswald', arial, serif;
font-size: 30px;
}
.view section:after {
counter-increment: slideidx;
content: counter(slideidx, decimal-leading-zero);
position: absolute; bottom: -80px; right: 100px;
color: white;
}
.view head > title {
color: white;
text-align: center;
margin: 1em 0 1em 0;
}
center {
font-size: 20px;
}
h1 {
margin-top: 100px;
text-align: center;
font-size: 50px;
}
h2 {
margin-top: 10px;
margin: 25px;
text-align: left;
font-size: 40px;
}
h3 {
margin-top: 10px;
margin: 25px;
text-align: left;
font-size: 30px;
}
ul {
margin: 0px 60px;
font-size: 20px;
}
ol {
margin: 0px 60px;
font-size: 20px;
}
p {
margin: 25px;
font-size: 20px;
}
pre {
font-size: 50%;
margin: 25px;
}
blockquote {
height: 100%;
background-color: black;
color: white;
font-size: 60px;
padding: 50px;
}
blockquote:before {
content: open-quote;
}
blockquote:after {
content: close-quote;
}
/* Figures are displayed full-page, with the caption
on top of the image/video */
figure {
background-color: black;
width: 100%;
height: 100%;
}
figure > * {
position: absolute;
}
figure > img, figure > video {
width: 100%; height: 100%;
}
figcaption {
margin: 70px;
font-size: 50px;
}
footer {
position: absolute;
bottom: 0;
width: 100%;
padding: 40px;
text-align: right;
background-color: #F3F4F8;
border-top: 1px solid #CCC;
}
/* Transition effect */
/* Feel free to change the transition effect for original
animations. See here:
https://developer.mozilla.org/en/CSS/CSS_transitions
How to use CSS3 Transitions: */
section {
-moz-transition: left 400ms linear 0s;
-webkit-transition: left 400ms linear 0s;
-ms-transition: left 400ms linear 0s;
transition: left 400ms linear 0s;
}
.view section {
-moz-transition: none;
-webkit-transition: none;
-ms-transition: none;
transition: none;
}
.view section[aria-selected] {
border: 5px red solid;
}
/* Before */
section { left: -150%; }
/* Now */
section[aria-selected] { left: 0; }
/* After */
section[aria-selected] ~ section { left: +150%; }
/* Incremental elements */
/* By default, visible */
.incremental > * { opacity: 1; }
/* The current item */
.incremental > *[aria-selected] { opacity: 1; }
/* The items to-be-selected */
.incremental > *[aria-selected] ~ * { opacity: 0; }
/* The progressbar, at the bottom of the slides, show the global
progress of the presentation. */
#progress-bar {
height: 2px;
background: #AAA;
}
</style>
<!-- {{{{ dzslides core
#
#
# __ __ __ . __ ___ __
# | \ / /__` | | | \ |__ /__`
# |__/ /_ .__/ |___ | |__/ |___ .__/ core
#
#
# The following block of code is not supposed to be edited.
# But if you want to change the behavior of these slides,
# feel free to hack it!
#
-->
<div id="progress-bar"></div>
<!-- Default Style -->
<style>
* { margin: 0; padding: 0; -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; }
[role="note"] { display: none; }
body {
width: 800px; height: 600px;
margin-left: -400px; margin-top: -300px;
position: absolute; top: 50%; left: 50%;
overflow: hidden;
display: none;
}
.view body {
position: static;
margin: 0; padding: 0;
width: 100%; height: 100%;
display: inline-block;
overflow: visible; overflow-x: hidden;
/* undo Dz.onresize */
transform: none !important;
-moz-transform: none !important;
-webkit-transform: none !important;
-o-transform: none !important;
-ms-transform: none !important;
}
.view head, .view head > title { display: block }
section {
position: absolute;
pointer-events: none;
width: 100%; height: 100%;
}
.view section {
pointer-events: auto;
position: static;
width: 800px; height: 600px;
margin: -150px -200px;
float: left;
transform: scale(.4);
-moz-transform: scale(.4);
-webkit-transform: scale(.4);
-o-transform: scale(.4);
-ms-transform: scale(.4);
}
.view section > * { pointer-events: none; }
section[aria-selected] { pointer-events: auto; }
html { overflow: hidden; }
html.view { overflow: visible; }
body.loaded { display: block; }
.incremental {visibility: hidden; }
.incremental[active] {visibility: visible; }
#progress-bar{
bottom: 0;
position: absolute;
-moz-transition: width 400ms linear 0s;
-webkit-transition: width 400ms linear 0s;
-ms-transition: width 400ms linear 0s;
transition: width 400ms linear 0s;
}
.view #progress-bar {
display: none;
}
</style>
<script>
var Dz = {
remoteWindows: [],
idx: -1,
step: 0,
html: null,
slides: null,
progressBar : null,
params: {
autoplay: "1"
}
};
Dz.init = function() {
document.body.className = "loaded";
this.slides = Array.prototype.slice.call($$("body > section"));
this.progressBar = $("#progress-bar");
this.html = document.body.parentNode;
this.setupParams();
this.onhashchange();
this.setupTouchEvents();
this.onresize();
this.setupView();
}
Dz.setupParams = function() {
var p = window.location.search.substr(1).split('&');
p.forEach(function(e, i, a) {
var keyVal = e.split('=');
Dz.params[keyVal[0]] = decodeURIComponent(keyVal[1]);
});
// Specific params handling
if (!+this.params.autoplay)
$$.forEach($$("video"), function(v){ v.controls = true });
}
Dz.onkeydown = function(aEvent) {
// Don't intercept keyboard shortcuts
if (aEvent.altKey
|| aEvent.ctrlKey
|| aEvent.metaKey
|| aEvent.shiftKey) {
return;
}
if ( aEvent.keyCode == 37 // left arrow
|| aEvent.keyCode == 38 // up arrow
|| aEvent.keyCode == 33 // page up
) {
aEvent.preventDefault();
this.back();
}
if ( aEvent.keyCode == 39 // right arrow
|| aEvent.keyCode == 40 // down arrow
|| aEvent.keyCode == 34 // page down
) {
aEvent.preventDefault();
this.forward();
}
if (aEvent.keyCode == 35) { // end
aEvent.preventDefault();
this.goEnd();
}
if (aEvent.keyCode == 36) { // home
aEvent.preventDefault();
this.goStart();
}
if (aEvent.keyCode == 32) { // space
aEvent.preventDefault();
this.toggleContent();
}
if (aEvent.keyCode == 70) { // f
aEvent.preventDefault();
this.goFullscreen();
}
if (aEvent.keyCode == 79) { // o
aEvent.preventDefault();
this.toggleView();
}
}
/* Touch Events */
Dz.setupTouchEvents = function() {
var orgX, newX;
var tracking = false;
var db = document.body;
db.addEventListener("touchstart", start.bind(this), false);
db.addEventListener("touchmove", move.bind(this), false);
function start(aEvent) {
aEvent.preventDefault();
tracking = true;
orgX = aEvent.changedTouches[0].pageX;
}
function move(aEvent) {
if (!tracking) return;
newX = aEvent.changedTouches[0].pageX;
if (orgX - newX > 100) {
tracking = false;
this.forward();
} else {
if (orgX - newX < -100) {
tracking = false;
this.back();
}
}
}
}
Dz.setupView = function() {
document.body.addEventListener("click", function ( e ) {
if (!Dz.html.classList.contains("view")) return;
if (!e.target || e.target.nodeName != "SECTION") return;
Dz.html.classList.remove("view");
Dz.setCursor(Dz.slides.indexOf(e.target) + 1);
}, false);
}
/* Adapt the size of the slides to the window */
Dz.onresize = function() {
var db = document.body;
var sx = db.clientWidth / window.innerWidth;
var sy = db.clientHeight / window.innerHeight;
var transform = "scale(" + (1/Math.max(sx, sy)) + ")";
db.style.MozTransform = transform;
db.style.WebkitTransform = transform;
db.style.OTransform = transform;
db.style.msTransform = transform;
db.style.transform = transform;
}
Dz.getNotes = function(aIdx) {
var s = $("section:nth-of-type(" + aIdx + ")");
var d = s.$("[role='note']");
return d ? d.innerHTML : "";
}
Dz.onmessage = function(aEvent) {
var argv = aEvent.data.split(" "), argc = argv.length;
argv.forEach(function(e, i, a) { a[i] = decodeURIComponent(e) });
var win = aEvent.source;
if (argv[0] === "REGISTER" && argc === 1) {
this.remoteWindows.push(win);
this.postMsg(win, "REGISTERED", document.title, this.slides.length);
this.postMsg(win, "CURSOR", this.idx + "." + this.step);
return;
}
if (argv[0] === "BACK" && argc === 1)
this.back();
if (argv[0] === "FORWARD" && argc === 1)
this.forward();
if (argv[0] === "START" && argc === 1)
this.goStart();
if (argv[0] === "END" && argc === 1)
this.goEnd();
if (argv[0] === "TOGGLE_CONTENT" && argc === 1)
this.toggleContent();
if (argv[0] === "SET_CURSOR" && argc === 2)
window.location.hash = "#" + argv[1];
if (argv[0] === "GET_CURSOR" && argc === 1)
this.postMsg(win, "CURSOR", this.idx + "." + this.step);
if (argv[0] === "GET_NOTES" && argc === 1)
this.postMsg(win, "NOTES", this.getNotes(this.idx));
}
Dz.toggleContent = function() {
// If a Video is present in this new slide, play it.
// If a Video is present in the previous slide, stop it.
var s = $("section[aria-selected]");
if (s) {
var video = s.$("video");
if (video) {
if (video.ended || video.paused) {
video.play();
} else {
video.pause();
}
}
}
}
Dz.setCursor = function(aIdx, aStep) {
// If the user changes the slide number in the URL bar, jump
// to this slide.
aStep = (aStep != 0 && typeof aStep !== "undefined") ? "." + aStep : ".0";
window.location.hash = "#" + aIdx + aStep;
}
Dz.onhashchange = function() {
var cursor = window.location.hash.split("#"),
newidx = 1,
newstep = 0;
if (cursor.length == 2) {
newidx = ~~cursor[1].split(".")[0];
newstep = ~~cursor[1].split(".")[1];
if (newstep > Dz.slides[newidx - 1].$$('.incremental > *').length) {
newstep = 0;
newidx++;
}
}
this.setProgress(newidx, newstep);
if (newidx != this.idx) {
this.setSlide(newidx);
}
if (newstep != this.step) {
this.setIncremental(newstep);
}
for (var i = 0; i < this.remoteWindows.length; i++) {
this.postMsg(this.remoteWindows[i], "CURSOR", this.idx + "." + this.step);
}
}
Dz.back = function() {
if (this.idx == 1 && this.step == 0) {
return;
}
if (this.step == 0) {
this.setCursor(this.idx - 1,
this.slides[this.idx - 2].$$('.incremental > *').length);
} else {
this.setCursor(this.idx, this.step - 1);
}
}
Dz.forward = function() {
if (this.idx >= this.slides.length &&
this.step >= this.slides[this.idx - 1].$$('.incremental > *').length) {
return;
}
if (this.step >= this.slides[this.idx - 1].$$('.incremental > *').length) {
this.setCursor(this.idx + 1, 0);
} else {
this.setCursor(this.idx, this.step + 1);
}
}
Dz.goStart = function() {
this.setCursor(1, 0);
}
Dz.goEnd = function() {
var lastIdx = this.slides.length;
var lastStep = this.slides[lastIdx - 1].$$('.incremental > *').length;
this.setCursor(lastIdx, lastStep);
}
Dz.toggleView = function() {
this.html.classList.toggle("view");
if (this.html.classList.contains("view")) {
$("section[aria-selected]").scrollIntoView(true);
}
}
Dz.setSlide = function(aIdx) {
this.idx = aIdx;
var old = $("section[aria-selected]");
var next = $("section:nth-of-type("+ this.idx +")");
if (old) {
old.removeAttribute("aria-selected");
var video = old.$("video");
if (video) {
video.pause();
}
}
if (next) {
next.setAttribute("aria-selected", "true");
if (this.html.classList.contains("view")) {
next.scrollIntoView();
}
var video = next.$("video");
if (video && !!+this.params.autoplay) {
video.play();
}
} else {
// That should not happen
this.idx = -1;
// console.warn("Slide doesn't exist.");
}
}
Dz.setIncremental = function(aStep) {
this.step = aStep;
var old = this.slides[this.idx - 1].$('.incremental > *[aria-selected]');
if (old) {
old.removeAttribute('aria-selected');
}
var incrementals = $$('.incremental');
if (this.step <= 0) {
$$.forEach(incrementals, function(aNode) {
aNode.removeAttribute('active');
});
return;
}
var next = this.slides[this.idx - 1].$$('.incremental > *')[this.step - 1];
if (next) {
next.setAttribute('aria-selected', true);
next.parentNode.setAttribute('active', true);
var found = false;
$$.forEach(incrementals, function(aNode) {
if (aNode != next.parentNode)
if (found)
aNode.removeAttribute('active');
else
aNode.setAttribute('active', true);
else
found = true;
});
} else {
this.setCursor(this.idx, 0);
}
return next;
}
Dz.goFullscreen = function() {
var html = $('html'),
requestFullscreen = html.requestFullscreen || html.requestFullScreen || html.mozRequestFullScreen || html.webkitRequestFullScreen;
if (requestFullscreen) {
requestFullscreen.apply(html);
}
}
Dz.setProgress = function(aIdx, aStep) {
var slide = $("section:nth-of-type("+ aIdx +")");
if (!slide)
return;
var steps = slide.$$('.incremental > *').length + 1,
slideSize = 100 / (this.slides.length - 1),
stepSize = slideSize / steps;
this.progressBar.style.width = ((aIdx - 1) * slideSize + aStep * stepSize) + '%';
}
Dz.postMsg = function(aWin, aMsg) { // [arg0, [arg1...]]
aMsg = [aMsg];
for (var i = 2; i < arguments.length; i++)
aMsg.push(encodeURIComponent(arguments[i]));
aWin.postMessage(aMsg.join(" "), "*");
}
function init() {
Dz.init();
window.onkeydown = Dz.onkeydown.bind(Dz);
window.onresize = Dz.onresize.bind(Dz);
window.onhashchange = Dz.onhashchange.bind(Dz);
window.onmessage = Dz.onmessage.bind(Dz);
}
window.onload = init;
</script>
<script> // Helpers
if (!Function.prototype.bind) {
Function.prototype.bind = function (oThis) {
// closest thing possible to the ECMAScript 5 internal IsCallable
// function
if (typeof this !== "function")
throw new TypeError(
"Function.prototype.bind - what is trying to be fBound is not callable"
);
var aArgs = Array.prototype.slice.call(arguments, 1),
fToBind = this,
fNOP = function () {},
fBound = function () {
return fToBind.apply( this instanceof fNOP ? this : oThis || window,
aArgs.concat(Array.prototype.slice.call(arguments)));
};
fNOP.prototype = this.prototype;
fBound.prototype = new fNOP();
return fBound;
};
}
var $ = (HTMLElement.prototype.$ = function(aQuery) {
return this.querySelector(aQuery);
}).bind(document);
var $$ = (HTMLElement.prototype.$$ = function(aQuery) {
return this.querySelectorAll(aQuery);
}).bind(document);
$$.forEach = function(nodeList, fun) {
Array.prototype.forEach.call(nodeList, fun);
}
</script>
""",
theme=None,
title=None,
),
deck=dict(
subdir='deck.js',
default_theme='web-2.0',
slide_envir_begin='<section class="slide">',
slide_envir_end='</section>',
pop=('slide', 'li'),
notes='<div class="notes">\n<!-- press "n" to activate -->\n\\g<1>\n</div>\n',
head_header="""
<!-- deck.js: https://github.com/imakewebthings/deck.js -->
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=1024, user-scalable=no">
<!-- Required stylesheet -->
<link rel="stylesheet" href="deck.js/core/deck.core.css">
<!-- Extension CSS files go here. Remove or add as needed.
deck.goto: Adds a shortcut key to jump to any slide number.
Hit g, type in the slide number, and hit enter.
deck.hash: Enables internal linking within slides, deep
linking to individual slides, and updates the address bar and
a permalink anchor with each slide change.
deck.menu: Adds a menu view, letting you see all slides in a grid.
Hit m to toggle to menu view, continue navigating your deck,
and hit m to return to normal view. Touch devices can double-tap
the deck to switch between views.
deck.navigation: Adds clickable left and right buttons for the
less keyboard inclined.
deck.status: Adds a page number indicator. (current/total).
deck.scale: Scales each slide to fit within the deck container
using CSS Transforms for those browsers that support them.
deck.pointer: Turn mouse into laser pointer (toggle with p).
(Requires https://github.com/mikeharris100/deck.pointer.js)
-->
<link rel="stylesheet" href="deck.js/extensions/menu/deck.menu.css">
<link rel="stylesheet" href="deck.js/extensions/navigation/deck.navigation.css">
<link rel="stylesheet" href="deck.js/extensions/scale/deck.scale.css">
<link rel="stylesheet" href="deck.js/extensions/pointer/deck.pointer.css">
<link rel="stylesheet" href="deck.js/extensions/notes/deck.notes.css">
<!--
<link rel="stylesheet" href="deck.js/extensions/goto/deck.goto.css">
<link rel="stylesheet" href="deck.js/extensions/hash/deck.hash.css">
<link rel="stylesheet" href="deck.js/extensions/status/deck.status.css">
-->
<!-- Style theme. More available in themes/style/ or create your own. -->
<link rel="stylesheet" href="deck.js/themes/style/%(theme)s.css">
<!--
<link rel="stylesheet" href="deck.js/themes/style/neon.css">
<link rel="stylesheet" href="deck.js/themes/style/swiss.css">
<link rel="stylesheet" href="deck.js/themes/style/web-2.0.css">
git clone git://github.com/duijf/mnml.git
<link rel="stylesheet" href="deck.js/themes/style/mnml.css">
git://github.com/groovecoder/deckjs-theme-mozilla.git
<link rel="stylesheet" href="deck.js/themes/style/sandstone.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.aurora.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.dark.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.default.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.firefox.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.light.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.mdn.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.nightly.css">
<link rel="stylesheet" href="deck.js/themes/style/sandstone.cbc.css">
git://github.com/barraq/deck.ext.js.git
<link rel="stylesheet" href="deck.js/themes/style/beamer.css">
-->
<!--
Transition theme. More available in /themes/transition/ or create your own. -->
<!--
<link rel="stylesheet" href="deck.js/themes/transition/horizontal-slide.css">
<link rel="stylesheet" href="deck.js/themes/transition/fade.css">
<link rel="stylesheet" href="deck.js/themes/transition/vertical-slide.css">
<link rel="stylesheet" href="deck.js/themes/transition/horizontal-slide.css">
-->
<!-- Required Modernizr file -->
<script src="deck.js/modernizr.custom.js"></script>
<style type="text/css">
/* Override h1, h2, ... styles */
h1 { font-size: 2.8em; }
h2 { font-size: 1.5em; }
h3 { font-size: 1.4em; }
h4 { font-size: 1.3em; }
h1, h2, h3, h4 { font-weight: bold; line-height: 1.2; }
body { overflow: auto; } /* vertical scrolling */
hr { border: 0; width: 80%%; border-bottom: 1px solid #aaa}
p.caption { width: 80%%; font-size: 60%%; font-style: italic; text-align: left; }
hr.figure { border: 0; width: 80%%; border-bottom: 1px solid #aaa}
.slide .alert-text-small { font-size: 80%%; }
.slide .alert-text-large { font-size: 130%%; }
.slide .alert-text-normal { font-size: 90%%; }
.slide .alert {
padding:8px 35px 8px 14px; margin-bottom:18px;
text-shadow:0 1px 0 rgba(255,255,255,0.5);
border:5px solid #bababa;
-webkit-border-radius:14px; -moz-border-radius:14px;
border-radius:14px;
background-position: 10px 10px;
background-repeat: no-repeat;
background-size: 38px;
padding-left: 30px; /* 55px; if icon */
}
.slide .alert-block {padding-top:14px; padding-bottom:14px}
.slide .alert-block > p, .alert-block > ul {margin-bottom:0}
/*.slide .alert li {margin-top: 1em}*/
.deck .alert-block p+p {margin-top:5px}
/*.slide .alert-notice { background-image: url(http://hplgit.github.io/doconce/bundled/html_images//small_gray_notice.png); }
.slide .alert-summary { background-image:url(http://hplgit.github.io/doconce/bundled/html_images//small_gray_summary.png); }
.slide .alert-warning { background-image: url(http://hplgit.github.io/doconce/bundled/html_images//small_gray_warning.png); }
.slide .alert-question {background-image:url(http://hplgit.github.io/doconce/bundled/html_images/small_gray_question.png); } */
</style>
""",
body_header="""\
<body class="deck-container">
<header>
<!-- Here goes a potential header -->
</header>
<!-- do not use the article tag - it gives strange sizings -->
""",
footer="""
<footer>
<!-- Here goes a footer -->
</footer>
<!-- Begin extension snippets. Add or remove as needed. -->
<!-- deck.navigation snippet -->
<a href="#" class="deck-prev-link" title="Previous">←</a>
<a href="#" class="deck-next-link" title="Next">→</a>
<!-- deck.status snippet
<p class="deck-status">
<span class="deck-status-current"></span>
/
<span class="deck-status-total"></span>
</p>
-->
<!-- deck.goto snippet
<form action="." method="get" class="goto-form">
<label for="goto-slide">Go to slide:</label>
<input type="text" name="slidenum" id="goto-slide" list="goto-datalist">
<datalist id="goto-datalist"></datalist>
<input type="submit" value="Go">
</form>
-->
<!-- deck.hash snippet
<a href="." title="Permalink to this slide" class="deck-permalink">#</a>
-->
<!-- End extension snippets. -->
<!-- Required JS files. -->
<script src="deck.js/jquery.min.js"></script>
<script src="deck.js/core/deck.core.js"></script>
<!-- Extension JS files. Add or remove as needed. -->
<script src="deck.js/core/deck.core.js"></script>
<script src="deck.js/extensions/hash/deck.hash.js"></script>
<script src="deck.js/extensions/menu/deck.menu.js"></script>
<script src="deck.js/extensions/goto/deck.goto.js"></script>
<script src="deck.js/extensions/status/deck.status.js"></script>
<script src="deck.js/extensions/navigation/deck.navigation.js"></script>
<script src="deck.js/extensions/scale/deck.scale.js"></script>
<script src="deck.js/extensions/notes/deck.notes.js"></script>
<!-- From https://github.com/mikeharris100/deck.pointer.js -->
<script src="deck.js/extensions/pointer/deck.pointer.js"></script>
<!-- From https://github.com/stvnwrgs/presenterview
<script type="text/javascript" src="deck.js/extensions/presenterview/deck.presenterview.js"></script> -->
<!-- From https://github.com/nemec/deck.annotate.js
<script type="text/javascript" src="deck.js/extensions/deck.annotate.js/deck.annotate.js"></script>
-->
<!-- Initialize the deck. You can put this in an external file if desired. -->
<script>
$(function() {
$.deck('.slide');
});
</script>
""",
theme=None,
title=None,
),
html5slides=dict(
subdir=None,
default_theme='template-default', # template-io2011, should use template-io2012: https://code.google.com/p/io-2012-slides/
slide_envir_begin='<article>',
slide_envir_end='</article>',
pop=('build', 'ul'),
notes='<aside class="note">\n<!-- press "p" to activate -->\n\\g<1>\n</aside>\n',
head_header="""
<!-- Google HTML5 Slides:
http://code.google.com/p/html5slides/
-->
<meta charset='utf-8'>
<script
src='http://html5slides.googlecode.com/svn/trunk/slides.js'>
</script>
</head>
<style>
/* Your individual styles here... */
</style>
""",
body_header="""\
<body style='display: none'>
<!-- See http://code.google.com/p/html5slides/source/browse/trunk/styles.css
for definition of template-default and other styles -->
<section class='slides layout-regular %(theme)s'>
<!-- <section class='slides layout-regular template-io2011'> -->
<!-- <section class='slides layout-regular template-default'> -->
<!-- Slides are in <article> tags -->
""",
footer="""
</section>
""",
theme=None,
title=None,
),
)
theme = misc_option('html_slide_theme=', default='default')
# Check that the theme name is registered
#from doconce.misc import recommended_html_styles_and_pygments_styles
all_combinations = recommended_html_styles_and_pygments_styles()
if not slide_tp in all_combinations:
# This test will not be run since it is already tested that
# the slide type is legal (before calling this function)
print '*** error: slide type "%s" is not known - abort' % slide_tp
print 'known slide types:', ', '.join(list(all_combinations.keys()))
_abort()
# We need the subdir with reveal.js, deck.js, or similar to show
# the HTML slides so add the subdir to the registered file collection
if slide_syntax[slide_tp]['subdir'] is not None:
import html
html.add_to_file_collection(
slide_syntax[slide_tp]['subdir'], filename, 'a')
if theme != 'default':
if not theme in all_combinations[slide_tp]:
print '*** error: %s theme "%s" is not known - abort' % \
(slide_tp, theme)
print 'known themes:', ', '.join(list(all_combinations[slide_tp].keys()))
_abort()
#m = re.search(r'<title>(.*?)</title>', ''.join(parts[0]))
#if m:
# title = m.group(1).strip()
#else:
# title = ''
#slide_syntax[slide_tp]['title'] = title
slide_syntax[slide_tp]['theme'] = \
slide_syntax[slide_tp]['default_theme'] if (theme == 'default' or theme.endswith('_default')) else theme
# Fill in theme etc.
slide_syntax[slide_tp]['head_header'] = \
slide_syntax[slide_tp]['head_header'] % slide_syntax[slide_tp]
slide_syntax[slide_tp]['body_header'] = \
slide_syntax[slide_tp]['body_header'] % slide_syntax[slide_tp]
footer_logo = misc_option('html_footer_logo=', default=None)
# Handle short forms for cbc, simula, and uio logos
if footer_logo == 'cbc':
footer_logo = 'cbc_footer'
elif footer_logo == 'simula':
footer_logo = 'simula_footer'
elif footer_logo == 'uio':
footer_logo = 'uio_footer'
# Default footer logo command
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 0px;">
<img src="%s/cbc_footer.png" width=110%%;></div>
""" % footer_logo
# Override repl for cbc, simula, uio logos since these are specified
# without full URLs
# Path to cbc, simula, uio logo files
footer_logo_path = dict(reveal='reveal.js/css/images',
deck='deck.js/themes/images')
if footer_logo == 'cbc_footer':
if slide_tp not in ('reveal', 'deck'):
raise ValueError('slide type "%s" cannot have --html_footer_logo' % slide_tp)
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 0px;">
<img src="%s/cbc_footer.png" width=110%%;></div>
""" % footer_logo_path[slide_tp]
elif footer_logo == 'cbc_symbol':
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 20px; margin-bottom: 20px;">
<img src="%s/cbc_symbol.png" width="50"></div>
""" % footer_logo_path[slide_tp]
elif footer_logo == 'simula_footer':
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 0px;">
<img src="%s/simula_footer.png" width=700></div>
""" % footer_logo_path[slide_tp]
elif footer_logo == 'simula_symbol':
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 20px; margin-bottom: 10px;">
<img src="%s/simula_symbol.png" width=200></div>
""" % footer_logo_path[slide_tp]
elif footer_logo == 'uio_footer':
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 20px; margin-bottom: 0px;">
<img src="%s/uio_footer.png" width=450></div>
""" % footer_logo_path[slide_tp]
elif footer_logo == 'uio_symbol':
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 20px; margin-bottom: 20px;">
<img src="%s/uio_symbol.png" width=100></div>
""" % footer_logo_path[slide_tp]
elif footer_logo == 'uio_simula_symbol':
repl = """
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 20px; margin-bottom: 0px;">
<img src="%s/uio_footer.png" width="180"></div>
<div style="position: absolute; bottom: 0px; left: 0; margin-left: 250px; margin-bottom: 0px;">
<img src="%s/simula_symbol.png" width="250"></div>
""" % (footer_logo_path[slide_tp], footer_logo_path[slide_tp])
pattern = dict(
reveal=r'<!-- begin footer logo\s+(.+?)\s+end footer logo -->',
deck=r'<!-- Here goes a footer -->')
if footer_logo is not None:
slide_syntax[slide_tp]['footer'] = re.sub(
pattern[slide_tp], repl,
slide_syntax[slide_tp]['footer'], flags=re.DOTALL)
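# Illustration: for deck, the '<!-- Here goes a footer -->' comment in the
# footer template above is replaced by the repl <div><img ...></div> snippet;
# for reveal, the whole '<!-- begin footer logo ... end footer logo -->'
# block (in the reveal footer template, not shown here) is replaced the same way.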
# Grab the relevant lines in the <head> and <body> parts of
# the original header
head_lines = []
body_lines = []
inside_style = False
inside_head = False
inside_body = False
for line in header:
if '<head>' in line:
inside_head = True
continue
elif '</head>' in line:
inside_head = False
continue
elif line.strip().startswith('<body'):
inside_body = True
continue
elif '</body>' in line:
inside_body = False
continue
elif line.strip().startswith('<style'):
inside_style = True
continue
elif '</style>' in line:
inside_style = False
continue
if inside_style:
continue # skip style lines
elif inside_body:
body_lines.append(line)
elif inside_head:
head_lines.append(line)
slide_syntax[slide_tp]['head_lines'] = ''.join(head_lines)
slide_syntax[slide_tp]['body_lines'] = ''.join(body_lines)
#<title>%(title)s</title>
slides = """\
<!DOCTYPE html>
%(head_lines)s
%(head_header)s
<!-- Styles for table layout of slides -->
<style type="text/css">
td.padding {
padding-top:20px;
padding-bottom:20px;
padding-right:50px;
padding-left:50px;
}
</style>
</head>
%(body_header)s
%(body_lines)s
""" % slide_syntax[slide_tp]
# Avoid too many numbered equations: use \tag for all equations
# with labels (these get numbers) and turn all other numbers off
# by autoNumber: "none"
slides = slides.replace('autoNumber: "AMS"', 'autoNumber: "none"')
for part_no, part in enumerate(parts):
part = ''.join(part)
if '<!-- begin inline comment' in part:
pattern = r'<!-- begin inline comment -->\s*\[<b>.+?</b>:\s*<em>(.+?)</em>]\s*<!-- end inline comment -->'
part = re.sub(pattern,
slide_syntax[slide_tp]['notes'], part,
flags=re.DOTALL)
if '<!-- !bnotes' in part:
pattern = r'<!-- !bnotes .*?-->(.+?)<!-- !enotes.*?-->'
part = re.sub(pattern,
slide_syntax[slide_tp]['notes'], part,
flags=re.DOTALL)
if slide_tp == 'deck':
if '<!-- document title -->' in part:
# h1 title should be h2 to fix problems with
# .csstransforms h1, .csstransforms .vcenter in css files
pattern = r'<center><h1>(.+?)</h1></center>'
part = re.sub(pattern,
r'<h2 style="text-align: center;">\g<1></h2>',
part)
# Date should use b rather than h4 (which is too big)
pattern = '<center><h4>(.+?)</h4></center>'
part = re.sub(pattern, r'<center><b>\g<1></b></center>', part)
# <b> does not work, so we must turn on bold manually
part = part.replace('<b>', '<b style="font-weight: bold">')
if slide_tp in ('deck', 'reveal'):
# Add more space around equations
part = re.sub(r'\$\$([^$]+)\$\$',
#r'<p> <br> <br>\n$$\g<1>$$\n <br>',
r'<p> <br>\n$$\g<1>$$\n<p> <br>',
part)
if slide_tp == 'reveal' and part_no == 0:
# Add space after names and after institutions
part = re.sub(r'<p>\s+<!-- institution\(s\)',
r'<p> <br>\n<!-- institution(s)', part)
part = re.sub(r'<p>\s+<center><h4>(.+?)</h4></center>\s+<!-- date -->',
r'<p> <br>\n<center><h4>\g<1></h4></center> <!-- date -->',
part)
#if '!bpop' not in part:
#if slide_tp in ['reveal']:
part = part.replace('<li>', '<p><li>') # more space between bullets
# else: the <p> destroys proper handling of incremental pop up
# Try this for all and see if any problem appears
part = part.replace('<li ', '<p><li ') # more space between bullets
# Find pygments style
m = re.search(r'typeset with pygments style "(.+?)"', part)
pygm_style = m.group(1) if m else 'plain <pre>'
html_style = slide_syntax[slide_tp]['theme']
recommended_combinations = all_combinations[slide_tp]
if html_style in recommended_combinations:
if pygm_style != 'plain <pre>' and \
not pygm_style in recommended_combinations[html_style]:
print '*** warning: pygments style "%s" is not '\
'recommended for "%s"!' % (pygm_style, html_style)
print 'recommended styles are %s' % \
(', '.join(['"%s"' % combination
for combination in
recommended_combinations[html_style]]))
# Fix styles: native should have black background for dark themes
if slide_syntax[slide_tp]['theme'] in ['neon', 'night', 'moon', 'blood']:
if pygm_style == 'native':
# Change to black background
part = part.replace('background: #202020',
'background: #000000')
# Make h1 section headings centered
part = part.replace('<h1>', '<h1 style="text-align: center;">')
# Pieces to pop up item by item as the user is clicking
if '<!-- !bpop' in part:
pattern = r'<!-- !bpop (.*?)-->(.+?)<!-- !epop.*?-->'
cpattern = re.compile(pattern, re.DOTALL)
#import pprint;pprint.pprint(cpattern.findall(part))
def subst(m): # m is match object
arg = m.group(1).strip()
if arg:
arg = ' ' + arg
inserted_pop_up = False
class_tp = slide_syntax[slide_tp]['pop'][0]
placements = slide_syntax[slide_tp]['pop'][1:]
body = m.group(2)
# Insert special pop-up tags for lists, admons, and
# pygments code blocks first.
# If none of these are found (inserted_pop_up = False)
# mark the whole paragraph as pop-up element
if '<ol>' in body or '<ul>' in body:
for tag in placements:
tag = '<%s>' % tag.lower()
if tag in body:
body = body.replace(tag, '%s class="%s%s">' % (tag[:-1], class_tp, arg))
inserted_pop_up = True
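# Illustration: if class_tp is 'fragment' (reveal's pop-up class, taken
# from slide_syntax[slide_tp]['pop']) and no extra arg is given,
# '<ul>' becomes '<ul class="fragment">' so items are uncovered one by one.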
if '<div class="alert' in body:
# Augment admonitions with pop-up syntax
body = body.replace('div class="alert',
'div class="%s alert' % class_tp)
inserted_pop_up = True
if '<div class="highlight' in body:
# Augment pygments blocks with pop-up syntax
body = body.replace('<div class="highlight',
'<div class="%s" class="highlight' % class_tp)
inserted_pop_up = True
if not inserted_pop_up:
# Treat whole block as pop-up paragraph
# Hack to preserve spacings before equation (see above),
# when <p> is removed (as we must do below)
body = body.replace('<p> <br>', ' <br> <br>')
body = body.replace('<p>', '') # can make strange behavior
# Add a <p class="fragments"> to the whole body
# (but only if not code or admon content?)
body2 = '\n<p class="%s">\n' % class_tp
# Add arguments specified after !bpop?
if slide_tp == 'reveal' and arg: # reveal specific
args = arg.split()
for arg in args:
if arg:
body2 += '\n<span class="%s %s">\n' % (class_tp, arg)
body2 += body
for arg in args:
if arg:
body2 += '\n</span>\n'
else:
body2 += body
body2 += '\n</p>\n'
body = body2
return body
part = cpattern.sub(subst, part)
# Special treatment of the text for some slide tools
if slide_tp == 'deck':
part = re.sub(r'<pre>(.+?)</pre>',
r'<pre><code>\g<1></code></pre>',
part, flags=re.DOTALL)
if slide_tp == 'reveal':
part = re.sub(r'<pre><code>(.+?)</code></pre>',
r'<pre><code data-trim contenteditable>\g<1></code></pre>',
part,
flags=re.DOTALL)
# Add space after list, except in admons (ended by </div>)
part = re.sub(r'</ul>(?!\s*</div>)', r'</ul>\n<p>', part)
part = re.sub(r'</ol>(?!\s*</div>)', r'</ol>\n<p>', part)
slides += """
%s
%s
%s
""" % (slide_syntax[slide_tp]['slide_envir_begin'],
part,
slide_syntax[slide_tp]['slide_envir_end'])
slides += """
%s
</body>
</html>
""" % (slide_syntax[slide_tp]['footer'])
slides = re.sub(r'<!-- !split .*-->\n', '', slides)
eq_no = 1 # counter for equations
# Insert \tag for each \label (\label only in equations in HTML)
labels = re.findall(r'\\label\{(.+?)\}', slides)
for label in labels:
slides = slides.replace(r'\label{%s}' % label,
r'\tag{%s}' % eq_no)
slides = slides.replace(r'\eqref{%s}' % label,
'<a href="#mjx-eqn-%s">(%s)</a>' %
(eq_no, eq_no))
eq_no += 1
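# Illustration: the first labeled equation gets '\tag{1}', and every
# '\eqref{<label>}' pointing to it becomes the link
# '<a href="#mjx-eqn-1">(1)</a>' (MathJax names anchors mjx-eqn-<tag>).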
if slide_tp == 'reveal':
# Adjust font size in code
slides = slides.replace('<pre style="', '<pre style="font-size: 80%; ')
return slides
def _usage_slides_beamer():
print """Usage: doconce slides_beamer mydoc --beamer_slide_theme=themename --beamer_slide_navigation=off --beamer_block_style=mdbox [--handout]
themename can be
red_plain, blue_plain, red_shadow, blue_shadow, dark, dark_gradient, vintage
--beamer_slide_navigation=on turns on navigation links in the header
and footer. The links are defined by sections (only), i.e., headings
with 7 = in the source file.
--beamer_block_style=X controls how beamer blocks are typeset.
X=native gives the standard beamer blocks.
X=mdbox gives an mdframed box around the content. This is preferable
for simple slide styles.
--handout is used for generating PDF that can be printed as handouts
(usually after using pdfnup to put multiple slides per sheet).
"""
def slides_beamer():
"""
Split latex file into slides and typeset slides using
various tools. Use !split command as slide separator.
"""
if len(sys.argv) <= 1:
_usage_slides_beamer()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.tex'):
filename += '.tex'
if not os.path.isfile(filename):
print 'doconce file in latex format, %s, does not exist - abort' % filename
_abort()
basename = os.path.basename(filename)
filestem = os.path.splitext(basename)[0]
header, parts, footer = get_header_parts_footer(filename, "latex")
parts = tablify(parts, "latex")
filestr = generate_beamer_slides(header, parts, footer,
basename, filename)
if filestr is not None:
f = open(filename, 'w')
f.write(filestr)
f.close()
print 'slides written to', filename
if misc_option('handout', False):
print 'printing for handout:\npdfnup --nup 2x3 --frame true --delta "1cm 1cm" --scale 0.9 --outfile %s.pdf %s.pdf' % (filestem, filestem)
def _usage_slides_markdown():
print """
Usage: doconce slides_markdown mydoc slide_type --slide_theme=dark
slide_type: remark (the only implemented so far)
--slide_theme: light (default) or dark
Output: mydoc.html
"""
def slides_markdown():
"""
Transform markdown file to remark slides.
Must have been generated by doconce format pandoc mydoc.html --github_md
"""
if len(sys.argv) <= 2:
_usage_slides_markdown()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.md'):
filename += '.md'
if not os.path.isfile(filename):
print 'doconce file in markdown format, %s, does not exist' % filename
_abort()
f = open(filename, 'r')
filestr = f.read()
f.close()
slide_type = sys.argv[2]
if slide_type != 'remark':
print '*** error: only remark slides are allowed, not %s' % slide_type
_abort()
template = """
<!DOCTYPE html>
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Yanone+Kaffeesatz);
@import url(http://fonts.googleapis.com/css?family=Droid+Serif:400,700,400italic);
@import url(http://fonts.googleapis.com/css?family=Ubuntu+Mono:400,700,400italic);
body { font-family: 'Droid Serif'; }
h1, h2, h3 {
font-family: 'Yanone Kaffeesatz';
font-weight: normal;
}
.remark-code, .remark-inline-code { font-family: 'Ubuntu Mono'; }
%(additional_styling)s
</style>
</head>
<body>
%(mathjax)s
<textarea id="source">
%(class_)s
%(main)s
</textarea>
<script src="http://gnab.github.io/remark/downloads/remark-latest.min.js" type="text/javascript">
</script>
<script type="text/javascript">
var slideshow = remark.create();
</script>
</body>
</html>
"""
theme = misc_option('slide_theme=', default='light')
class_ = 'class: center, middle'
additional_styling = ''
if theme == 'dark':
class_ = 'class: center, middle, inverse'
additional_styling = """
/* Style taken from the official remark demo */
body { font-family: 'Droid Serif'; }
h1, h2, h3 {
font-family: 'Yanone Kaffeesatz';
font-weight: 400;
margin-bottom: 0;
}
.remark-slide-content h1 { font-size: 3em; }
.remark-slide-content h2 { font-size: 2em; }
.remark-slide-content h3 { font-size: 1.6em; }
.footnote { position: absolute; bottom: 3em; }
li p { line-height: 1.25em; }
.red { color: #fa0000; }
.large { font-size: 2em; }
a, a > code { color: rgb(249, 38, 114); text-decoration: none; }
code {
-moz-border-radius: 5px;
-web-border-radius: 5px;
background: #e7e8e2;
border-radius: 5px;
}
.remark-code, .remark-inline-code { font-family: 'Ubuntu Mono'; }
.remark-code-line-highlighted { background-color: #373832; }
.pull-left { float: left; width: 47%; }
.pull-right { float: right; width: 47%; }
.pull-right ~ p { clear: both; }
#slideshow .slide .content code { font-size: 0.8em; }
#slideshow .slide .content pre code { font-size: 0.9em; padding: 15px; }
.inverse { background: #272822; color: #777872; text-shadow: 0 0 20px #333; }
.inverse h1, .inverse h2 {color: #f3f3f3; line-height: 0.8em; }
"""
# MathJax?
mathjax = ''
if '$' in filestr:
# Fix inline math $...$ to \\( ... \\)
filestr = re.sub(r'([^$])\$([^$]+)\$([^$])',
r'\g<1>\\\\( \g<2> \\\\)\g<3>', filestr,
flags=re.DOTALL)
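# Illustration: 'the energy $E=mc^2$ is' becomes
# 'the energy \\( E=mc^2 \\) is' (remark/MathJax inline math delimiters).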
# Remove newlines before and after equations inside $$--$$
def subst(m):
eq = m.group(1).strip()
return '$$\n%s\n$$\n\n' % eq
filestr = re.sub(r'^\$\$\n+(.+?)\$\$\n+', subst,
filestr, flags=re.MULTILINE|re.DOTALL)
# Insert MathJax script and newcommands
from html import mathjax_header
mathjax = mathjax_header(filestr)
# Fixes
filestr = re.sub(r'^## ', '# ', filestr, flags=re.MULTILINE)
filestr = re.sub(r'^### ', '## ', filestr, flags=re.MULTILINE)
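# Illustration: '## Some section' -> '# Some section' and
# '### Some subsection' -> '## Some subsection', i.e. headings are
# promoted one level for the remark slide layout.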
# Turn figures to HTML
filestr = re.sub(r'^<!-- (<img.+?>.*) -->\n!\[.+$', r'.center[\g<1>]',
filestr, flags=re.MULTILINE)
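# Illustration (fig/plot.png is a made-up example path): the line pair
#   <!-- <img src="fig/plot.png" width=600> -->
#   ![](fig/plot.png)
# collapses to '.center[<img src="fig/plot.png" width=600>]'.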
#filestr = re.sub(r'^!\[(.*?)\]\((.+?)\)',
# '.center[<img src="\g<2>" width=80%/>]',
# filestr, flags=re.MULTILINE)
# Remove notes
filestr = re.sub(r'^<!-- !bnotes.+?^<!-- !enotes -->', '',
filestr, flags=re.MULTILINE|re.DOTALL)
lines = filestr.splitlines()
# Find title, author and date
title = ''
percentage_counter = 0
for i in range(len(lines)):
if lines[i].startswith('%'):
percentage_counter += 1
if percentage_counter == 1:
# Title
lines[i] = lines[i].replace('% ', '# ')
title = lines[i][1:].lstrip()
elif percentage_counter == 2:
# Authors
lines[i] = '\n\n###' + '\n\n###'.join(lines[i][1:].lstrip().split(';'))
elif percentage_counter == 3:
# Date
lines[i] = lines[i].replace('% ', '\n\n### ')
break
filestr = '\n'.join(lines)
# Drop pop ups and other constructions
filestr = re.sub(r'^<!-- ![be]pop -->\s+', '', filestr,
flags=re.MULTILINE)
filestr = re.sub(r'^<!-- ![be]slidecell.*\s*', '', filestr,
flags=re.MULTILINE)
if '<!-- !bslidecell' in filestr:
print '*** warning: !bslidecell-!eslidecell does not work with remark slides'
print ' (all cells will be placed in their own row...)'
if theme == 'dark':
filestr = filestr.replace('<!-- !split -->', '---\nclass: inverse\n')
else:
filestr = filestr.replace('<!-- !split -->', '---\n')
main = filestr
template = template % vars()
filename = filename.replace('.md', '.html')
f = open(filename, 'w')
f.write(template)
f.close()
print '%s slides in %s' % (slide_type, filename)
def generate_beamer_slides(header, parts, footer, basename, filename):
# Styles: red/blue_plain/shadow, dark, dark_gradient, vintage
header = ''.join(header)
theme = misc_option('beamer_slide_theme=', default='default')
if theme != 'default':
beamerstyle = 'beamertheme' + theme
packages = [beamerstyle]
if theme == 'vintage':
packages.append('vintage_background.png')
copy_latex_packages(packages)
handout = '[handout]' if misc_option('handout', False) else ''
if misc_option('beamer_slide_navigation=', 'off') == 'on':
frame_options = '[fragile]'
else:
# plain signifies no navigation
frame_options = '[plain,fragile]'
block_style = misc_option('beamer_block_style=', 'native')
parskip = 0 if theme.endswith('_plain') else 7
slides = r"""
%% LaTeX Beamer file automatically generated from DocOnce
%% https://github.com/hplgit/doconce
%%-------------------- begin beamer-specific preamble ----------------------
\documentclass%(handout)s{beamer}
\usetheme{%(theme)s}
\usecolortheme{default}
%% turn off the almost invisible, yet disturbing, navigation symbols:
\setbeamertemplate{navigation symbols}{}
%% Examples on customization:
%%\usecolortheme[named=RawSienna]{structure}
%%\usetheme[height=7mm]{Rochester}
%%\setbeamerfont{frametitle}{family=\rmfamily,shape=\itshape}
%%\setbeamertemplate{items}[ball]
%%\setbeamertemplate{blocks}[rounded][shadow=true]
%%\useoutertheme{infolines}
%%
%%\usefonttheme{}
%%\useinnertheme{}
%%
%%\setbeameroption{show notes}
%%\setbeameroption{show notes on second screen=right}
%% fine for B/W printing:
%%\usecolortheme{seahorse}
\usepackage{pgf,pgfarrows,pgfnodes,pgfautomata,pgfheaps,pgfshade}
\usepackage{graphicx}
\usepackage{epsfig}
\usepackage{relsize}
\usepackage{fancybox} %% make sure fancybox is loaded before fancyvrb
\usepackage{fancyvrb}
%%\usepackage{minted} %% requires pygments and latex -shell-escape filename
%%\usepackage{anslistings}
%%\usepackage{listingsutf8}
\usepackage{amsmath,amssymb,bm}
%%\usepackage[latin1]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{colortbl}
\usepackage[english]{babel}
\usepackage{tikz}
\usepackage{framed}
%% Use some nice templates
\beamertemplatetransparentcovereddynamic
%% --- begin table of contents based on sections ---
%% Delete this, if you do not want the table of contents to pop up at
%% the beginning of each section:
%% (Only section headings can enter the table of contents in Beamer
%% slides generated from DocOnce source, while subsections are used
%% for the title in ordinary slides.)
\AtBeginSection[]
{
\begin{frame}<beamer>[plain]
\frametitle{}
%%\frametitle{Outline}
\tableofcontents[currentsection]
\end{frame}
}
%% --- end table of contents based on sections ---
%% If you wish to uncover everything in a step-wise fashion, uncomment
%% the following command:
%%\beamerdefaultoverlayspecification{<+->}
\newcommand{\shortinlinecomment}[3]{\note{\textbf{#1}: #2}}
\newcommand{\longinlinecomment}[3]{\shortinlinecomment{#1}{#2}{#3}}
\definecolor{linkcolor}{rgb}{0,0,0.4}
\hypersetup{
colorlinks=true,
linkcolor=linkcolor,
urlcolor=linkcolor,
pdfmenubar=true,
pdftoolbar=true,
bookmarksdepth=3
}
\setlength{\parskip}{%(parskip)spt} %% {1em}
\newenvironment{doconceexercise}{}{}
\newcounter{doconceexercisecounter}
\newenvironment{doconce:movie}{}{}
\newcounter{doconce:movie:counter}
\newcommand{\subex}[1]{\noindent\textbf{#1}} %% for subexercises: a), b), etc
""" % vars()
# Check if we need minted or anslistings:
if re.search('\\usepackage.+minted', header):
slides = slides.replace(
r'%\usepackage{minted}', r'\usepackage{minted}')
if re.search('\\usepackage.+listings', header):
m = re.search(r'^% Define colors.+?^% end of custom lstdefinestyles', header, flags=re.DOTALL|re.MULTILINE)
lststyles = m.group() if m else ''
slides = slides.replace(
r'%\usepackage{listingsutf8}', r'\usepackage{listingsutf8}' + '\n\n' + lststyles)
if re.search('\\usepackage.+anslistings', header):
slides = slides.replace(
r'%\usepackage{anslistings}', r'\usepackage{anslistings}')
if block_style.startswith('mdbox'):
# Add definition of an appropriate mdframed environment
slides += r"""
\usepackage[framemethod=TikZ]{mdframed}
\newcommand{\frametitlecolor}{gray!65!black}
%\usetikzlibrary{shadows}
%\usetikzlibrary{shadows.blur}
% block with title
\newenvironment{mdboxt}[1]{%
\begin{mdframed}[%
frametitle={#1\vphantom{\frametitlecolor}},
skipabove=0.5\baselineskip,
skipbelow=0.5\baselineskip,
outerlinewidth=0.5pt,
frametitlerule=true,
frametitlebackgroundcolor=gray!15,
frametitlefont=\normalfont,
frametitleaboveskip=3pt,
frametitlebelowskip=2pt,
frametitlerulewidth=0.5pt,
roundcorner=2pt,
%shadow=true,
%shadowcolor=green!10!black!40,
%shadowsize=5pt
%apptotikzsetting={\tikzset{mdfshadow/.style=blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt,xshift=3pt,yshift=-3pt}}}
]%
}{\end{mdframed}}
% block without title
\newenvironment{mdbox}{%
\begin{mdframed}[%
roundcorner=2pt,
%shadow=true,
%shadowcolor=green!10!black!40,
%shadowsize=5pt
%apptotikzsetting={\tikzset{mdfshadow/.style=blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt,xshift=3pt,yshift=-3pt}}}
]%
}{\end{mdframed}}
"""
slides += r"""
%-------------------- end beamer-specific preamble ----------------------
% Add user's preamble
"""
# Add possible user customization from the original latex file,
# plus the newcommands and \begin{document}
preamble_divider_line = '% --- end of standard preamble for documents ---'
if preamble_divider_line not in header:
print '*** error: generated latex document has missing'
print ' title, author, and date - add TITLE:, AUTHOR:, DATE:'
_abort()
slides += header.split(preamble_divider_line)[1]
for part in parts:
part = ''.join(part)
code_free_part = remove_verbatim_blocks(part, 'latex')
if 'inlinecomment{' in part:
# Inline comments are typeset as notes in this beamer preamble
pass
if '% !bnotes' in part:
pattern = r'% !bnotes(.+?)% !enotes\s'
part = re.sub(pattern,
r'\\note{\g<1>}', part,
flags=re.DOTALL)
# Keep blocks or make mdbox
if block_style.startswith('mdbox'):
def subst(m):
title = m.group(1).strip()
text = m.group(2)
b = 'mdboxt' if title else 'mdbox'
s = r'\begin{%s}{%s}%s\end{%s}' % (b, title, text, b)
return s
pattern = r'\\begin\{block\}\{(.*?)\}(.+)?\\end\{block\}'
part = re.sub(pattern, subst, part, flags=re.DOTALL)
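# Illustration: '\begin{block}{Key point} ... \end{block}' becomes
# '\begin{mdboxt}{Key point} ... \end{mdboxt}', while an untitled block
# maps to the plain mdbox environment (followed by an empty {} group).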
# remove margins because boxes are not that big
part = part.replace('leftmargin=7mm', 'leftmargin=0mm')
# Use smaller margin in slides
part = part.replace('leftmargin=7mm', 'leftmargin=2mm')
# Pieces to pop up item by item as the user is clicking
if '% !bpop' in part:
num_pops = part.count('% !bpop')
pattern = r'% !bpop *(.*?)\s+(.+?)\s+% !epop'
cpattern = re.compile(pattern, re.DOTALL)
#import pprint;pprint.pprint(cpattern.findall(part))
def subst(m): # m is match object
arg = m.group(1).strip()
body = m.group(2)
startswith_block = startswith_list = has_list = False
pattern_block = r'(\\begin\{block|\\begin\{mdbox|\\summarybox\{|\\begin\{[A-Za-z0-9_]+admon\})'
pattern_list = r'\\begin\{(enumerate|itemize|description)\}'
if re.match(pattern_block, body.lstrip()):
startswith_block = True
if re.match(pattern_list, body.lstrip()):
startswith_list = True
if r'\item' in body:
has_list = True
if startswith_block:
# Pop up the whole block at once
body = '\\pause\n' + body
elif startswith_list:
# Pop up each list item
body = re.sub(r'^( *\\item)', r'\pause\n\g<1>', body,
flags=re.MULTILINE)
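# Illustration: inside a popped-up itemize, every '\item' line becomes
# '\pause' + newline + '\item', so the items are uncovered one per click.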
else:
# Just pause before what's coming
body = '\\pause\n' + body
'''
# OLD:
# Individual pop up of list items if there is only
# one pop block on this slide, otherwise pause the
# whole list (in else branch)
if r'\item' in body: # and num_pops == 1:
marker = '[[[[['
body = body.replace('\item ', r'\item%s ' % marker)
n = body.count('item%s' % marker)
for i in range(n):
body = body.replace('item%s' % marker,
'item<%d->' % (i+2), 1)
else:
# treat whole part as a block
pattern = r'(\\begin\{block|\\begin\{mdbox|\\summarybox\{|\\begin\{[A-Za-z0-9_]+admon\})'
m = re.match(pattern, body.lstrip())
if m:
# body has a construction that is already a block
body = r"""
\pause
%s
""" % body
else:
# wrap body in a block (does not work well if
# bpop-epop is already within another block
body = r"""
\pause
\begin{block}{}
%s
\end{block}
""" % body
'''
return body
part = cpattern.sub(subst, part)
# Check that we do not have multiple subsections (i.e., multiple
# slides) on this split - if so, it is a forgotten !split
subsections = re.findall(r'\\subsection\{', code_free_part)
if len(subsections) > 1:
print '*** error: more than one subsection in a slide (insert missing !split):'
print part
_abort()
# Add text for this slide
# Grab slide title as *first* subsection in part
pattern = r'subsection\{(.+)\}' # greedy so it goes to the end
m = re.search(pattern, code_free_part)
if m:
title = m.group(1).strip()
title = r'\frametitle{%s}' % title + '\n'
part = re.sub('\\\\.*' + pattern, '', part, count=1)
elif r'\title{' in code_free_part:
title = ''
else:
title = '% No title on this slide\n'
# Beamer does not support chapter and paragraph headings,
# while section is used here for the toc and subsection
# for the slide title
if r'\chapter{' in code_free_part:
part = part.replace(r'\chapter{', r'\noindent\textbf{\huge ')
if r'\paragraph{' in code_free_part:
part = part.replace(r'\paragraph{', r'\noindent\textbf{')
section_slide = False
# \section{} should be \section[short title]{long title}
# This is signified in a comment
if r'\section{' in code_free_part:
section_slide = True
# Empty section is fine, but if there are more here than
# labels and comments, we embed that in a separate slide
# with the same title (otherwise it will come out as garbage)
section_title = re.search(r'\\section\{(.+)\}', part).group(1)
remove_patterns = [r'\\section\{.+\}', r'\\label\{.+\}', '^%.*',]
stripped_part = part
for pattern in remove_patterns:
stripped_part = re.sub(pattern, '', stripped_part,
flags=re.MULTILINE)
stripped_part = stripped_part.strip()
if stripped_part:
# Embed everything after section in a new slide
part = re.sub(r'(\\section\{.+\})', r"""\g<1>
\\begin{frame}%(frame_options)s
\\frametitle{%(section_title)s}
""" % vars(), part)
part += '\n\\end{frame}\n'
# Add short title? (\section[short title]{ordinary title})
short_title = '' # signifies ordinary slide
m = re.search(r'^% +short +title: +(.+)', code_free_part,
flags=re.MULTILINE|re.IGNORECASE)
if m:
short_title = m.group(1).strip()
part = re.sub(r'\\section\{', r'\\section[%s]{' % short_title,
part)
part = part.replace(m.group(), '') # remove short title comment
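# Illustration: a slide source containing
#   \section{A long descriptive section heading}
#   % short title: Short heading
# ends up as \section[Short heading]{A long descriptive section heading},
# and the '% short title:' comment text is stripped.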
# --- end of section slide treatment ---
part = part.rstrip()
# Check if slide is empty
empty_slide = True
for line in part.splitlines():
if line.startswith('%'):
continue
if line.strip() != '':
empty_slide = False
if r'\title{' in code_free_part:
# Titlepage needs special treatment
# Find figure (no caption or figure envir, just includegraphics)
m = re.search(r'(\\centerline\{\\includegraphics.+\}\})', part)
if m:
titlepage_figure = m.group(1)
# Move titlepage figure to \date{}
part = part.replace('% <optional titlepage figure>', r'\\ \ \\ ' + '\n' + titlepage_figure)
# Remove original titlepage figure
part = re.sub(r'% inline figure\n\\centerline.+', '', part)
slides += r"""
%(part)s
\begin{frame}%(frame_options)s
\titlepage
\end{frame}
""" % vars()
elif section_slide:
# Special section slide, not frame environment
slides += r"""
%(part)s
""" % vars()
elif not empty_slide:
# Ordinary slide
slides += r"""
\begin{frame}%(frame_options)s
%(title)s
%(part)s
\end{frame}
""" % vars()
slides += """
\end{document}
"""
slides = re.sub(r'% !split\s+', '', slides)
return slides
def _usage_split_rst0():
print 'Usage: doconce split_rst complete_file.rst'
def split_rst0():
"""
Split a large .rst file into smaller files corresponding
to each main section (7= in headings).
The large complete doconce file typically looks like this::
#>>>>>>> part: header >>>>>
# #include "header.do.txt"
#>>>>>>> part: fundamentals >>>>>
# #include "fundamentals.do.txt"
#>>>>>>> part: nonlinear >>>>>
# #include "nonlinear.do.txt"
#>>>>>>> part: timedep >>>>>
# #include "timedep.do.txt"
Note that the comment lines ``#>>>...`` *must* appear right above
the include directives. The includes are replaced by text, while
the ``#>>>...`` are still left as markers in the complete document
for the various sections. These markers are used to split the
text into parts. For Sphinx to treat section headings right,
each part should start with a main section (7=).
The ``split_rst`` command will in this example take the complete
``.rst`` file and make files ``header.rst``, ``fundamentals.rst``,
``nonlinear.rst``, etc. The ``doconce sphinx_dir`` command takes
all these ``.rst`` files as arguments and creates the
corresponding index file etc. The names of the various ``.rst``
files are specified in the ``#>>>... Part: ...`` markers. Normally,
a part name corresponding to the included filename is used.
CAVEAT: Nested includes in doconce files and doconce files in subdirs.
SOLUTION: Use #>>> Part: mypart >>> for an include mypart/mypart.do.txt.
All parts are then split into files in the top directory.
fig dirs must be copied, but that can be easily done automatically
if the fig dir name is of the right form.
"""
if len(sys.argv) <= 1:
_usage_split_rst0()
sys.exit(0)
complete_file = sys.argv[1]
f = open(complete_file, 'r')
filestr = f.read()
f.close()
# Determine parts
part_pattern = r'\.\.\s*>>+\s*[Pp]art:\s*%s\s*>>+'
parts = re.findall(part_pattern % '([^ ]+?)', filestr)
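# Illustration: a marker line such as
#   .. >>>>>>> part: fundamentals >>>>>
# makes 'fundamentals' one of the parts, and the text up to the next
# marker is written to fundamentals.rst below.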
# Split file
for i in range(len(parts)):
if i < len(parts)-1: # not the last part?
this_part = part_pattern % parts[i]
next_part = part_pattern % parts[i+1]
else:
this_part = part_pattern % parts[i]
next_part = '$' # end of string
pattern = '%s(.+?)%s' % (this_part, next_part)
cpattern = re.compile(pattern, re.DOTALL)
m = cpattern.search(filestr)
text = m.group(1)
filename = parts[i] + '.rst'
f = open(filename, 'w')
f.write(text)
f.close()
#print 'Extracted part', parts[i], 'in', filename
print ' '.join(parts)
def _usage_split_rst():
print 'Usage: doconce split_rst mydoc'
print """Example:
doconce sphinx_dir author="Kaare Dump" title="Short title" dirname=mydir mydoc
doconce format sphinx mydoc
doconce split_rst mydoc
python automake_sphinx.py
"""
def split_rst():
"""
Split rst file into parts. Use !split command as separator between
parts.
"""
if len(sys.argv) <= 1:
_usage_split_rst()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.rst'):
basename = filename
filename += '.rst'
else:
basename = filename[:-4]
header, parts, footer = get_header_parts_footer(filename, "rst")
import pprint
files = doconce_rst_split(parts, basename, filename)
#print ' '.join([name[:-4] for name in files])
print basename, 'split into'
print ' '.join(files)
def doconce_rst_split(parts, basename, filename):
"""Native doconce style splitting of rst file into parts."""
# Write the parts to file and fix references to equations.
label_pattern = r'.. math::\s+:label: (.+?)$'
parts_label = [re.findall(label_pattern, ''.join(part), flags=re.MULTILINE)
for part in parts]
parts_label2part = {} # map an eq. label to where it is defined
for i in range(len(parts_label)):
for label in parts_label[i]: # assume all labels are unique
parts_label2part[label] = i
label2tag = {}
for pn, part_label in enumerate(parts_label):
local_eq_no = 1
for label in part_label:
label2tag[label] = '%d.%d' % (pn+1, local_eq_no)
local_eq_no += 1
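# Illustration: the second labeled equation in part 3 (pn == 2) gets the
# tag '3.2', which is what cross-part :eq: references are rewritten to below.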
# The definition of |nbsp| must be repeated in each part, except the first.
# The definition is inserted in the beginning of the document, i.e.,
# in parts[0].
nbsp = '.. |nbsp| unicode:: 0xA0' in ''.join(parts[0])
generated_files = []
for pn, part in enumerate(parts):
text = ''.join(part)
# Check if headings are consistent: the first heading must be
# the highest one
m = re.search(r'^(%%+|==+|--+|~~+)$', text, flags=re.MULTILINE)
if m:
first_heading = m.group(1)
if first_heading.startswith('='):
if re.search(r'^(%%+)$', text, flags=re.MULTILINE):
print """
*** error: first heading in part %d is a section, but the part
also contains a chapter.
!split must be moved to avoid such inconsistent reST headings""" % pn
_abort()
elif first_heading.startswith('-'):
if re.search(r'^(%%+|==+)$', text, flags=re.MULTILINE):
print """
*** error: first heading in part %d is a subsection, but the part
also contains a chapter or section.
!split must be moved to avoid such inconsistent reST headings""" % pn
_abort()
elif first_heading.startswith('~'):
if re.search(r'^(%%+|==+|--+)$', text, flags=re.MULTILINE):
print """
*** error: first heading in part %d is a subsubsection, but the part
also contains a chapter, section, or subsection.
!split must be moved to avoid such inconsistent reST headings""" % pn
_abort()
part_filename = _part_filename % (basename, pn) + '.rst'
generated_files.append(part_filename)
if nbsp and pn > 0 and '|nbsp|' in text:
text = """
.. |nbsp| unicode:: 0xA0
:trim:
""" + text
for label in parts_label[pn]:
# All math labels get an anchor above for equation refs
# from other parts. The anchor is Eq:label
text = re.sub(r'.. math::\s+:label: %s$' % label,
r".. _Eq:%s:\n\n.. math::\n :label: %s" %
(label, label), text, flags=re.MULTILINE)
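# Illustration: a labeled equation
#   .. math::
#      :label: myeq
# gets the anchor '.. _Eq:myeq:' inserted right above it, so other parts
# can link to it with :ref:`... <Eq:myeq>`.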
local_eqrefs = re.findall(r':eq:`(.+?)`', text)
for label in local_eqrefs:
# (Ignore non-existent labels - sphinx.py removes labels
# in non-align math environments anyway)
if parts_label2part.get(label, None) == pn:
# References to local labels in this part apply the
# standard syntax
pass
else:
text = text.replace(
r':eq:`%s`' % label,
':ref:`(%s) <Eq:%s>`' %
(label2tag.get(label, 'label:removed'), label))
f = open(part_filename, 'w')
f.write(text)
f.close()
return generated_files
def _usage_list_labels():
print 'Usage: doconce list_labels doconcefile.do.txt'
def list_labels():
"""
List all labels used in a doconce or latex file.
Since labels often are logically connected to headings in
a document, the headings are printed in between in the
output from this function, with a comment sign # in
front so that such lines can easily be skipped when
processing the output.
The purpose of the function is to enable clean-up of labels
in a document. For example, one can add to the output a
second column of improved labels and then make replacements.
"""
if len(sys.argv) <= 1:
_usage_list_labels()
sys.exit(0)
filenames = sys.argv[1:]
for filename in filenames:
# Search in a doconce or latex file
dofile = True if filename.endswith('.do.txt') else False
lines = open(filename, 'r').readlines()
labels = [] # not yet used, but nice to collect all labels
for line in lines:
# Identify heading and print out
heading = ''
if dofile:
m = re.search(r'={5,9}\s*(.+?)\s*={5,9}', line)
if m:
heading = m.group(1).strip()
else:
m = re.search(r'section\*?\{(.+)\}', line) # make .+ greedy
if m:
heading = m.group(1).strip()
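# Illustration: '===== Some subsection =====' (doconce) or
# '\section{Some section}' (latex) both yield the heading text,
# which is printed as a comment line before the labels that follow it.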
if heading:
print '# section:', heading
# Identify label
if 'label{' in line:
m = re.search(r'label\{(.+?)\}', line)
if m:
label = m.group(1).strip()
else:
print 'Syntax error in line'
print line
_abort()
print label
labels.append(label)
def _usage_teamod():
print 'Usage: doconce teamod name'
def teamod():
if len(sys.argv) < 2:
_usage_teamod()
sys.exit(0)
name = sys.argv[1]
if os.path.isdir(name):
os.rename(name, name + '.old~~')
print 'directory %s exists, renamed to %s.old~~' % (name, name)
os.mkdir(name)
os.chdir(name)
os.mkdir('fig-%s' % name)
os.mkdir('src-%s' % name)
os.mkdir('slides-%s' % name)
f = open('main_%s.do.txt' % name, 'w')
f.write("""# Main file for teaching module "%s"
TITLE: Here Goes The Title ...
AUTHOR: name1 email:..@.. at institution1, institution2, ...
AUTHOR: name2 at institution3
DATE: today
# #include "%s.do.txt"
""" % name)
f.close()
f = open('%s.do.txt' % name, 'w')
# use a raw string so LaTeX backslashes (\begin, \\, \[) are kept verbatim
f.write(r"""# Teaching module: %s
======= Section =======
===== Subsection =====
idx{example}
label{mysubsec}
__Paragraph.__ Running text...
Some mathematics:
!bt
\begin{align}
a &= b, label{eq1}\\
a &= b, label{eq2}
\end{align}
!et
or
!bt
\[ a = b, \quad c=d \]
!et
Some code:
!bc pycod
def f(x):
return x + 1
!ec
A list with
* item1
* item2
* subitem2
* item3
continued on a second line
""")
f.close()
def _usage_assemble():
print 'Usage: doconce assemble master.do.txt'
def assemble():
# See 2DO and teamod.do.txt
# Assume some master.do.txt including other .do.txt recursively.
# search for all @@@CODE, FIGURE, MOVIE and archive in list/dict.
# search for all #include ".+\.do\.txt", call recursively
# for each of these with dirname and dotxtname as arguments.
# Build local links to all src- and figs- directories, make
# sure all teamod names are unique too.
# analyzer: old comments on how to implement this. Try the
# description above first.
if len(sys.argv) < 2:
_usage_assemble()
sys.exit(0)
    master = sys.argv[1]
# Run analyzer...
def _usage_analyzer():
print 'Usage: doconce analyzer complete_file.do.txt'
def analyzer():
"""
For a doconce file, possibly composed of many other doconce
files, in a nested fashion, this function returns a tree
data structure with the various parts, included files,
involved source code, figures, movies, etc.
Method:
Go through all #include's in a doconce file, find subdirectories
used in @@@CODE, FIGURE, and MOVIE commands, and make links
in the present directory to these subdirectories such that
@@@CODE, FIGURE, and MOVIE works from the present directory.
This is very important functionality when a doconce document
is made up of many distributed documents, in different
directories, included in a (big) document.
Make recursive calls.
"""
# 2DO:
# - start with an example (some Cython intro examples? in a tree?)
# - make doconce nested_include
# which makes a tree of all the dirs that are involved in a
# complete document
    # - simply copy all sub-documents and the complete doc to a new _build dir
# - simply copy all figs-*, movies-*, src-* to _build
# - compile
# IDEA: Have a convention of src-poisson, figs-poisson etc
# naming and use a simple script here to link from one dir to
# all src-* and figs-* movies-* found in a series of dir trees. YES!!
# Maybe use code below to issue warnings if FIGURE etc applies other
# directories (could extend with eps-*, ps-*, pdf-*, png-*, jpeg-*,
# gif-*, flv-*, avi-*, ...) and/or do this also in std doconce
# translation (no, simple stand-alone thing must be fine with
# figs/, it's the big distributed projects that need this
# naming convention). YES! Should be figs-basename(os.getcwd())
# Can play with fenics tut: put each section in sep dirs,
# stationary/poisson, transient/diffusion etc.,
# with local src and figs
# Need a script that can pack all local src dirs into a separate tree
# for distribution (doconce pack_src): create new tree, walk a set
# of trees, for each subdir with name src-*, strip off src-, copy
# subdir to right level in new tree
# Support for latex files too (includegraphics, movie15, psfig,
# input, include), starting point is a .tex file with includes/inputs
if len(sys.argv) <= 1:
_usage_analyzer()
sys.exit(0)
# Must have this in a function since we need to do this recursively
filename = sys.argv[1]
alltext = open(filename, 'r').read()
# preprocess parts and includes
part_pattern = r'\.\.\s*>>+\s*[Pp]art:\s*%s\s*>>+'
parts = re.findall(part_pattern % '([^ ]+?)', alltext)
include_files = re.findall(r"""[#%]\s+\#include\s*["']([A-Za-z0-9_-., ~]+?)["']""", alltext)
include_files = [filename for dummy, filename in include_files]
figure = re.compile(r'^FIGURE:\s*\[(?P<filename>[^,\]]+),?(?P<options>[^\]]*)\]\s*?(?P<caption>.*)$', re.MULTILINE)
movie = re.compile(r'^MOVIE:\s*\[(?P<filename>[^,\]]+),?(?P<options>[^\]]*)\]\s*?(?P<caption>.*)$', re.MULTILINE)
    code = re.compile(r'^\s*@@@CODE\s+([^ ]+?) ', re.MULTILINE)
for filename in include_files:
f = open(filename, 'r')
        directory = os.path.dirname(filename)
fstr = f.read()
f.close()
# What about figs/myfig/1stver/t.png? Just link to figs...
# but it's perhaps ok with links at different levels too?
figure_files = [filename for filename, options, captions in \
figure.findall(fstr)]
movie_files = [filename for filename, options, captions in \
movie.findall(fstr)]
code_files = code.findall(fstr)
print figure_files
figure_dirs = [os.path.dirname(f) for f in figure_files] # no dir??
print figure_dirs
dirs = [os.path.join(directory, figure_dir) \
for figure_dir in figure_dirs]
def old2new_format():
if len(sys.argv) == 1:
print 'Usage: %s file1.do.txt file2.do.txt ...' % sys.argv[0]
sys.exit(1)
for filename in sys.argv[1:]:
print 'Converting', filename
_old2new(filename)
def _old2new(filename):
"""
Read file with name filename and make substitutions of
___headings___ to === headings ===, etc.
A backup of the old file is made (filename + '.old').
"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
os.rename(filename, filename + '.old')
# perform substitutions:
nchanges = 0
for i in range(len(lines)):
oldline = lines[i]
# change from ___headings___ to === headings ===:
lines[i] = re.sub(r'(^\s*)_{7}\s*(?P<title>[^ ].*?)\s*_+\s*$',
r'\g<1>======= \g<title> =======' + '\n', lines[i])
lines[i] = re.sub(r'(^\s*)_{5}\s*(?P<title>[^ ].*?)\s*_+\s*$',
r'\g<1>===== \g<title> =====' + '\n', lines[i])
lines[i] = re.sub(r'(^\s*)_{3}\s*(?P<title>[^ ].*?)\s*_+\s*$',
r'\g<1>=== \g<title> ===' + '\n', lines[i])
if lines[i].startswith('AUTHOR:'):
# swith to "name at institution":
if not ' at ' in lines[i]:
print 'Warning, file "%s": AUTHOR line needs "name at institution" syntax' % filename
if oldline != lines[i]:
nchanges += 1
print 'Changing\n ', oldline, 'to\n ', lines[i]
print 'Performed %d changes in "%s"' % (nchanges, filename)
f = open(filename, 'w')
f.writelines(lines)
f.close()
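# Illustrative sketch of what the substitutions in _old2new above do to
# old-style headings:
#
#   _______Introduction_______     ->  ======= Introduction =======
#   _____Getting started_____      ->  ===== Getting started =====
#   ___A small subsubsection___    ->  === A small subsubsection ===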
def latex_header():
from doconce.doconce import INTRO
print INTRO['latex']
def latex_footer():
from doconce.doconce import OUTRO
print OUTRO['latex']
# -------------------- functions for spell checking ---------------------
_environments = [
# DocOnce: use regex instead, it is safer (!bc at beginning of line etc.)
# Mako: use regex
# hpl tex stuff
("\\beq", "\\eeq"),
("\\beqa", "\\eeqa"),
("\\beqan", "\\eeqan"),
# Wait until the end with removing comment lines
]
# These are relevant if doconce spellcheck is applied to latex or ptex2tex files
_latex_environments = [
("\\begin{equation}", "\\end{equation}"),
("\\begin{equation*}", "\\end{equation*}"),
("\\begin{align}", "\\end{align}"),
("\\begin{align*}", "\\end{align*}"),
("\\begin{eqnarray}", "\\end{eqnarray}"),
("\\begin{eqnarray*}", "\\end{eqnarray*}"),
("\\begin{figure}[", "]"),
("\\begin{figure*}[", "]"),
("\\begin{multline}", "\\end{multiline}"),
("\\begin{tabbing}", "\\end{tabbing}"),
# ptex2tex environments
("\\bccq", "\\eccq"),
("\\bcc", "\\ecc"),
("\\bcod", "\\ecod"),
("\\bpycod", "\\epycod"),
("\\bpro", "\\epro"),
("\\bpypro", "\\epypro"),
("\\brpy", "\\erpy"),
("\\bipy", "\\eipy"),
("\\bfcod", "\\efcod"),
("\\bfpro", "\\efpro"),
("\\bccod", "\\epcod"),
("\\bcpro", "\\epcro"),
("\\bcppcod", "\\ecppcod"),
("\\bcpppro", "\\ecpppro"),
("\\bhtmlcod", "\\ehtmlcod"),
("\\bhtmlpro", "\\ehtmlpro"),
("\\brcod", "\\ercod"),
("\\brpro", "\\eprro"),
("\\bjscod", "\\ejscod"),
("\\bjspro", "\\ejspro"),
("\\blatexcod", "\\elatexcod"),
("\\blatexpro", "\\elatexpro"),
("\\bshcod", "\\eshcod"),
("\\bshpro", "\\eshpro"),
("\\bsys", "\\esys"),
("\\bdat", "\\edat"),
("\\bsni", "\\esni"),
("\\bdsni", "\\edsni"),
("\\bpyshell", "\\epyshell"),
("\\bpy", "\\epy"),
]
_replacements = [
# General
(r'cf.', ''),
# DocOnce
(r'^<%.+^%>', '', re.MULTILINE|re.DOTALL), # Mako Python code
(r'^<%doc.+^</%doc>', '', re.MULTILINE|re.DOTALL), # Mako comments
(r'"([^"]*?)":\s*"[^"]+?"', r'\g<1>'), # links ("`file.py`" -> "": "...")
(r"^#.*$", "", re.MULTILINE),
(r"(idx|label|ref|refaux|cite)\{.*?\}", ""),
(r"cite\[.+?\]\{.+?\}", ""),
(r"refch\[.*?\]\[.*?\]\[.*?\]", "", re.DOTALL),
(r"^(file|solution)=.+$", '', re.MULTILINE), # file= in exercises
(r'^ *\|[\-rlc]+?\|', '', re.MULTILINE), # table line
(r' +\| +', ' '), # table line
('<linebreak>', ''),
(r"={3,}", ""), # section
(r"^__(.+?)__", r"\g<1>\n", re.MULTILINE), # paragraph
(r"\[\^.+?\]", ""), # footnote
(r'`[^ ][^`]*?`', ""),
(r"`[A-Za-z0-9_.]+?`", ""),
(r"^#.*$", "", re.MULTILINE),
(r'"https?://.*?"', ""),
(r'"ftp://.*?"', ""),
(r"\b[A-Za-z_0-9/.:]+\.(com|org|net|edu|)\b", ""), # net name
(r'\[[A-Za-z]+:\s+[^\]]*?\]', ''), # inline comment
(r'''^\s*files? *= *[${}()"'A-Za-z_0-9.,*= ]+\s*$''', '', re.MULTILINE),
(r'^\s*(kw|keywords) *= *([A-Za-z0-9\-._;, ]+)', '', re.MULTILINE),
(r"^@@@CODE.*$", "", re.MULTILINE),
(r"^@@@OSCMD.*$", "", re.MULTILINE),
(r"^\s*(FIGURE|MOVIE):\s*\[.+?\]", "", re.MULTILINE),
(r"^\s*BIBFILE:.+$", "", re.MULTILINE),
(r"^\s*TOC:\s+(on|off)", "", re.MULTILINE),
(r"\$[^{].*?\$", "", re.DOTALL), # inline math
(r"\$\{[A-Za-z_].+?\}", "", re.DOTALL), # mako substitutions (note that ${\cal O}..$ math is not allowed)
('!split', ''),
(r'![be]slidecell', ''),
(r'![be]ans', ''),
(r'![be]sol', ''),
(r'![be]subex', ''),
(r'![be]hint', ''),
(r'![be]notes', ''),
(r'![be]pop', ''),
(r'![be]warning', ''),
(r'![be]summary', ''),
(r'![be]question', ''),
(r'![be]notice', ''),
(r'![be]quote', ''),
(r'![be]box', ''),
(r'![be]block', ''),
(r'![be]remarks', ''),
(r'![be]quiz', ''),
(r'![be]u-[^ ]+', ''), # user-def envirs
(r'^Cw: *', '', re.MULTILINE),
(r'^Cr: *', '', re.MULTILINE),
(r'^E: *', '', re.MULTILINE),
(r'^Q: *', '', re.MULTILINE),
(r'^K: *', '', re.MULTILINE),
(r'^L: *', '', re.MULTILINE),
# Preprocess
(r"^#.*ifn?def.*$", "", re.MULTILINE),
(r"^#.*else.*$", "", re.MULTILINE),
(r"^#.*endif.*$", "", re.MULTILINE),
(r"^#include.*$", "", re.MULTILINE),
# Mako
(r'^<%.+?^%>', '', re.MULTILINE|re.DOTALL),
(r"^% .*$", "", re.MULTILINE),
(r"^<%.*$", "", re.MULTILINE),
]
_latex_replacements = [
(r"%.*$", "", re.MULTILINE), # comments
(r"\\.*section\{(.+)\}", "\g<1>"),
(r"^\\\[[^@]+\\\]", ""), # (@ is "unlikely" character)
(r"\\includegraphics.*?(\.pdf|\.png|\.eps|\.ps|\.jpg)", ""),
(r"\\(pageref|eqref|ref|label|url|emp)\{.*?\}", ""),
(r"\\(emph|texttt)\{(.*?)\}", "\g<2>"),
(r"\\footnote\{", " "), # leaves an extra trailing } (ok)
#(r"\\[Vv]erb(.)(.+?)\1", "\g<2>"),
(r"\\[Vv]erb(.)(.+?)\1", ""),
(r"\\index\{.*?\}", ""),
(r"\$.+?\$", "", re.DOTALL),
(r"([A-Za-z])~", "\g<1> "),
(r"``(.+?)''", "\g<1>"), # very important, otherwise doconce verb eats the text
(r' \.', '.'),
('\n\\.', '.\n'),
(r':\s*\.', '.'),
(r' ,', ','),
('\n\,', ',\n'),
(',{2,}', ','),
# ptex2tex
(r"^@@@DATA.*$", "", re.MULTILINE),
(r"^@@@CMD.*$", "", re.MULTILINE),
# hpl's idx latex commands
(r"\\idx\{.*?\}", ""),
(r"\\idx(font|f|m|p|st|s|c|e|numpyr|numpy)\{.*?\}", ""),
(r"\\codett\{.*?\}", ""),
(r"\\code\{.*?\}", ""),
]
_common_typos = [
'!bsubsex',
'!esubsex',
'hiearchy',
'hieararchy',
'statment',
' imples',
'imples ',
'execption',
'excercise',
'exersice',
'eletric',
'everyting',
'progam',
'technqiues',
'incrased',
'similarily',
'occurence',
'persue',
'becase',
'frequence',
'noticable',
'peform',
'paramter',
'intial',
'inital',
'condtion',
'expontential',
'differentation',
'recieved',
'cateogry',
'occured',
'!bc pydoc',
'!bc pycodc',
]
def _grep_common_typos(text, filename, common_typos):
"""Search for common typos and abort program if any is found."""
found = False
for i, line in enumerate(text.splitlines()):
for typo in common_typos:
if re.search(typo, line):
print '\ntypo "%s" in line %d in file %s:\n' % \
(typo, i+1, filename), line
found = True
if found:
sys.exit(1)
def _strip_environments(text, environments, verbose=0):
"""Remove environments in the ``environments`` list from the text."""
# Note: this stripping does not work well for !bc and !bt envirs,
# because a phrase like `!bc pycod` in running text gives a split...
for item in environments:
if len(item) != 2:
raise ValueError(
'%s in environments to be stripped is wrong' % (str(item)))
begin, end = item
if not begin in text:
continue
parts = text.split(begin)
text = parts[0]
for part in parts[1:]:
subparts = part.split(end)
text += end.join(subparts[1:])
if verbose > 1:
print '\n============ split %s <-> %s\ntext so far:' % (begin, end)
print text
print '\n============\nSkipped:'
print subparts[0]
if verbose > 0:
print 'split away environments: %s %s\nnew text:\n' % (begin, end)
print text
print '\n=================='
return text
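# Illustrative sketch (not called anywhere) of how _strip_environments
# removes a begin-end pair, here a ptex2tex code environment:
def _example_strip_environments():
    sample = 'intro\n\\bcod\nx = 1\n\\ecod\noutro\n'
    # Everything from \bcod up to and including \ecod is removed
    print _strip_environments(sample, [('\\bcod', '\\ecod')])
    # -> 'intro\n\noutro\n'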
def _do_regex_replacements(text, replacements, verbose=0):
"""Substitute according to the `replacement` list."""
for item in replacements:
if len(item) == 2:
from_, to_ = item
text = re.sub(from_, to_, text)
elif len(item) == 3:
from_, to_, flags = item
text = re.sub(from_, to_, text, flags=flags)
if verbose > 0:
print '=================='
print 'regex substitution: %s -> %s\nnew text:' % (from_, to_)
print text
return text
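# Illustrative sketch (not called anywhere): items in the replacement lists
# are (pattern, replacement) pairs or (pattern, replacement, flags) triplets,
# applied by _do_regex_replacements via re.sub:
def _example_do_regex_replacements():
    sample = 'see Section ref{sec:intro} and idx{waves}'
    cleaned = _do_regex_replacements(
        sample, [(r"(idx|label|ref|refaux|cite)\{.*?\}", "")])
    print cleaned  # -> 'see Section  and '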
def _do_fixes_4MSWord(text):
t = text # short cut
# Deal with special strange left-overs from removing ref, cite, etc.
# , , ,
t = re.sub(r',\s+,', ' ', t)
# . period at the beginning of a line
t = re.sub(r'^\. +', '', t, flags=re.MULTILINE)
#, comma at the beginning of a line
t = re.sub(r'^\, +', '', t, flags=re.MULTILINE)
# text like .,
t = re.sub(r' +,\.', '.', t)
# or -.
t = re.sub(r' +-\.', '.', t)
# and ( , , , , )
t = re.sub(r'\([, ]*\)', ' ', t)
# initial spaces on a line
t = re.sub(r'^ +([A-Z])', r'\1', t, flags=re.MULTILINE)
# too complicated to remove emphasize: t = re.sub(r'(^| +)\*(.+?)\*[, \n.]', r' \2 ', t, flags=re.DOTALL|re.MULTILINE)
t = re.sub(r'^\s+\*\s+', '', t, flags=re.MULTILINE)
t = re.sub(r' +\)', ')', t)
t = t.replace(':)', ')')
t = re.sub(r'^ +', '', t, flags=re.MULTILINE)
t = re.sub(r'^\.\n', '\n', t, flags=re.MULTILINE)
t = re.sub(r' +', ' ', t)
t = re.sub(r'\\begin\{.+?\}', '', t)
t = re.sub(r'\\end\{.+?\}', '', t)
# Remove space above paragraphs starting with lower case
t = re.sub(r'\n\n+([a-z])', r' \1', t)
# Remove newlines at the end of text (to help word)
# (this might not be desired for grepping in the stripped file)
if not '--dont_remove_newlines' in sys.argv:
t = re.sub(r'([A-Za-z0-9,.:!?)])\n(?=[^\n])', '\g<1> ', t)
# Do these after we have joined lines
# spaces before comma
t = re.sub(r'([A-Za-z])[:;?.]?\s+, +', r'\1, ', t)
# spaces before period
t = re.sub(r'([A-Za-z])[:;?.]? +\. +', r'\1. ', t)
# spaces before period
t = re.sub(r'([A-Za-z])[:;?.]? +\.', r'\1.', t)
# spaces before colon
t = re.sub(r'([A-Za-z])[:;?.]? +:', r'\1:', t)
t = t.replace(', g.,', '')
t = t.replace('ref[', '')
t = t.replace('][', '')
t = t.replace(']', '')
t = t.replace('[', '')
t = t.replace('*', '')
t = t.replace('|', '')
t = t.replace('()', '')
t = t.replace('(-)', '')
t = t.replace(r'\noindent', '')
return t
def _spellcheck(filename, dictionaries=['.dict4spell.txt'], newdict=None,
remove_multiplicity=False, strip_file='.strip', verbose=False):
"""
Spellcheck `filename` and list misspellings in the file misspellings.txt~.
The `dictionaries` list contains filenames for dictionaries to be
used with ispell.
`newdict` is an optional filename for creating a new, updated
dictionary containing all given dictionaries and all misspellings
found (assuming they are correct and approved in previous runs).
`remove_multiplicity` removes multiple occurrences of the same
misspelling in the misspellings.txt~ (output) file.
`strip_file` holds the filename of a file with definitions of
environments to be stripped off in the source file, replacements
to be performed, and a list of typical misspellings that are first
check before ispell is run.
"""
try:
f = open(filename, 'r')
except IOError:
print '\nfile %s does not exist!' % filename
_abort()
text = f.read()
f.close()
# Standardize newlines
text = re.sub(r'(\r\n|\r|\n)', '\n', text)
# Remove all !bc and !bt blocks
text = re.sub(r'^!bc(.*?)\n(.*?)^!ec', '',
text, flags=re.DOTALL|re.MULTILINE)
text = re.sub(r'^!bt *\n(.*?)^!et', '', text,
flags=re.DOTALL|re.MULTILINE)
# Remove all comments
text = re.sub(r'^#.+$', '', text, flags=re.MULTILINE)
# Check for double words (before removing verbatim)
pattern = r"\b([\w'\-]+)(\s+\1)+\b"
found = False
offset = 30 # no of chars before and after double word to be printed
start = 0
while start < len(text)-1:
m = re.search(pattern, text[start:])
if m:
# Words only
word = m.group(0)
try:
[float(w) for w in word.split()]
is_word = False
except ValueError:
# Drop words with underscore, ...
#drop = ['_', '--',
is_word = '_' not in word
if is_word:
print "\ndouble words detected in %s (marked inside [...]):\n------------------------" % filename
print "%s[%s]%s\n------------------------" % \
(text[max(0,start+m.start()-offset):start+m.start()],
word,
text[start+m.end():min(start+m.end()+offset,
len(text)-1)])
found = True
start += m.end()
else:
break
if found:
pass
#print '\nAbort because of double words.'
#sys.exit(1)
# Remove inline quotes before inline verbatim
pattern = "``(.+?)''([\n ,.?:)*_-])"
text = re.sub(pattern, r'\g<1>\g<2>', text, flags=re.DOTALL)
# Remove inline verbatim
text = re.sub(r'`[^ ][^`]*?`', '', text) # remove inline verbatim
if verbose > 0:
print 'removal of quotes, inline verbatim, code and tex blocks\nnew text:\n'
print text
print '==================\n'
# Continue with spell checking
if os.path.isfile(strip_file):
execfile(strip_file)
else:
environments = []
replacements = []
common_typos = []
# Add standard definitions (above)
environments += _environments
replacements += _replacements
common_typos += _common_typos
# Add standard latex definitions when spellchecking latex
if os.path.splitext(filename)[1] == '.tex':
# Make sure to do latex first (\label{} before label{})
environments = _latex_environments + environments
replacements = _latex_replacements + replacements
_grep_common_typos(text, filename, common_typos)
text = _strip_environments(text, environments, verbose)
#print 'Text after environment strip:\n', text
text = _do_regex_replacements(text, replacements, verbose)
#print 'Text after regex replacements:\n', text
text = _do_fixes_4MSWord(text)
# Write modified text to scratch file and run ispell
scratchfile = 'tmp_stripped_%s' % filename
f = open(scratchfile, 'w')
f.write(text)
f.close()
personal_dictionaries = []
p_opt = '' # personal dictionary specification for ispell
for dictionary in dictionaries:
if os.path.isfile(dictionary):
p_opt += " -p`pwd`/%s" % dictionary
f = open(dictionary, 'r')
personal_dictionaries += f.readlines()
f.close()
else:
print 'Dictionary file %s does not exist.' % dictionary
personal_dictionaries = list(set(personal_dictionaries))
misspellings = 'tmp_misspelled_' + filename + '~'
cmd = 'cat %s | ispell -l -t -d american %s > %s' % \
(scratchfile, p_opt, misspellings)
#cmd = 'cat %s | aspell -t -d american list %s > %s'
system(cmd)
# Load misspellings, remove duplicates
f = open(misspellings, 'r')
words = f.readlines()
f.close()
words2 = list(set(words)) # remove multiple words
if len(words2) > 0: # do we have misspellings?
print '%d misspellings in %s' % (len(words2), filename)
if remove_multiplicity:
f = open(misspellings, 'w')
            f.writelines(words2)
f.close()
else:
os.remove(misspellings)
# Make convenient updates of personal dictionaries
if newdict is not None:
accepted_words = words2 + personal_dictionaries
if os.path.isfile(newdict):
f = open(newdict, 'r')
newdict_words = f.readlines()
f.close()
newdict_add = words2 + newdict_words
newdict_add = sorted(list(set(newdict_add)))
union = accepted_words + newdict_words
union = sorted(list(set(union)))
#print '%s %d: %d misspellings (%d from personal dicts) -> %d' % (newdict, len(newdict_words), len(words2), len(personal_dictionaries), len(union))
else:
union = accepted_words
newdict_add = words2
# union is the potentially new personal dictionary
#
f = open(newdict, 'w')
f.writelines(newdict_add)
f.close()
f = open('new_dictionary.txt~', 'w')
f.writelines(union)
f.close()
#if len(newdict_add) > 0:
# print '%s: %d, %s: %d items' % (newdict, len(newdict_add), 'new_dictionary.txt~', len(union))
def _spellcheck_all(**kwargs):
for filename in glob.glob('tmp_misspelled*~') + glob.glob('misspellings.txt~*'):
os.remove(filename)
for filename in ['__tmp.do.txt']:
if filename in sys.argv[1:]: # iterate over copy
os.remove(filename)
del sys.argv[sys.argv.index(filename)]
for filename in sys.argv[1:]:
if not filename.startswith('tmp_stripped_'):
_spellcheck(filename, **kwargs)
tmp_misspelled = glob.glob('tmp_misspelled*~')
if len(tmp_misspelled) > 0:
print
if len(sys.argv[1:]) == 1:
print 'See misspellings.txt~ for all misspelled words found.'
else:
for name in tmp_misspelled:
print 'See', name, 'for misspellings in', name.replace('tmp_misspelled_', '')[:-1]
        dictfiles = kwargs.get('dictionaries', ['.dict4spell.txt'])
        dictfile = dictfiles[0] if dictfiles else '.dict4spell.txt'
print 'When all misspellings are acceptable, cp new_dictionary.txt~',\
dictfile, '\n'
sys.exit(1)
else:
sys.exit(0)
def _usage_spellcheck():
print """
doconce spellcheck file1.do.txt file2.do.txt ... # use .dict4spell.txt
doconce spellcheck -d .mydict.txt file1.do.txt file2.do.txt ...
Spellcheck of files via ispell (but problematic parts are removed from the
files first).
Output:
misspellings.txt~: dictionary of potentially new accepted words, based on all
the current misspellings.
new_dictionary.txt~: suggested new dictionary, consisting of the old and
all new misspellings (if they can be accepted).
tmp_stripped_file1.do.txt: the original files are stripped of
various constructs that cause trouble in spelling, and the stripped
text is written to files with the filename prefix tmp_stripped_ (this file
can be checked for spelling and grammar mistakes in MS Word, for
instance, but a better method might be to translate the entire
DocOnce document to HTML and import that HTML code into Word.)
Usage
-----
For a new project, follow the points below to initialize a new accepted
personal dictionary for this project. Thereafter, the process is
automated: misspellings.txt~ should be empty if there are no new misspellings.
tmp_misspelled*~ are made for each file tested with the file's misspelled
words.
For each file:
* Run spellcheck.py without a dictionary or with a previous dictionary:
doconce spellcheck file or doconce spellcheck -d .mydict.txt file
(default dictionary file is .dict4spell.txt)
* Check misspellings.txt~ for misspelled words. Change wrong words.
* Rerun. If all words in misspellings.txt~ are acceptable,
copy new_dictionary.txt~ to .dict4spell.txt (or another name)
* Optional: import tmp_stripped_text.txt into MS Word for grammar check.
* Remove tmp_* and *~ files
The next time one can run::
doconce spellcheck file* # use .dict4spell.txt
doconce spellcheck -d .mydict.txt file*
misspellings.txt~ should ideally be empty if there are no (new)
spelling errors. One can check that the file is empty or check
the $? variable on Unix since this program exits with 1
when spelling errors are found in any of the tested files::
# Run spellcheck
doconce spellcheck *.do.txt
if [ $? -ne 0 ]; then exit; fi
How to correct misspellings
---------------------------
Some misspellings can be hard to find if the word is strange
(like "emp", for instance). Then invoke ``tmp_stripped_text.txt``,
which is the stripped version of the text file being spellchecked.
All references, labels, code segments, etc., are removed in this
stripped file. Run ispell on the file::
ispell -l -p.dict4spell.txt tmp_stripped_text.txt
Now, ispell will prompt you for the misspellings and show the context.
A common error in latex is to forget a ``\ref`` or ``\label`` in front
of a label so that the label gets spellchecked. This may give rise to
strange words flagged as misspelled.
How to control what is stripped
-------------------------------
The spellcheck function loads a file .strip, if present, with
possibly three lists that defines what is being stripped away
in ``tmp_stripped_*`` files:
* ``environments``, holding begin-end pairs of environments that
should be entirely removed from the text.
* ``replacements``, holding (from, to) pairs or (from, to, regex-flags)
triplets for substituting text.
* ``common_typos``, holding typical wrong spellings of words.
execfile is applied to .strip to execute the definition of the lists.
"""
def spellcheck():
if len(sys.argv) == 1:
_usage_spellcheck()
sys.exit(0)
if sys.argv[1] == '-d':
dictionary = [sys.argv[2]]
del sys.argv[1:3]
else:
if os.path.isfile('.dict4spell.txt'):
dictionary = ['.dict4spell.txt']
else:
dictionary = []
verbose = False
for i in range(1, len(sys.argv)):
if sys.argv[i] == '--debug':
del sys.argv[i]
verbose = True
if len(sys.argv) < 2:
_usage_spellcheck()
sys.exit(0)
_spellcheck_all(newdict='misspellings.txt~', remove_multiplicity=False,
dictionaries=dictionary, verbose=verbose)
def _usage_ref_external():
print 'doconce ref_external dofile [pubfile --skip_chapter]'
print 'Must give pubfile if no BIBFILE in dofile.do.txt'
print '--skip_chapter avoids substitution of Chapter ref{} -> refch[Chapter ...][][].'
def ref_external():
"""
Examine "# Externaldocuments: ..." in doconce file and publish
file to suggest a substitution script for transforming
references to external labels to the ref[][][] generalized
reference form.
"""
if len(sys.argv) < 2:
_usage_ref_external()
sys.exit(0)
filename = sys.argv[1]
if filename.endswith('.do.txt'):
basename = filename[:-7]
else:
basename = filename
# Analyze the topfile for external documents and publish file
f = open(basename + '.do.txt', 'r')
topfilestr = f.read()
f.close()
m = re.search('^#\s*[Ee]xternaldocuments:\s*(.+)$', topfilestr,
flags=re.MULTILINE)
if m:
external_docs = [s.strip() for s in m.group(1).split(',')]
else:
print '*** error: no # Externaldocuments: file1, file2, ... in', basename + '.do.txt'
print ' cannot get info about external documents and their labels!'
_abort()
m = re.search('^BIBFILE:\s*(.+)', topfilestr, re.MULTILINE)
if m:
pubfile = m.group(1).strip()
else:
if len(sys.argv) >= 3:
pubfile = sys.argv[2]
else:
print '*** error: no BIBFILE: file.pub, missing publish file on the command line!'
_abort()
print ' working with publish file', pubfile
import publish
# Note: we have to operate publish in the directory
# where pubfile resides
pubdir, pubname = os.path.split(pubfile)
if not pubdir:
pubdir = os.curdir
this_dir = os.getcwd()
os.chdir(pubdir)
pubdata = publish.database.read_database(pubname)
os.chdir(this_dir)
def process_external_doc(extdoc_basename):
topfile = extdoc_basename + '.do.txt'
if not os.path.isfile(topfile):
print '*** error: external document "%s" does not exist' % topfile
_abort()
f = open(topfile, 'r')
        text = f.read()
        f.close()
m = re.search('^TITLE:\s*(.+)', text, flags=re.MULTILINE)
if m:
title = m.group(1).strip()
else:
print '*** error: no TITLE: ... in "%s"' % topfile
_abort()
found = False
key = None
url = None
for pub in pubdata:
if pub['title'].lower() == title.lower():
key = pub.get('key', None)
url = pub.get('url', None)
print ' title:', title
print ' url:', url
print ' key:', key
found = True
break
if not found and extdoc_basename != basename:
print '*** warning: could not find the document'
print ' ', title
print ' in the publish database %s' % pubfile
# Try to load the full doconce file as the result of mako,
# or as the result of preprocess, or just extdoc_basename.do.txt
dname, bname = os.path.split(extdoc_basename)
dofile = os.path.join(dname, 'tmp_mako__' + bname + '.do.txt')
if os.path.isfile(dofile):
fullfile = dofile
else:
dofile = os.path.join(dname, 'tmp_preprocess__' + bname + '.do.txt')
if os.path.isfile(dofile):
fullfile = dofile
else:
fullfile = topfile
# Check that there are no includes:
m = re.search(r'^#\s+#include', text, flags=re.MULTILINE)
if m:
print '*** error: doconce format is not run on %s' % topfile
print ' cannot proceed...'
_abort()
print ' ...processing', fullfile
f = open(fullfile, 'r')
text = f.read()
f.close()
# Analyze the full text of the external doconce document
labels = re.findall(r'label\{(.+?)\}', text)
return title, key, url, labels, text
# Find labels and references in this doconce document
dummy, dummy, dummy, mylabels, mytext = process_external_doc(basename)
refs = [(prefix, ref) for dummy, prefix, ref in
re.findall(r'(^|\(|\s+)([A-Za-z]+?)\s+ref\{(.+?)\}', mytext,
flags=re.MULTILINE)]
refs = [(prefix.strip(), ref.strip()) for prefix, ref in refs]
refs = list(set(refs))
pattern = r'\(ref\{(.+?)\}\)-\(ref\{(.+?)\}\)'
eqrefs2 = list(set(re.findall(pattern, mytext)))
mytext2 = re.sub(pattern, 'XXX', mytext)
# Now all pairs of equation references are removed, search for triplets
pattern = r'\(ref\{(.+?)\}\),\s+\(ref\{(.+?)\}\),?\s+and\s+\(ref\{(.+?)\}\)'
eqrefs3 = list(set(re.findall(pattern, mytext2)))
mytext3 = re.sub(pattern, 'XXX', mytext2)
# Now all pairs and triplets are removed and we can collect the remaining
# single equation references
eqrefs1 = re.findall(r'\(ref\{(.+?)\}\)', mytext3)
extdocs_info = {}
refs2extdoc = {}
for external_doc in external_docs:
title, key, url, labels, text = process_external_doc(external_doc)
extdocs_info[external_doc] = dict(title=title, key=key,
url=url, labels=labels)
for prefix, ref in refs:
if ref not in mylabels:
if ref in labels:
refs2extdoc[ref] = (external_doc, prefix)
for ref in eqrefs1:
if ref not in mylabels:
if ref in labels:
refs2extdoc[ref] = (external_doc, 1)
for ref1, ref2 in eqrefs2:
if ref1 not in mylabels:
if ref1 in labels:
refs2extdoc[ref1] = (external_doc, 2)
if ref2 not in mylabels:
if ref2 in labels:
refs2extdoc[ref2] = (external_doc, 2)
for ref1, ref2, ref3 in eqrefs3:
if ref1 not in mylabels:
if ref1 in labels:
refs2extdoc[ref1] = (external_doc, 3)
if ref2 not in mylabels:
if ref2 in labels:
refs2extdoc[ref2] = (external_doc, 3)
if ref3 not in mylabels:
if ref3 in labels:
refs2extdoc[ref3] = (external_doc, 3)
# We now have all references in refs2extdoc and can via extdocs_info
# get additional info about all references
for label in mylabels:
if label in refs2extdoc:
print '*** error: ref{%s} in %s was found as' % (label, basename)
print ' label{%s} in %s and %s' % \
(label, basename, refs2extdoc[label][0])
_abort()
# Substitute all external references by ref[][][]
scriptname = 'tmp_subst_references.sh'
scriptname2 = 'tmp_grep_references.sh'
f = open(scriptname, 'w')
f2 = open(scriptname2, 'w')
print 'substitution script:', scriptname
print 'grep script (for context of each substitution):', scriptname2
dofiles = basename[5:] + '.do.txt' if basename.startswith('main_') else basename + '.do.txt'
f.write('files="%s" # files to which substitutions apply\n\n' % dofiles)
f2.write('files="%s" # files to which substitutions apply\n\nnlines=6 # no of context lines for each matched line' % dofiles)
skip_chapter = '--skip_chapter' in sys.argv
skip_eqs = '--skip_eqs' in sys.argv
for prefix, ref in refs:
        if skip_chapter and prefix.lower() in ('chapter', 'appendix'):
continue
if ref not in mylabels:
f.write(r"doconce subst '%(prefix)s\s+ref\{%(ref)s\}' " % vars())
f2.write(r"grep --context=$nlines --line-number -E '%(prefix)s\s+ref\{%(ref)s\}' $files" % vars() + '\n\n')
ch = 'ch' if prefix.lower() in ('chapter', 'appendix') else ''
f.write("'ref%(ch)s[%(prefix)s ref{%(ref)s}]" % vars())
if ref in refs2extdoc:
if ch:
f.write('[ cite{%s}][' %
extdocs_info[refs2extdoc[ref][0]]['key'])
else:
f.write('[ in cite{%s}][' %
extdocs_info[refs2extdoc[ref][0]]['key'])
f.write('the document "%s"' %
extdocs_info[refs2extdoc[ref][0]]['title'])
if extdocs_info[refs2extdoc[ref][0]]['url'] is not None:
f.write(': "%s"' %
extdocs_info[refs2extdoc[ref][0]]['url'])
if extdocs_info[refs2extdoc[ref][0]]['key'] is not None:
f.write(' cite{%s}' %
extdocs_info[refs2extdoc[ref][0]]['key'])
f.write("]'")
else:
f.write("[no cite info][no doc info]'")
f.write(' $files\n\n')
    if skip_eqs:
        f.close()
        f2.close()
        return
if eqrefs1 or eqrefs2 or eqrefs3:
f.write('\n# Equations:\n')
for ref in eqrefs1:
if ref not in mylabels:
f.write(r"doconce replace '(ref{%(ref)s})' " % vars())
f2.write(r"grep --context=$nlines --line-number '(ref{%(ref)s})' $files" % vars() + '\n\n')
f.write("'ref[(ref{%(ref)s})]" % vars())
if ref in refs2extdoc:
f.write('[ in cite{%s}]' %
extdocs_info[refs2extdoc[ref][0]]['key'])
f.write('[reference to specific _equation_ (label %s) in external document "%s": "%s" cite{%s} is not recommended]' %
(ref,
extdocs_info[refs2extdoc[ref][0]]['title'],
extdocs_info[refs2extdoc[ref][0]]['url'],
extdocs_info[refs2extdoc[ref][0]]['key']))
else:
f.write('[no cite info][no doc info]')
f.write("' $files\n\n")
for ref1, ref2 in eqrefs2:
if ref1 not in mylabels and ref2 not in mylabels:
f.write(r"doconce replace '(ref{%(ref1)s})-(ref{%(ref2)s})' " % vars())
f2.write(r"grep --context=$nlines --line-number '(ref{%(ref1)s})-(ref{%(ref2)s})' $files" % vars() + '\n\n')
f.write("'ref[(ref{%(ref1)s})-(ref{%(ref2)s})]" % vars())
if ref1 in refs2extdoc and ref2 in refs2extdoc:
f.write('[ in cite{%s}]' %
extdocs_info[refs2extdoc[ref1][0]]['key'])
f.write('[reference to specific _equations_ (label %s and %s) in external document "%s": "%s" cite{%s} is not recommended]' %
(ref1, ref2,
extdocs_info[refs2extdoc[ref1][0]]['title'],
extdocs_info[refs2extdoc[ref1][0]]['url'],
extdocs_info[refs2extdoc[ref1][0]]['key']))
else:
f.write('[no cite info][no doc info]')
f.write("' $files\n\n")
for ref1, ref2, ref3 in eqrefs3:
if ref1 not in mylabels and ref2 not in mylabels \
and ref3 not in mylabels:
f.write(r"doconce subst '\(ref\{%(ref1)s\}\),\s+\(ref\{%(ref2)s\}\),?\s+and\s+\(ref{%(ref3)s\}\)' " % vars())
f2.write(r"grep --context=$nlines --line-number -E '\(ref\{%(ref1)s\}\),\s+\(ref\{%(ref2)s\}\),?\s+and\s+\(ref{%(ref3)s\}\)' $files" % vars() + '\n\n')
f.write("'ref[(ref{%(ref1)s}), (ref{%(ref2)s}), and (ref{%(ref3)s})]" % vars())
if ref1 in refs2extdoc and ref2 in refs2extdoc \
and ref3 in refs2extdoc:
if refs2extdoc[ref1][0] == refs2extdoc[ref2][0] and \
refs2extdoc[ref2][0] == refs2extdoc[ref3][0]:
f.write('[ in cite{%s}]' %
extdocs_info[refs2extdoc[ref1][0]]['key'])
else:
# the equations come from different external docs
s = set([extdocs_info[refs2extdoc[ref1][0]]['key'],
extdocs_info[refs2extdoc[ref2][0]]['key'],
extdocs_info[refs2extdoc[ref3][0]]['key']])
f.write('[ cite{%s}]' % ','.join(list(s)))
f.write('[reference to specific _equations_ (label %s, %s, and %s) in external document "%s": "%s" cite{%s} is not recommended]' %
(ref1, ref2, ref3,
                     extdocs_info[refs2extdoc[ref1][0]]['title'],
                     extdocs_info[refs2extdoc[ref1][0]]['url'],
                     extdocs_info[refs2extdoc[ref1][0]]['key']))
else:
f.write('[no cite info][no doc info]')
f.write("' $files\n\n")
    f.close()
    f2.close()
def _usage_latex_problems():
print 'doconce latex_problems mydoc.log [overfull-hbox-limit --texcode]'
print """
Interpret the .log file and write out latex problems related to
undefined references, multiply defined labels, and overfull hboxes.
The lower limit for overfull hboxes can be specified as an integer.
--texcode causes the problematic lines in overfull hboxes to be printed.
"""
def latex_problems():
if len(sys.argv) < 2:
_usage_latex_problems()
sys.exit(0)
    try:
        overfull_hbox_limit = float(sys.argv[2])
    except (IndexError, ValueError):
        overfull_hbox_limit = 20
filename = sys.argv[1]
if not filename.endswith('.log'):
filename += '.log'
f = open(filename, 'r')
lines = f.readlines()
f.close()
ok_overfull_hboxes = []
# Springer T2 will have some overfull hboxes for chapter headings,
# remove these from the report 120.1 and 30.8
t2 = 't2do.sty' in ''.join(lines)
if t2:
ok_overfull_hboxes += ['120.1', '30.8']
multiply_defined_labels = []
multiply_defined_labels_pattern = r"LaTeX Warning: Label `(.+?)' multiply defined"
undefined_references = []
undefined_references_pattern = r"LaTeX Warning: Reference `(.+?)' on page (.+?) undefined"
overfull_hboxes = []
overfull_hboxes_pattern = r"Overfull \\hbox \((.+)pt too wide\) .+lines (.+)"
for line in lines:
m = re.search(multiply_defined_labels_pattern, line)
if m:
multiply_defined_labels.append(m.group(1))
m = re.search(undefined_references_pattern, line)
if m:
undefined_references.append((m.group(1), m.group(2)))
m = re.search(overfull_hboxes_pattern, line)
if m:
overfull_hboxes.append(
('%.1f' % float(m.group(1)), m.group(2).strip()))
problems = False
if multiply_defined_labels:
problems = True
print '\nMultiply defined labels:'
for label in multiply_defined_labels:
print ' ', label
if undefined_references:
problems = True
print '\nUndefined references:'
for ref, page in undefined_references:
print ' ', ref, 'on page', page
if overfull_hboxes:
texcode = '--texcode' in sys.argv
if texcode:
# Load .tex file
f = open(filename[:-4] + '.tex', 'r')
texfile = f.readlines()
f.close()
problems = True
print "\nOverfull hbox'es:"
for npt, at_lines in overfull_hboxes:
if float(npt) > overfull_hbox_limit and npt not in ok_overfull_hboxes:
print ' ', npt, 'lines', at_lines
if texcode:
line_range = [int(line)-1 for line in at_lines.split('--')]
if line_range[1] - line_range[0] < 4 and r'\end' in texfile[line_range[1]]:
# Print more surroundings above
print '\n*** printing 6 lines above problem line:'
print ''.join(texfile[line_range[0]-6:line_range[1]+1])
else:
print '\n', ''.join(texfile[line_range[0]:line_range[1]+1])
if not problems:
print 'no serious LaTeX problems found in %s!' % filename
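# Typical .log lines matched by the patterns above (illustrative sketch):
#
#   LaTeX Warning: Label `eq:wave' multiply defined.
#   LaTeX Warning: Reference `sec:results' on page 4 undefined on input line 12.
#   Overfull \hbox (25.3pt too wide) in paragraph at lines 128--131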
def _usage_grep():
print 'doconce grep FIGURE|MOVIE|CODE doconce-file'
def grep():
if len(sys.argv) < 3:
_usage_grep()
sys.exit(0)
file_tp = sys.argv[1]
filenames = []
for filename in sys.argv[2:]:
if not filename.endswith('.do.txt'):
filename += '.do.txt'
if not os.path.isfile(filename):
continue # just drop non-existing files to avoid corrupt output
f = open(filename, 'r')
filestr = f.read()
f.close()
if file_tp == 'FIGURE':
pattern = r'^FIGURE:\s*\[(?P<filename>[^,\]]+),?(?P<options>[^\]]*)\]'
filenames += [filename for filename, dummy in
re.findall(pattern, filestr, re.MULTILINE)]
elif file_tp == 'MOVIE':
pattern = r'^MOVIE:\s*\[(?P<filename>[^,\]]+),?(?P<options>[^\]]*)\]'
filenames += [filename for filename, dummy in
re.findall(pattern, filestr, re.MULTILINE)]
elif file_tp == 'CODE':
pattern = '^@@@CODE +(.+?)\s+'
filenames += re.findall(pattern, filestr, re.MULTILINE)
else:
print '*** error: cannot grep', file_tp, '(not implemented)'
filenames = list(set(filenames)) # remove multiple filenames
print ' '.join(filenames)
def _usage_capitalize():
print 'doconce capitalize [-d file_with_cap_words] doconce-file'
print 'list of capitalized words can also be in .dict4cap.txt'
print '(typically, Python, Unix, etc. must be capitalized)'
def capitalize():
if len(sys.argv) >= 2 and sys.argv[1] == '-d':
        dictionary = sys.argv[2]
del sys.argv[1:3]
else:
if os.path.isfile('.dict4cap.txt'):
dictionary = '.dict4cap.txt'
else:
dictionary = ''
if len(sys.argv) < 2:
_usage_capitalize()
sys.exit(0)
filename = sys.argv[1]
cap_words = [
'Celsius', 'Fahrenheit', 'Kelvin',
'Fahrenheit-Celsius',
'Newton', 'Gauss', "Gauss'",
'Legendre', 'Lagrange', 'Markov',
'Laguerre', 'Taylor', 'Einstein',
'Maxwell', 'Euler', 'Gaussian', 'Eulerian', 'Lagrangian',
'Poisson',
'Heaviside', 'MATLAB', 'Matlab',
'Trapezoidal', "Simpson's", 'Monte', 'Carlo',
'ODE', 'PDE', 'Adams-Bashforth', 'Runge-Kutta', 'SIR', 'SIZR', 'SIRV',
'Python', 'IPython', 'Cython', 'Idle', 'NumPy', 'SciPy', 'SymPy',
'Matplotlib', 'None', '$N$',
'Fortran', 'MATLAB', 'SWIG', 'Perl', 'Ruby', 'CPU',
'DNA', 'British', 'American', 'Internet', # 'Web',
'HTML', 'MSWord', 'OpenOffice',
'StringFunction', 'Vec2D', 'Vec3D', 'SciTools', 'Easyviz',
'Pysketcher',
]
# This functionality is not well implemented so instead of finding
# a perfect solution we fix well-known special cases.
# A better software solution would be to read a user-made file
# with fixes. The fixes below are special for a book project...
cap_words_fix = [
('exer. ref{', 'Exer. ref{'),
('exer. (_', 'Exer. (_'), # latex2doconce external reference
('subsection. ref{', 'Subsection. ref{'),
('section. ref{', 'Section. ref{'),
('chapter. ref{', 'Chapter ref{'),
('Python library reference', 'Python Library Reference'),
# Cannot have C and C++ as a special word since an equation with c
# will then get capital C...try to repair these cases:
(' c code', ' C code'),
(' c program', ' C program'),
(' c++ ', ' C++ '),
(' 1d ', ' 1D '),
(' 2d ', ' 2D '),
(' 3d ', ' 3D '),
('vec2d', 'Vec2D'),
('vec3d', 'Vec3D'),
('hello, world!', 'Hello, World!'),
('hello world', 'Hello World'),
('mac os x', 'Mac OS X'),
('midpoint integration', 'Midpoint integration'),
('midpoint rule', 'Midpoint rule'),
        ('trapezoidal integration', 'Trapezoidal integration'),
        ('trapezoidal rule', 'Trapezoidal rule'),
('world wide web', 'World Wide Web'),
('cODE', 'code'),
('on windows', 'on Windows'),
('in windows', 'in Windows'),
('under windows', 'under Windows'),
('on mac', 'on Mac'),
('in mac', 'in Mac'),
('under mac', 'under Mac'),
('a mac', 'a Mac'),
("python's", "Python's"),
("forward Euler", "Forward Euler"),
("backward Euler", "Backward Euler"),
("crank-nicolson", "Crank-Nicolson"),
("adams-bashforth", "Adams-Bashforth"),
('runge-kutta', 'Runge-Kutta'),
]
    for name in ('Newton', 'Lagrange', 'Einstein', 'Poisson', 'Taylor',
                 'Gibb', 'Heun'):
        genitive = "'s"
        cap_words_fix.append((name.lower() + genitive, name + genitive))
    if dictionary:
        f = open(dictionary, 'r')
        cap_words += f.read().split()
        f.close()
f = open(filename, 'r')
filestr = f.read()
f.close()
shutil.copy(filename, filename + '.old~~')
filestr, old2new = _capitalize(filestr, cap_words, cap_words_fix)
f = open(filename, 'w')
f.write(filestr)
f.close()
for old, new in old2new:
if old != new:
print old
print new
print
def _capitalize(filestr, cap_words, cap_words_fix):
pattern1 = r'^\s*(={3,9})(.+?)(={3,9})' # sections
pattern2 = r'^__(.+?[.:?;!])__' # paragraphs
sections = re.findall(pattern1, filestr, flags=re.MULTILINE)
paragraphs = re.findall(pattern2, filestr, flags=re.MULTILINE)
orig_titles1 = [t.strip() for s1, t, s2 in sections]
orig_equals1 = [s1 for s1, t, s2 in sections]
orig_titles2 = [t.strip() for t in paragraphs]
orig_headings1 = [s1 + t + s2 for s1, t, s2 in sections]
orig_headings2 = ['__' + t + '__' for t
in re.findall(pattern2, filestr, flags=re.MULTILINE)]
#print orig_titles1
#print orig_titles2
def capitalize_titles(orig_titles, cap_words):
cap_words_lower = [s.lower() for s in cap_words]
new_titles = []
for title in orig_titles:
#print '*', title
# Exercises, problems, are exceptions (view title as what
# comes after the initial word)
word0 = title.split()[0]
if word0 in ['Exercise:', 'Problem:', 'Project:', 'Example:',
                         '{Exercise}:', '{Problem}:', '{Project}:', '{Example}:',]:
title = title.replace(word0, '').strip()
new_title = word0 + ' ' + title.capitalize()
else:
new_title = title.capitalize()
words = new_title.split()
# Handle hyphens
old_words = words[:]
for word in old_words:
if '-' in word:
words.remove(word)
words += word.split('-')
if word[0] == '`' and word[-1] == '`':
if word in words:
words.remove(word)
for word in words:
#print ' ', word
# Strip away non-alphabetic characters
word_stripped = ''.join([w for w in list(word)
if w.isalpha()])
#if word != word_stripped:
#print ' ', word_stripped
if word_stripped.lower() in cap_words_lower:
#print ' found',
try:
i = cap_words_lower.index(word_stripped.lower())
new_word = word.replace(word_stripped, cap_words[i])
new_title = new_title.replace(word, new_word)
#print 'as', cap_words[i]
except ValueError:
pass
#print 'Did not find', word_stripped.lower(), 'in', cap_words_lower
pass
#print '>', new_title
for wrong_words, fixed_words in cap_words_fix:
if wrong_words in new_title:
new_title = new_title.replace(wrong_words, fixed_words)
new_titles.append(new_title)
return new_titles
new_titles1 = capitalize_titles(orig_titles1, cap_words)
new_titles2 = capitalize_titles(orig_titles2, cap_words)
old2new = []
for new_title, orig_title, orig_heading, s1 in \
zip(new_titles1, orig_titles1, orig_headings1, orig_equals1):
new_heading = '%s %s %s' % (s1, new_title, s1)
filestr = filestr.replace(orig_heading, new_heading)
old2new.append((orig_title, new_title))
for new_title, orig_title, orig_heading in \
zip(new_titles2, orig_titles2, orig_headings2):
new_heading = '__%s__' % new_title
filestr = filestr.replace(orig_heading, new_heading)
old2new.append((orig_title, new_title))
return filestr, old2new
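# Illustrative sketch (not called anywhere) of what _capitalize above does to
# a section heading, using a tiny word list instead of the full cap_words:
def _example_capitalize():
    text = '===== the python way =====\n'
    new_text, old2new = _capitalize(text, ['Python'], [])
    print new_text   # -> '===== The Python way =====\n'
    print old2new    # -> [('the python way', 'The Python way')]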
def _usage_md2html():
print 'Usage: doconce md2html doconce-file'
    print 'Make HTML from pandoc-extended Markdown'
print '(.html file from .md pandoc file)'
print 'The purpose is to fix the HTML code with full MathJax support.'
def md2html():
"""
    Translate a .md file to .html such that the HTML code gets full
    LaTeX math support.
The .md file is fixed, then ``pandoc -f markdown -t html`` is run
to create HTML from Markdown, then the HTML code is fixed.
"""
if len(sys.argv) < 2:
_usage_md2html()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.md'):
if os.path.isfile(filename + '.md'):
filename += '.md'
else:
raise IOError('no file %s.md' % filename)
# First make sure \eqref survives the pandoc translation
    f = open(filename, 'r'); text = f.read(); f.close()
    text = text.replace('\\eqref{', 'EQREF{')
    f = open(filename, 'w'); f.write(text); f.close()
# Translate to HTML and fix the MathJax things
basename = filename[:-3]
cmd = 'pandoc -f markdown -t html --mathjax -s -o %s.html %s.md' % \
(basename, basename)
print cmd
failure = os.system(cmd)
if failure:
print 'could not run\n', cmd
sys.exit(1)
f = open('%s.html' % basename, 'r')
text = f.read()
f.close()
# Add extra info
pattern = r'(<script src=".+?MathJax\.js)'
replacement = r"""
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: {
equationNumbers: { autoNumber: "AMS" },
extensions: ["AMSmath.js", "AMSsymbols.js", "autobold.js", "color.js"]
}
});
</script>
\g<1>"""
text = re.sub(pattern, replacement, text)
text = text.replace('EQREF{', '\\eqref{')
f = open('%s.html' % basename, 'w')
f.write(text)
f.close()
print 'output in %s.html' % basename
def _usage_md2latex():
print 'Usage: doconce md2latex doconce-file'
    print 'Make LaTeX from pandoc-extended Markdown'
print '(.tex file from .md file).'
print 'The purpose is to fix the LaTeX code so it compiles.'
def md2latex():
"""
Read the .md file and fix equation syntax such that LaTeX
generated from Markdown (via pandoc) compiles.
"""
if len(sys.argv) < 2:
_usage_md2latex()
sys.exit(0)
filename = sys.argv[1]
if not filename.endswith('.md'):
if os.path.isfile(filename + '.md'):
filename += '.md'
else:
raise IOError('no file %s.md' % filename)
# Remove $$ around begin-end structures
basename = filename[:-3]
cmd = 'pandoc -f markdown -t latex -s -o %s.tex %s.md' % \
(basename, basename)
print cmd
failure = os.system(cmd)
if failure:
print 'could not run\n', cmd
sys.exit(1)
f = open('%s.tex' % basename, 'r')
text = f.read()
f.close()
pattern = r'\$\$(\s*\\begin\{.+?\\end\{.+?)\$\$'
    text = re.sub(pattern, r'\g<1>', text, flags=re.DOTALL)
f = open('%s.tex' % basename, 'w')
f.write(text)
f.close()
print 'output in %s.tex' % basename
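# Illustrative sketch of the equation fix in md2latex above: pandoc output
# may leave display math wrapped as
#
#   $$\begin{align}
#   a &= b
#   \end{align}$$
#
# and the substitution strips the surrounding $$ so that the LaTeX compiles.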
# ----------------------- functions for insertdocstr -----------------------
def insertdocstr():
"""
    This script first finds all .do.txt (DocOnce source code) files in a
directory tree and transforms these to a format given as command-line
argument to the present script. The transformed file has the extension
.dst.txt (dst for Doc STring), regardless of the format.
In the next phase, all .p.py files (Python files that need preprocessing)
are visited, and for each file the C-like preprocessor (preprocess.py)
is run on the file to include .dst.txt files into doc strings.
The result is an ordinary .py file.
Example:
A file basename.p.py has a module doc string which looks like
'''
# #include "docstrings/doc1.dst.txt"
'''
In the subdirectory docstrings we have the file doc1.do.txt, which
contains the documentation in DocOnce format. The current script
    detects this file, transforms it to the desired format, say Epytext.
That action results in doc1.epytext. This file is then renamed to
doc1.dst.txt.
    In the next step, files of the form basename.p.py are visited, the
preprocess program is run, and the docstrings/doc1.dst.txt file is
inserted in the doc string. One can run with Epytext format, which is
suitable for running Epydoc on the files afterwards, then run with
Sphinx, and finally re-run with "plain" format such that only quite
raw plain text appears in the final basename.py file (this is suitable
for Pydoc, for instance).
Usage: doconce insertdocstr format root [preprocessor options]
"""
try:
format = sys.argv[1]
root = sys.argv[2]
except:
print 'Usage: doconce insertdocstr format root [preprocessor options]'
sys.exit(1)
global doconce_program
if os.path.isfile(os.path.join('bin', 'doconce')):
doconce_program = 'python ' + os.path.join(os.getcwd(), 'bin', 'doconce')
else:
doconce_program = 'doconce' # must be found somewhere in PATH
# alternative: use sys.argv[3] argument to tell where to find doconce
# can then run "bin/doconce insertdocstr bin" from setup.py
print '\n----- doconce insertdocstr %s %s\nFind and transform doconce files (.do.txt) ...' % (format, root)
arg = format
os.path.walk(root, _walker_doconce, arg)
print 'Find and preprocess .p.py files (insert doc strings etc.)...'
arg = ' '.join(sys.argv[3:]) # options for preprocessor
os.path.walk(root, _walker_include, arg)
print '----- end of doconce insertdocstr -----\n'
# not used:
def _preprocess_all_files(rootdir, options=''):
"""
Run preprocess on all files of the form basename.p.ext
in the directory with root rootdir. The output of each
preprocess run is directed to basename.ext.
"""
def _treat_a_dir(arg, d, files):
for f in files:
path = os.path.join(d, f)
if '.p.' in f and not '.svn' in f:
basename_dotp, ext = os.path.splitext(f)
basename, dotp = os.path.splitext(basename_dotp)
outfilename = basename + ext
outpath = os.path.join(d, outfilename)
cmd = 'preprocess %s %s > %s' % (options, path, outpath)
system(cmd)
os.path.walk(rootdir, _treat_a_dir, None)
def _run_doconce(filename_doconce, format):
"""
Run doconce format filename_doconce.
The result is a file with extension .dst.txt (same basename
as filename_doconce).
"""
if filename_doconce.startswith('__'):
# old preprocessed file from aborted doconce execution
print 'skipped', filename_doconce
return
global doconce_program # set elsewhere
cmd = '%s format %s %s' % (doconce_program, format, filename_doconce)
print 'run', cmd
failure, outtext = commands.getstatusoutput(cmd)
if failure:
raise OSError, 'Could not run\n%s\nin %s\n%s\n\n\n' % \
(cmd, os.getcwd(), outtext)
out_filename = outtext.split()[-1]
root, ext = os.path.splitext(out_filename)
new_filename = root + '.dst.txt'
os.rename(out_filename, new_filename)
print '(renamed %s to %s for possible inclusion in doc strings)\n' % (out_filename, new_filename)
def _walker_doconce(arg, dir, files):
format = arg
# we move to the dir:
origdir = os.getcwd()
os.chdir(dir)
for f in files:
if f[-7:] == '.do.txt':
_run_doconce(f, format)
os.chdir(origdir)
def _run_preprocess4includes(filename_dotp_py, options=''):
pyfile = filename_dotp_py[:-5] + '.py'
cmd = 'preprocess %s %s > %s' % (options, filename_dotp_py, pyfile)
print 'run', cmd
failure, outtext = commands.getstatusoutput(cmd)
#os.remove(tmp_filename)
if failure:
raise OSError, 'Could not run\n%s\nin %s\n%s\n\n\n' % \
(cmd, os.getcwd(), outtext)
def _walker_include(arg, dir, files):
options = arg
# we move to the dir:
origdir = os.getcwd()
os.chdir(dir)
for f in files:
if f[-5:] == '.p.py':
_run_preprocess4includes(f, options)
os.chdir(origdir)
# ----------------------------------------------------------------------
def which(program):
"""
    Mimic the Unix ``which`` command and return the directory containing
    the program whose name is given in the `program` argument.
Return None if the program is not found in any of the
directories in the user's ``PATH`` variable.
"""
pathdirs = os.environ['PATH'].split(os.pathsep)
program_path = None
for d in pathdirs:
if os.path.isdir(d):
if os.path.isfile(os.path.join(d, program)):
program_path = d
break
return program_path
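# Illustrative sketch (not part of the original code) of typical use:
#
#   if which('latex') is None:
#       print '*** warning: latex is not installed'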
# subst_* below must be global because local functions in _latex2doconce
# disable the use of the important exec(f.read()) statement.
def subst_author_latex2doconce(m):
author_str = m.group('subst')
authors = author_str.split(r'\and')
institutions = ['']*len(authors)
# footnotes with institutions?
if r'\footnote{' in author_str:
for i, author in enumerate(authors):
if r'\footnote{' in author:
                pattern = r'\\footnote\{(.+?)\}'
m2 = re.search(pattern, author)
if m2:
institutions[i] = m2.group(1).strip()
authors[i] = re.sub(pattern, '', authors[i])
authors = ['AUTHOR: %s' % a.strip() for a in authors]
for i in range(len(authors)):
if institutions[i] != '':
authors[i] += ' at ' + institutions[i]
return '\n'.join(authors)
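# Illustrative sketch: applied to
#   \author{Jane Doe\footnote{Some University} \and John Doe}
# the substitution above produces
#   AUTHOR: Jane Doe at Some University
#   AUTHOR: John Doe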
def subst_minted_latex2doconce(m):
lang = m.group(1)
if lang in minted2bc:
return '!bc ' + minted2bc[lang]
else:
return '!bc'
def subst_paragraph_latex2doconce(m):
title = m.group(1)
ending = m.group(2)
if ending != '.':
title += ending
return '=== %s ===\n' % title
def _latex2doconce(filestr):
"""Run latex to doconce transformations on filestr."""
user_subst = []
user_replace = []
fixfile = 'latex2doconce_fix.py'
if os.path.isfile(fixfile):
# fixfile must contain subst and replace, to be
# applied _after_ the general subst and replace below
"""
# re.sub substitutions
subst = [
(r'^\be\s+', '!bt\n\\begin{equation}\n', re.MULTILINE),
(from_, to_, flags),
]
# str.replace replacements
replace = []
"""
f = open(fixfile)
exec(f.read())
f.close()
try:
user_subst = subst
user_replace = replace
except NameError, e:
print fixfile, 'does not contain subst and replace lists'
print e
sys.exit(1)
except Exception, e:
print fixfile, 'has errors'
print e
sys.exit(1)
# cf. doconce.latex.fix_latex_command_regex to see how important
# it is to quote the backslash correctly for matching, substitution
# and output strings when using re.sub for latex text!
subst = [
# hpl specific things:
# \ep is difficult to replace automatically...
#(r'\\ep(\\|\s+|\n)', r'\thinspace . \g<1>*'), # gives tab hinspace .
#(r'^\ep\n', r'\\thinspace .\n', re.MULTILINE),
#(r'\ep\n', r' \\thinspace .\n'),
#(r'\ep\s*\\\]', r' \\thinspace . \]'),
#(r'\ep\s*\\e', r' \\thinspace . \e'),
#(r'\\thinspace', 'thinspace'),
(r'\\code\{(?P<subst>[^}]+)\}', r'`\g<subst>`'),
(r'\\emp\{(?P<subst>[^}]+)\}', r'`\g<subst>`'),
(r'\\codett\{(?P<subst>[^}]+)\}', r'`\g<subst>`'),
(r'\{\\rm\\texttt\{(?P<subst>[^}]+)\}\}', r'`\g<subst>`'),
(r'\\idx\{(?P<subst>.+?)\}', r'idx{`\g<subst>`}'),
(r'\\idxf\{(?P<subst>.+?)\}', r'idx{`\g<subst>` function}'),
(r'\\idxs\{(?P<subst>.+?)\}', r'idx{`\g<subst>` script}'),
(r'\\idxp\{(?P<subst>.+?)\}', r'idx{`\g<subst>` program}'),
(r'\\idxc\{(?P<subst>.+?)\}', r'idx{`\g<subst>` class}'),
(r'\\idxm\{(?P<subst>.+?)\}', r'idx{`\g<subst>` module}'),
(r'\\idxnumpy\{(?P<subst>.+?)\}', r'idx{`\g<subst>` (from `numpy`)}'),
(r'\\idxnumpyr\{(?P<subst>.+?)\}', r'idx{`\g<subst>` (from `numpy.random`)}'),
(r'\\idxst\{(?P<subst>.+?)\}', r'idx{`\g<subst>` (from `scitools`)}'),
(r'\\idxfn\{(?P<subst>.+?)\}', r'idx{`\g<subst>` (FEniCS)}'),
(r'\\idxe\{(?P<attr>.+?)\}\{(?P<obj>.+?)\}', r'idx{`\g<attr>` \g<obj>}'),
(r'\\refeq\{(?P<subst>.+?)\}', r'(ref{\g<subst>})'),
        (r'^\\bpy\s+', r'\\bipy' + '\n', re.MULTILINE),
        (r'^\\epy\s+', r'\\eipy' + '\n', re.MULTILINE),
# general latex constructions
# (comments are removed line by line below)
(r'\\author\{(?P<subst>.+)\}', subst_author_latex2doconce),
(r'\\title\{(?P<subst>.+)\}', r'TITLE: \g<subst>'),
(r'\\chapter\*?\{(?P<subst>.+)\}', r'========= \g<subst> ========='),
(r'\\section\*?\{(?P<subst>.+)\}', r'======= \g<subst> ======='),
(r'\\subsection\*?\{(?P<subst>.+)\}', r'===== \g<subst> ====='),
(r'\\subsubsection\*?\{(?P<subst>.+)\}', r'=== \g<subst> ==='),
(r'\\paragraph\{(?P<subst>.+?)\}', r'__\g<subst>__'), # modified later
(r'\\chapter\*?\[.+\]\{(?P<subst>.+)\}', r'========= \g<subst> ========='),
(r'\\section\*?\[.+\]\{(?P<subst>.+)\}', r'======= \g<subst> ======='),
(r'\\subsection\*?\[.+\]\{(?P<subst>.+)\}', r'===== \g<subst> ====='),
(r'\\subsubsection\*?\[.+\]\{(?P<subst>.+)\}', r'=== \g<subst> ==='),
(r'\\emph\{(?P<subst>.+?)\}', r'*\g<subst>*'),
(r'\\texttt\{(?P<subst>[^}]+)\}', r'`\g<subst>`'),
(r'\{\\em\s+(?P<subst>.+?)\}', r'*\g<subst>*'),
(r'\{\\bf\s+(?P<subst>.+?)\}', r'_\g<subst>_'),
(r'\{\\it\s+(?P<subst>.+?)\}', r'*\g<subst>*'),
(r'\\textbf\{(?P<subst>.+?)\}', r'_\g<subst>_'),
(r'\\eqref\{(?P<subst>.+?)\}', r'(ref{\g<subst>})'),
(r'(\S)\\label\{', r'\g<1> \\label{'),
(r'(\S)\\idx(.?)\{', r'\g<1> \\idx\g<2>{'),
(r'(\S)\\index\{', r'\g<1> \\index{'),
(r'\\idxfont\{(.+?)\}', r'`\g<1>`'),
(r'\\index\{(?P<sortkey>.+?)@(?P<index>.+?)\}', r'idx{\g<index>}'),
(r'\\index\{(?P<subst>.+?)\}', r'idx{\g<subst>}'),
(r'\\href\{(?P<url>.+?)\}\{(?P<text>.+?)\}', r'"\g<2>": "\g<1>"'),
(r'\\input\{(?P<subst>.+?)\}', r'# #include "\g<subst>.do.txt"'),
] + user_subst
try:
for item in subst:
if len(item) == 2:
pattern, replacement = item
cpattern = re.compile(pattern)
elif len(item) == 3:
pattern, replacement, flags = item
cpattern = re.compile(pattern, flags)
if cpattern.search(filestr):
#print 'substituting', item, item[0]
filestr = cpattern.sub(replacement, filestr)
else:
#print 'no occurence of', item, item[0]
pass
except Exception, e:
print 'pattern: %s, replacement: %s' % (pattern, replacement)
raise e
replace = [
# make sure \beqan comes before \beqa and \beq in replacements...
(r'\begin{document}', ''),
(r'\end{document}', ''),
(r'\maketitle', ''),
(r'\[', r'\begin{equation*}'),
(r'\]', r'\end{equation*}'),
(r'\beqan', r'\begin{eqnarray*}'),
(r'\eeqan', r'\end{eqnarray*}'),
(r'\beqa', r'\begin{eqnarray}'),
(r'\eeqa', r'\end{eqnarray}'),
(r'\beq', r'\begin{equation}'),
(r'\eeq', r'\end{equation}'),
(r'\ben', r'\begin{enumerate}'),
(r'\een', r'\end{enumerate}'),
(r'\bit', r'\begin{itemize}'),
(r'\eit', r'\end{itemize}'),
(r'\para{', r'\paragraph{'),
(r'\refeq', r'\eqref'),
# dangerous double derivative: ("''", '"'),
# should be corrected manually ("``", '"'),
("Chapter~", "Chapter "),
("Section~", "Section "),
("Appendix~", "Appendix "),
("Appendices~", "Appendices "),
("Figure~", "Figure "),
("Table~", "Table "),
("Chapters~", "Chapters "),
("Sections~", "Sections "),
("Figures~", "Figures "),
("Tables~", "Tables "),
("Chap.~", "Chapter "),
("Sec.~", "Section "),
("App.~", "Appendix "),
("Fig.~", "Figure "),
("Tab.~", "Table "),
(".~", ". "),
('@@@CMD ', '@@@OSCMD '),
] + user_replace
# Pure string replacements:
for from_, to_ in replace:
if from_ in filestr:
if filestr != filestr.replace(from_, to_):
filestr = filestr.replace(from_, to_)
#print ' ....replacing', from_
# Add extra line after label after section
filestr = re.sub(r'(==={3,9}\n\\label\{.+?\}) *\n(\w)',
r'\g<1>\n\n\g<2>', filestr)
# problems (cannot understand this old code...):
"""
problems = [
r'\Sindex\{',
r'\Sidx.?\{',
r'\Slabel\{',
]
for problem in problems:
p = re.findall(problem, filestr)
if len(p) > 0:
print 'PROBLEM:', problem, '\n', p
"""
math_envirs = 'equation', 'eqnarray', r'eqnarray\*', 'align', r'align\*', r'equation\*'
# Avoid picking up equations in comment lines
math_starters = [r'^([^%%\n]*)(\\begin\{%s\})' % envir for envir in math_envirs]
math_starters.append(r'^([^%%\n]*)(\\\[)')
math_enders = [r'^([^%%\n]*)(\\end\{%s\})' % envir for envir in math_envirs]
math_enders.append(r'^([^%%\n]*)(\\\])')
# add !bt before and !et after math environments:
for e in math_starters:
filestr = re.sub(e, r'\g<1>\n!bt\n\g<2>', filestr, flags=re.MULTILINE)
for e in math_enders:
filestr = re.sub(e, r'\g<1>\g<2>\n!et', filestr, flags=re.MULTILINE)
# Make sure there is a line after heading (and label)
filestr = re.sub(r'(===[A-Za-z0-9 ]+?={3,9})\s+(\\label\{.+?\})\s+([A-Za-z ])', r'\g<1>\n\g<2>\n\n\g<3>', filestr)
filestr = re.sub('(===[A-Za-z0-9 ]+?={3,9})\s+([A-Za-z ])', r'\g<1>\n\n\g<2>', filestr)
# minted
pattern = r'\\begin\{minted}\[?.*\]?{(.+?)\}'
minted2bc = dict(python='py', cython='cy', fortran='f',
c='c', bash='sh', rst='rst',
matlab='m', perl='pl',
latex='latex', html='html', js='js',
xml='xml', ruby='rb')
minted2bc['c++'] = 'cpp'
filestr = re.sub(pattern, subst_minted_latex2doconce, filestr)
filestr = filestr.replace('\\end{minted}', '!ec')
pattern = r'\\begin\{Verbatim}\[?.*\]?{(.+?)\}'
filestr = re.sub(pattern, '!bc', filestr)
filestr = filestr.replace('\\end{Verbatim}', '!ec')
filestr = filestr.replace('\\begin{verbatim}', '!bc')
filestr = filestr.replace('\\end{verbatim}', '!ec')
for lang in minted2bc:
begin_pattern = r'\begin{%s}' % lang
end_pattern = r'\end{%s}' % lang
filestr = filestr.replace(begin_pattern, '!bc ' + minted2bc[lang])
filestr = filestr.replace(end_pattern, '!ec')
# ptex2tex code environments:
code_envirs = ['ccq', 'cod', 'pro', 'ccl', 'cc', 'sys',
'dsni', 'sni', 'slin', 'ipy', 'rpy',
'pyshell', 'plin', 'ver',
'warn', 'rule', 'summ',
'dat', 'txt'] # sequence important for replace!
for language in 'py', 'f', 'c', 'cpp', 'sh', 'pl', 'm':
for tp in 'cod', 'pro':
code_envirs.append(language + tp)
for e in code_envirs:
s = r'\b%s' % e
filestr = filestr.replace(s, '\n!bc ' + e)
s = r'\e%s' % e
filestr = filestr.replace(s, '!ec')
filestr = filestr.replace('bc rpy', 'bc sys')
# eqnarray -> align
filestr = filestr.replace(r'{eqnarray', '{align')
filestr = re.sub(r'&(\s*)=(\s*)&', '&\g<1>=\g<2>', filestr)
filestr = re.sub(r'&(\s*)\\approx(\s*)&', '&\g<1>\\\\approx\g<2>', filestr)
# \item alone on line: join with next line (indentation is fixed later)
filestr = re.sub(r'\\item\s+(\w)', r'\item \g<1>', filestr)
# Make sure all items in lists are on one line so we do not run
# into indentation problems (lookahead pattern makes this easy)
pattern = r'(\\item\s+.+?)(?=\\item|\\end\{)'
list_items = re.findall(pattern, filestr, flags=re.DOTALL)
for item in list_items:
filestr = filestr.replace(item, ' '.join(item.splitlines()) + '\n\n')
# Find subfigures (problems)
if filestr.count('\\subfigure{') > 0:
print '\nPROBLEM: found \\subfigure{...} - should be changed (combine individual'
print ' figure files into a single file; now subfigures are just ignored!)\n'
# Figures: assumptions are that subfigure is not used and that the label
# sits inside the caption. Also, width should be a fraction of
# \linewidth.
# figures with width spec: psfig, group1: filename, group2: width, group3: caption
pattern = re.compile(r'\\begin{figure}.*?\psfig\{.*?=([^,]+?),\s*width=(.+?)\\linewidth.*?\caption\{(.*?)\}\s*\\end{figure}', re.DOTALL)
filestr = pattern.sub(r'FIGURE: [\g<1>, width=\g<2>] {{{{\g<3>}}}}', filestr)
# note: cannot treat width=10cm, only width=0.8\linewidth
# figures: psfig, group1: filename, group2: caption
pattern = re.compile(r'\\begin{figure}.*?\psfig\{.*?=([^,]+).*?\caption\{(.*?)\}\s*\\end{figure}', re.DOTALL)
filestr = pattern.sub(r'FIGURE: [\g<1>, width=400] {{{{\g<2>}}}}', filestr)
# figures: includegraphics, group1: width, group2: filename, group3: caption
pattern = re.compile(r'\\begin{figure}.*?\includegraphics\[width=(.+?)\\linewidth\]\{(.+?)\}.*?\caption\{(.*?)\}\s*\\end{figure}', re.DOTALL)
filestr = pattern.sub(r'FIGURE: [\g<2>, width=400 frac=\g<1>] {{{{\g<3>}}}}', filestr)
# includegraphics with other measures of width and caption after fig
pattern = re.compile(r'\\begin{figure}.*?\includegraphics\[(.+?)]\{(.+?)\}.*?\caption\{(.*?)\}\s*\\end{figure}', re.DOTALL)
filestr = pattern.sub(r'# original latex figure with \g<1>\n\nFIGURE: [\g<2>, width=400 frac=1.0] {{{{\g<3>}}}}', filestr)
# includegraphics with other measures of width and caption before fig
pattern = re.compile(r'\\begin{figure}.*?\caption\{(.*?)\}\includegraphics\[(.+?)]\{(.+?)\}.*?\s*\\end{figure}', re.DOTALL)
filestr = pattern.sub(r'# original latex figure with \g<2>\n\nFIGURE: [\g<3>, width=400 frac=1.0] {{{{\g<1>}}}}', filestr)
# Better method: grab all begin and end figures and analyze the complete
# text between begin and end. That can handle comment lines in figures,
# which now destroy the regex'es above since they will grab the
# first image anyway.
captions = re.findall(r'\{\{\{\{(.*?)\}\}\}\}', filestr, flags=re.DOTALL)
for caption in captions:
orig_caption = caption
# Add label to end of caption
pattern = r'(\\label\{.*?\})'
m = re.search(pattern, caption)
if m:
label = m.group(1)
caption = caption.replace(label, '')
caption = caption.strip() + ' ' + label
# Strip off comments
lines = caption.splitlines()
for i in range(len(lines)):
if '%' in lines[i] and not r'\%' in lines[i]:
lines[i] = lines[i].split('%')[0]
# Make one line
caption = ' '.join(lines)
filestr = filestr.replace('{{{{%s}}}}' % orig_caption, caption)
# Process lists, comment lines, @@@CODE lines, and other stuff
inside_enumerate = False
inside_itemize = False
inside_code = False
appendix = False
lines = filestr.splitlines()
for i in range(len(lines)):
if lines[i].startswith('!bc'):
inside_code = True
if lines[i].startswith('!ec'):
inside_code = False
if (not inside_code) and lines[i].lstrip().startswith('%'):
lines[i] = '# ' + lines[i].lstrip()[1:]
if lines[i].startswith('@@@CODE'):
# Translate ptex2tex CODE envir to doconce w/regex
words = lines[i].split(' ') # preserve whitespace!
new_line = ' '.join(words[:2]) # command filename, no space in name
if len(words) > 2:
restline = ' '.join(words[2:])
new_line += ' fromto: '
if '@' in restline:
from_, to_ = restline.split('@')[:2]
new_line += re.escape(from_) # regex in doconce
new_line += '@' + re.escape(to_)
else:
new_line += re.escape(restline) + '@'
new_line = new_line.replace(r'\ ', ' ').replace(r'\,', ',').replace(r'\:', ':')
lines[i] = new_line
# two types of lists (but not nested lists):
if r'\begin{enumerate}' in lines[i] or r'\ben' in lines[i]:
inside_enumerate = True
lines[i] = ''
if r'\begin{itemize}' in lines[i] or r'\bit' in lines[i]:
inside_itemize = True
lines[i] = ''
if inside_enumerate or inside_itemize:
if lines[i].lstrip().startswith(r'\item'):
l = re.sub(r'\s*\\item\s*', '', lines[i]).strip()
lines[i] = ' * ' + l
if r'\end{enumerate}' in lines[i] or r'\een' in lines[i]:
inside_enumerate = False
lines[i] = ''
if r'\end{itemize}' in lines[i] or r'\eit' in lines[i]:
inside_itemize = False
lines[i] = ''
if re.search(r'^\s*\appendix', lines[i]):
appendix = True
if appendix and ('section{' in lines[i] or 'section*{' in lines[i]):
lines[i] = re.sub(r'section\*?\{(.+?)\}',
'section{Appendix: \g<1>}', lines[i])
if r'\bibliography' in lines[i]:
lines[i] = re.sub(r'\\bibliography\{(.+?)\}',
r'\n_Must run publish import on BibTeX file \g<1>!_\nBIBFILE: papers.pub\n',
lines[i])
lines[i] = re.sub(r'\\bibliographystyle\{.+?\}', '', lines[i])
# put all newcommands in a file (note: newcommands must occupy only one line!)
newlines = []
newcommands = []
for line in lines:
l = line.lstrip()
if l.startswith('\\newcommand{'):
newcommands.append(l)
else:
newlines.append(line)
filestr = '\n'.join(newlines)
if newcommands:
newcommands_file = 'newcommands_keep.tex'
nf = open(newcommands_file, 'w')
nf.writelines(newcommands)
nf.close()
# Exercises of the following particular format
pattern = re.compile(r'\\begin\{exercise\}\s*\\label\{(.*?)\}\s*\\exerentry\{(.*?)\}\s*$\s*(.+?)\\hfill\s*\$\\diamond\$\s*\\end\{exercise\}', re.DOTALL|re.MULTILINE)
filestr = pattern.sub(r'===== Exercise: \g<2> =====\n\label{\g<1>}\nfile=\n\n\g<3>\n', filestr)
# Fix "Name of program file:" construction in exercises
lines = filestr.splitlines()
program_file = None
for i in range(len(lines)-1, -1, -1):
if 'Name of program file' in lines[i]:
m = re.search(r'Name of program file:\s*`([^`]+?)`', lines[i])
if m:
program_file = m.group(1)
lines[i] = ''
if lines[i] == 'file=':
if program_file is not None:
lines[i] = 'file=' + program_file
program_file = None
else:
# No "Name of program file" was found after last file=.
# This exercise does not have a program file specified.
lines[i] = ''
filestr = '\n'.join(lines)
# Check idx{} inside paragraphs
lines = filestr.splitlines()
last_blank_line = -1
pattern = r'idx\{.+?\}'
inside_code_or_math = False
for i in range(len(lines)):
if lines[i].startswith('!bc') or lines[i].startswith('!bt'):
inside_code_or_math = True
if lines[i].startswith('!ec') or lines[i].startswith('!et'):
inside_code_or_math = False
if lines[i].strip() == '' and not inside_code_or_math:
last_blank_line = i
if 'idx{' in lines[i] and i < len(lines)-1 \
and lines[i+1].strip() != '':
# idx on a line and next line is text
line = re.sub(pattern, '', lines[i]).strip()
idx = re.findall(pattern, lines[i])
if line != '':
# We have idx{} in the middle of a paragraph, try move
lines[i] = line
else:
lines[i] = '# REMOVE (there was just a single idx{...} on this line)'
lines[last_blank_line] = '\n' + ' '.join(idx) + \
' ' + lines[last_blank_line]
# Tables are difficult: require manual editing?
inside_table = False
new_lines = []
headings = []
nhlines = 0
align_headings = []
for i in range(len(lines)):
if 'begin{table}' in lines[i] or 'begin{tabular}' in lines[i]:
inside_table = True
table_lines = []
if '{tabular}{' in lines[i]:
align = lines[i].split('{tabular}{')[-1].split('}')[0]
align = align.replace('|', '')
else:
align = None
if inside_table:
if '&' in lines[i]:
line = lines[i].replace('\\\\', '').strip()
if '\\hline' in line:
line = line.replace('\\hline', '')
nhlines += 1
if '\\multicolumn{' in line:
m = re.findall(r'\\multicolumn\{\d+\}\{(.)\}\{(.+?)\}',
line)
if m:
headings = [heading for align_char, heading in m]
align_headings = [align_char for align_char, heading in m]
line = line.split('&')
# Fill headings from right
for j in range(len(line)):
line[j] = ''
for j, h in enumerate(reversed(headings)):
line[len(line)-1-j] = h
line = '&'.join(line)
table_lines.append(line)
else:
# \hline, end{table, caption
pass
else:
new_lines.append(lines[i])
if inside_table and ('end{table}' in lines[i] or 'end{tabular}' in lines[i]):
inside_table = False
if table_lines:
max_column_width = 0
num_columns = 0
for j in range(len(table_lines)):
columns = [s.strip()
for s in table_lines[j].split('&')]
max_column_width = max([max_column_width] + \
[len(c) for c in columns])
num_columns = max(num_columns, len(columns))
table_lines[j] = columns
max_column_width += 2 # add space before/after widest column
# Construct doconce table
# (if the formatting gets wrong, see csv2table, that
# formatting works well)
width = max_column_width*num_columns + num_columns+1
separator0 = '|' + '-'*(width-2) + '|'
separator1 = separator0
separator2 = separator0
if align_headings:
# Insert align chars for header from the right
# (sometimes 1st column may have no header)
s = list(separator1)
for j in range(len(align_headings)):
s[len(s)-1-max_column_width/2 - j*max_column_width] = align_headings[len(align_headings)-1-j]
separator1 = ''.join(s)
if align is not None:
# As many chars in align as there are columns
s = list(separator2)
for j in range(len(align)):
s[max_column_width/2 + j*max_column_width] = align[j]
separator2 = ''.join(s)
column_format = ' %%-%ds ' % (max_column_width-2)
for j in range(len(table_lines)):
table_lines[j] = [column_format % c for c in table_lines[j]]
table_lines[j] = '|' + '|'.join(table_lines[j]) + '|'
table = '\n\n' + separator1 + '\n' + table_lines[0] + '\n' + \
separator2 + '\n' + '\n'.join(table_lines[1:]) + \
'\n' + separator0 + '\n\n'
if new_lines:
new_lines[-1] += table
else:
new_lines.append(table)
filestr = '\n'.join(new_lines)
filestr = re.sub(r'^# REMOVE \(there was.+$\s*', '', filestr,
flags=re.MULTILINE)
filestr = re.sub(r'(idx\{.+?\})\s+([^i\n ])', r'\g<1>\n\n\g<2>', filestr)
# Let paragraphs be subsubsections === ... ===
pattern = r'__([A-Z].+?)([.?!:])__'
filestr = re.sub(pattern, subst_paragraph_latex2doconce, filestr)
# Find all labels and refs and notify about refs to external
# labels
problems = False
labels = re.findall(r'label\{(.+?)\}', filestr) # figs have label, not \label
refs = re.findall(r'\\ref\{(.+?)\}', filestr)
eqrefs = re.findall(r'\\eqref\{(.+?)\}', filestr)
pagerefs = re.findall(r'\\pageref\{(.+?)\}', filestr)
refs = refs + eqrefs + pagerefs
'''
for ref in refs:
if ref not in labels:
print 'found reference but no label{%s}' % ref
problems = True
# Attempt to do a generalized reference
# (Make table of chapters, stand-alone docs and their labels - quite easy if associated chapters and their URLs are in a file!!!)
filestr = filestr.replace(r'\ref{%s}' % ref,
r'(_PROBLEM: external ref_) ref{%s}' % ref)
#print r'FIX external ref: ref[%(ref)s]["section where %(ref)s is": "http URL with %(ref)s" cite{doc_with_%(ref)s}]["section where %(ref)s is": "http URL with %(ref)s" cite{doc_with_%(ref)s}]' % vars()
'''
for ref in pagerefs:
print 'pageref{%s} should be rewritten' % ref
filestr = filestr.replace(r'\pageref{%s}' % ref,
r'(_PROBLEM: pageref_) \pageref{%s}' % ref)
problems = True
print '\n## search for CHECK to see if auto editing was correct\n'
if problems:
print '\n## search for PROBLEM: to see need for manual adjustments\n\n\n'
filestr = filestr.replace(r'\label{', 'label{') # done above
filestr = filestr.replace(r'\ref{', 'ref{')
filestr = filestr.replace(r'\cite{', 'cite{')
filestr = filestr.replace(r'\cite[', 'cite[')
filestr = filestr.replace(r'\noindent', r"""# #if FORMAT in ("latex", "pdflatex")
\noindent
# #endif""")
filestr = re.sub(r'\\vspace\{(.+?)\}', r"""# #if FORMAT in ("latex", "pdflatex")
\\vspace{\g<1>}
# #endif""", filestr)
filestr = filestr.replace(r'\_', '_')
filestr = filestr.replace(r' -- ', ' - ')
filestr = filestr.replace(r'}--ref', '}-ref')
filestr = filestr.replace(r'})--(ref', '})-(ref')
filestr = filestr.replace(r'~', ' ')
filestr = filestr.replace(r'\end{table}', '')
# Treat footnotes
# Footnote at the end of a sentence: enclose in parenthesis
# (regex is not perfect, so the result should be checked manually)
pattern = r'\\footnote\{([^}]+)\}\.'
filestr = re.sub(pattern, '.( _CHECK: footnote_ at end of sentence placed in parenthesis) (\g<1>) ', filestr)
# Without final . means footnote in the middle of a sentence
pattern = r'\\footnote\{([^}]+)\}'
filestr = re.sub(pattern, '( _PROBLEM: footnote_ in the middle of a sentence must be rewritten) (\g<1>)', filestr)
# Check that !bc, !ec, !bt, !ec are at the beginning of the line
for envir in 'c', 't':
for tag in '!b', '!e':
command = tag + envir
pattern = r'^ +' + command
filestr = re.sub(pattern, command, filestr, flags=re.MULTILINE)
# Ensure a blank line before !bt and !bc for nicer layout
# (easier with lookahead! - see below)
#filestr = re.sub(r'([A-Za-z0-9,:?!; ])\n^!bt', r'\g<1>\n\n!bt',
# filestr, flags=re.MULTILINE)
#filestr = re.sub(r'([A-Za-z0-9,:?!; ])\n^!bc', r'\g<1>\n\n!bc',
# filestr, flags=re.MULTILINE)
filestr = re.sub(r'\s+(?=^!bt|^!bc)', '\n\n', filestr, flags=re.MULTILINE)
# Inline equations cause trouble
filestr = re.sub(r'!et +([^\n])', '!et\n\g<1>', filestr)
return filestr
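# A minimal illustrative sketch (not part of the original module): it applies
# two of the substitution rules from the subst list above to a hand-written
# LaTeX fragment, so the heading and inline-code translation performed by
# _latex2doconce can be seen in isolation. The sample text is made up.
def _demo_latex2doconce_heading():
    """Show how \\section{} and \\texttt{} map to DocOnce markup (sketch)."""
    import re
    sample = '\\section{Getting started}\nRun \\texttt{doconce format html mydoc}.'
    out = re.sub(r'\\section\*?\{(.+)\}', r'======= \g<1> =======', sample)
    out = re.sub(r'\\texttt\{([^}]+)\}', r'`\g<1>`', out)
    # out is now:
    # ======= Getting started =======
    # Run `doconce format html mydoc`.
    return out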
def latex2doconce():
"""
Apply transformations to a latex file to help translate the
document into DocOnce format.
Suggestions for preparations: avoid pageref, replace subfigures
by files combined to a single file, avoid footnotes, index inside
paragraphs, do not start code blocks with indentation, ...
"""
print '# #ifdef LATEX2DOCONCE'
print 'This is the result of the doconce latex2doconce program.'
print 'The translation from LaTeX is just a helper. The text must'
print 'be carefully examined! (Be prepared that some text might also'
print 'be lost in the translation - in rare cases.)\n'
filename = sys.argv[1]
f = open(filename, 'r')
filestr = f.read()
f.close()
filestr = _latex2doconce(filestr)
print '# #endif' # end of intro with warnings etc.
print filestr # final output
def html2doconce():
"""
Apply transformations to an html file to help translate the
document into DocOnce format.
"""
print '# #ifdef HTML2DOCONCE'
print 'This is the result of the doconce html2doconce program.'
print 'The translation from HTML is just a helper. The text must'
print 'be carefully examined! (Be prepared that some text might also'
print 'be lost in the translation - in seldom cases.)\n'
filename = sys.argv[1]
f = open(filename, 'r')
filestr = f.read()
f.close()
filestr = _html2doconce(filestr)
print '# #endif' # end of intro with warnings etc.
print filestr # final output to stdout
def _html2doconce(filestr):
# All headings
headings = {1: 7, 2: 5, 3: 3}
def subst(m):
border = '='*headings[int(m.group(1))]
return border + ' ' + m.group(2) + ' ' + border + '\n'
filestr = re.sub(r'<h(\d)>(.+?)</h\d>', subst, filestr)
# Paragraphs
filestr = re.sub(r'<p>\s*', '\n', filestr)
# Various tags
filestr = re.sub(r'<em>(.+?)</em>', '*\g<1>*', filestr, flags=re.DOTALL)
filestr = re.sub(r'<b>(.+?)</b>', '_\g<1>_', filestr, flags=re.DOTALL)
filestr = re.sub(r'<tt>(.+?)</tt>', '`\g<1>`', filestr, flags=re.DOTALL)
filestr = re.sub(r'^\s*<title>(.+?)</title>', 'TITLE: \g<1>', filestr,
flags=re.MULTILINE)
filestr = re.sub(r'<!--(.+?)-->', '#\g<1>', filestr, flags=re.DOTALL)
filestr = re.sub(r'<a href="(.+?)">(.+?)</a>', '"\g<2>": "\g<1>"', filestr,
flags=re.DOTALL)
filestr = re.sub(r'<img.*? src="(.+?)".*?>',
'\nFIGURE: [\g<1>, width=600 frac=1]\n', filestr,
flags=re.DOTALL)
filestr = re.sub(r'\s*^<pre>\s*', '\n\n!bc cod\n', filestr,
flags=re.MULTILINE)
filestr = re.sub(r'</pre>\s*', '!ec\n', filestr,
flags=re.MULTILINE)
# <code>?
filestr = re.sub(r'<ul>', '\n', filestr)
filestr = re.sub(r'</ul>', '\n', filestr)
# All lists become bullet lists, read line by line and use a stack
# to improve this
if '<ol>' in filestr:
print '*** warning: enumerated lists become bullet lists'
filestr = re.sub(r'<ol>', '\n', filestr)
filestr = re.sub(r'</ol>', '\n', filestr)
filestr = re.sub(r'<li>', ' * ', filestr)
if '<table' in filestr:
print '*** warning: html2doconce cannot handle tables.'
print ' Recommendation: edit manually to CSV format and run'
print ' doconce csv2table command to create table.'
return filestr
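# A small illustrative sketch (not in the original code): the core HTML-to-
# DocOnce tag mapping used by _html2doconce, applied to a hand-written snippet.
# Only the heading, <tt>, <em> and <p> rules from above are exercised here.
def _demo_html2doconce_tags():
    """Map a few HTML tags to DocOnce markup the way _html2doconce does (sketch)."""
    import re
    sample = '<h1>Intro</h1><p>Use <tt>doconce</tt> and <em>enjoy</em>.'
    out = re.sub(r'<h(\d)>(.+?)</h\d>',
                 lambda m: '='*7 + ' ' + m.group(2) + ' ' + '='*7 + '\n', sample)
    out = re.sub(r'<tt>(.+?)</tt>', r'`\g<1>`', out)
    out = re.sub(r'<em>(.+?)</em>', r'*\g<1>*', out)
    out = re.sub(r'<p>\s*', '\n', out)
    # out is a '======= Intro =======' heading followed by
    # 'Use `doconce` and *enjoy*.'
    return out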
def latex_dislikes():
"""
Report constructions in latex that will not translate to doconce
format by latex2doconce and constructions that are not recommended
for other common formats.
Rules:
* Collect all newcommands in a separate file, one definition
per line (i.e., multi-line definitions are not allowed).
* Do not use environments for algorithms.
* Do not use environments for computer code in floating figures.
* Tables will not be floating. Computer code, tables, algorithms,
anything but figures, will be inline at the position where they
are defined.
* Do not use `description` lists.
"""
filename = sys.argv[1]
f = open(filename, 'r')
filestr = f.read()
f.close()
# Should we first run through latex2doconce? Many fixes there
# simplifies things here...
filestr = _latex2doconce(filestr)
lines = filestr.splitlines()
# Add line numbers
for i in range(len(lines)):
lines[i] = '%4d: ' % (i+1) + lines[i]
lines = '\n'.join(lines).splitlines()
# add line numbers to each line in the latex file
# list matches (begin, commands) that are problematic
# and report them for every line
begin_likes = [
'equation',
'equation*',
'align',
'align*',
'itemize',
'enumerate',
]
begin_ok = [
'eqnarray',
'eqnarray*',
]
# dislikes: list of (regex, explanation)
dislikes = [(r'%s~?\s*\\ref\{(.+?)\}' % tp,
r'use %s in \g<1>' % (tp[0].upper() + tp[1:]))
for tp in
('section', 'chapter', 'appendix',
'sec.', 'chap.', 'app.')]
dislikes += [
(r'\\subfigure', 'Avoid \\subfigure, combine images to a single new image.'),
(r'\\pageref', 'Avoid \\pageref entirely (page numbers do not make sense in most electronic formats).'),
#(r'\\psfig\{', 'Avoid \\psfig, use \\includegraphics.'),
(r'\\begin\{table\}', 'Tables are handled, but can easily become problematic. Test outcome of latex2doconce for this table, make it inline (only tabular) and of a form that easily translates to doconce.'),
(r'\\begin\{tabular\}', 'Tables are handled, but can easily become problematic. Test outcome of latex2doconce for this tabular environment and adjust if necessary/possible.'),
]
likes_commands = []
for line in lines:
if r'\begin{' in line:
m = re.search(r'\\begin\{(.+?)\}', line)
if m:
envir = m.group(1)
if envir in begin_likes:
pass # fine!
elif envir in begin_ok:
print """
Found \\begin{%s}, which can be handled, but it is
recommended to avoid this construction.""" % envir
else:
print """
Found \\begin{%s}, which will not carry over to DocOnce
and other formats.""" % envir
# Could have message here (begin_messages) that
# guide rewrites, e.g., lstlisting etc.
print line + '\n'
for regex, message in dislikes:
if re.search(regex, line):
print message
print line + '\n'
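# Illustrative sketch (not part of the original source): one of the generated
# dislike regexes in action. The sample line is made up; note that the patterns
# above are built from lower-case words like 'section'.
def _demo_dislike_section_ref():
    """Return True if the sample line triggers the 'section~\\ref' dislike."""
    import re
    line = 'as explained in section~\\ref{sec:intro}'
    return bool(re.search(r'section~?\s*\\ref\{(.+?)\}', line))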
def _usage_ipynb2doconce():
print 'doconce ipynb2doconce notebook.ipynb [--cell_delimiter]'
print 'translate IPython/Jupyter notebooks to doconce'
def ipynb2doconce():
if len(sys.argv) < 2:
_usage_ipynb2doconce()
sys.exit(0)
cell_delimiter = '--cell_delimiter' in sys.argv
filename = sys.argv[1]
if not os.path.isfile(filename):
print '*** error: no file "%s" found' % filename
sys.exit(1)
f = open(filename, 'r')
jsonstring = f.read()
f.close()
# Turn json string into a NotebookNode object
from IPython.nbformat.reader import reads
nb = reads(jsonstring)
# nb is dict-like with keys nbformat_minor, cells, nbformat, metadata
dostr = ''
from doconce import markdown2doconce
cell_type_prev = None
for cell in nb['cells']:
#print 'XXX', cell['cell_type'], 'prev:', cell_type_prev, '\n', cell['source']
if cell_delimiter and cell['cell_type'] != cell_type_prev:
dostr += '# ---------- %s cell\n' % cell['cell_type']
if cell['cell_type'] == 'markdown':
s = markdown2doconce(cell['source'], ipynb_mode=True)
if cell_type_prev == 'markdown':
s += '\n'
else:
s += '\n\n'
elif cell['cell_type'] == 'code':
collapsed = cell['metadata'].get('collapsed', False) \
if cell['metadata'] else False
source = cell['source']
# Remove % (matplotlib) directives from source
source = re.sub('^%.+\n', '', source, flags=re.MULTILINE).strip()
s = '\n!bc py' + ('hid' if collapsed else 'cod') + '\n' + source + '\n!ec\n'
dostr += s
cell_type_prev = cell['cell_type']
# Fix common problems
# Missing blank line before heading
dostr = re.sub('^!e([ct])\n===', r'!e\g<1>\n\n===', dostr, flags=re.MULTILINE)
# Too many blanks before !bt and !bc
dostr = re.sub(r'\n\n\n+!b([ct])', r'\n\n!b\g<1>', dostr)
filename = filename.replace('.ipynb', '.do.txt')
f = open(filename, 'w')
f.write(dostr)
f.close()
print 'output in', filename
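# Illustrative sketch (not part of the original code): the essential cell
# translation rule used by ipynb2doconce, applied to a hand-written cell
# dictionary. Markdown cells pass through as text; code cells become
# !bc pycod blocks, or !bc pyhid blocks when the cell is collapsed.
def _demo_ipynb_cell_to_doconce(cell):
    """Translate one notebook-like cell dict to a DocOnce snippet (sketch)."""
    if cell.get('cell_type') == 'code':
        collapsed = (cell.get('metadata') or {}).get('collapsed', False)
        envir = 'pyhid' if collapsed else 'pycod'
        return '\n!bc %s\n%s\n!ec\n' % (envir, cell.get('source', '').strip())
    return cell.get('source', '') + '\n\n'

# Example:
#   _demo_ipynb_cell_to_doconce({'cell_type': 'code', 'metadata': {},
#                                'source': 'x = 1'})
# returns '\n!bc pycod\nx = 1\n!ec\n'.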
# ---- Attempt to make a pygments syntax highlighter for DocOnce ----
try:
import pygments as pygm
from pygments.lexer import RegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, \
Name, String, Generic, Operator, Number, Whitespace, Literal
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.styles import get_all_styles
except ImportError:
pygm = None
print 'pygments is not installed'
_abort()
class DocOnceLexer(RegexLexer):
"""
Lexer for DocOnce files.
"""
name = 'DocOnce'
aliases = ['doconce']
filenames = ['*.do.txt']
mimetypes = ['text/x-doconce']
tokens = {
'general': [
(r'\#.*\n', Comment),
(r'[{}]', Name.Builtin),
],
'root': [
(r' .*\n', Text),
(r'\#.*\n', Comment),
(r'idx', Name.Builtin),
(r'label\{.+?\}', Name.Builtin),
(r'TITLE:', Generic.Heading),
(r'AUTHOR:', Generic.Heading),
(r'DATE:', Generic.Heading),
(r'TOC:', Generic.Heading),
(r'FIGURE:', Name.Builtin),
(r'MOVIE:', Name.Builtin),
#(r'!.+\n', Generic.Strong),
(r'!.+\n', Name.Builtin),
(r'@@@CODE .*\n', Generic.Subheading),
(r'=== .*\n', Generic.Subheading),
(r'__.+?__\n', Generic.Subheading),
(r'={3,9} .*\n', Generic.Heading),
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
(r'.*\n', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
class DocOnceLexer(RegexLexer):
"""
Lexer for DocOnce files.
Built this one from TexLexer and extended with DocOnce stuff.
Difficult to get both to work
"""
name = 'DocOnce'
aliases = ['doconce']
filenames = ['*.do.txt']
mimetypes = ['text/x-doconce']
tokens = {
'general': [
(r'#.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
(r'!.+\n', Name.Builtin),
(r'@@@CODE .*\n', Generic.Subheading),
(r'=== .*\n', Generic.Subheading),
(r'__.+?__\n', Generic.Subheading),
(r'={3,9} .+? ={3,9}\n', Generic.Heading),
(r'idx', Name.Builtin),
(r'label\{.+?\}', Name.Builtin),
(r'TITLE:', Generic.Heading),
(r'AUTHOR:', Generic.Heading),
(r'DATE:', Generic.Heading),
(r'TOC:', Generic.Heading),
(r'FIGURE:', Name.Builtin),
(r'MOVIE:', Name.Builtin),
include('general'),
# these two are crucial - no. 2 turns on latex math everywhere
# but not doconce, while no. 1 does it the other way around
#(r'.*\n', Text),
(r'[A-Za-z0-9 ]?\n', Text), # makes latex stuff correct
(r'[^\\$%&_^{}]+', Text),
(r'.*\n', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
"""
tokens = {
'general': [
(r'\#.*\n', Comment),
(r'[{}]', Name.Builtin),
],
'root': [
(r' .*\n', Text),
#(r'!.+\n', Generic.Strong),
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
"""
def analyse_text(text):
return True
# The version below is the best one so far (still far from complete, not
# everything works as intended, so much experimentation is needed to
# extend it, but the result with doconce pygmentize mydoc perldoc looks
# fine). Need to understand more of how the lexers work to make
# further progress: look at DiffLexer, TexLexer, RstLexer, and other text
# lexers in /usr/local/lib/python2.7/dist-packages/Pygments-1.6dev_20131113-py2.7.egg/pygments/lexers/text.py.
# It seems that there is no markdown lexer on the net.
class DocOnceLexer(RegexLexer):
"""
Lexer for DocOnce files.
"""
name = 'DocOnce'
aliases = ['doconce']
filenames = ['*.do.txt']
mimetypes = ['text/x-doconce']
#flags = re.MULTILINE | re.DOTALL # did not work
tokens = {
'root': [
(r' .*\n', Text),
(r'\#.*\n', Comment),
(r'(label|ref|idx)\{.+?\}', Name.Builtin),
(r'\\(begin|end)\{.+?\}', Name.Builtin),
#(r'\$.+?\$', String), # works only occasionally
#(r'label\{.+?\}', Name.Builtin),
#('idx', Keyword),
(r'TITLE:.+\n', Generic.Heading),
(r'AUTHOR:', Generic.Heading),
(r'DATE:', Generic.Heading),
(r'TOC:', Generic.Heading),
(r'FIGURE:.*\n', Name.Builtin),
(r'MOVIE:.*\n', Name.Builtin),
(r'![a-z]+', Keyword),
(r'@@@CODE .*\n', Generic.Subheading),
(r'__.+?__', Generic.Subheading),
(r'\|.+\|\n', String), # tables
(r'`.+?`', String.Backtick), # does not work
(r'".+?"', String), # does not work
(r'={3,9} .* ={3,9}\n', Generic.Heading),
(r'.*\n', Text),
],
}
def analyse_text(text):
return True
def _usage_pygmentize():
print 'Usage: doconce pygmentize doconce-file [pygments style]'
def pygmentize():
"""
Typeset a DocOnce file with pygmentize, using the DocOnceLexer
class above.
An alternative is to register the DocOnceLexer with Pygments.
"""
if len(sys.argv) < 2:
_usage_pygmentize()
sys.exit(1)
filename = sys.argv[1]
if not filename.endswith('.do.txt'):
filename += '.do.txt'
try:
pygm_style = sys.argv[2]
except IndexError:
pygm_style = 'default'
f = open(filename, 'r'); text = f.read(); f.close()
lexer = DocOnceLexer()
formatter = HtmlFormatter(noclasses=True, style=pygm_style)
text = highlight(text, lexer, formatter)
f = open(filename + '.html', 'w'); f.write(text); f.close()
print 'pygmentized doconce code written to %s.html' % filename
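# Illustrative sketch (not in the original file): the DocOnceLexer class above
# can also be used programmatically with Pygments, independently of the
# pygmentize() command-line wrapper. The style name 'default' is just an example.
def _demo_highlight_doconce_string(text):
    """Return HTML with DocOnce markup highlighted by DocOnceLexer (sketch)."""
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    return highlight(text, DocOnceLexer(), HtmlFormatter(noclasses=True, style='default'))

# Example:
#   _demo_highlight_doconce_string('======= A section =======\n!bc pycod\nx = 1\n!ec\n')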
def _usage_makefile():
print 'Usage: doconce makefile doconce-file [html pdflatex latex sphinx gwiki pandoc ipynb deck reveal beamer ...]'
print 'Example: doconce makefile mydoc.do.txt html sphinx'
print """
A script make.py is generated with the basic steps for running a
spellcheck on .do.txt files followed by commands for producing
output in various formats (in the sequence specified on the command
line). If no formats are specified, pdflatex, html, sphinx, deck,
reveal, and beamer are produced.
make.py is a template: edit to set the desired options for compiling
to the various formats.
make.py autogenerates a unix shell script with all commands: you may
use this shell script instead of make.py.
"""
def makefile():
"""Generate a generic (Python) makefile for compiling doconce files."""
if len(sys.argv) < 2:
_usage_makefile()
sys.exit(0)
dofile = sys.argv[1]
if dofile.endswith('.do.txt'):
dofile = dofile[:-7]
formats = sys.argv[2:]
# make.py with lots of functions for creating everything you can
# create, easy to use in ipython
# make.py mydoc sphinx pdflatex beamer
if not formats:
formats = ['pdflatex', 'html', 'sphinx', 'deck', 'reveal', 'beamer']
make = open('make.py', 'w')
make.write('''\
#!/usr/bin/env python
"""
Automatically generated file for compiling doconce documents.
"""
import sys, glob, os, shutil, subprocess
logfile = 'tmp_output.log' # store all output of all operating system commands
f = open(logfile, 'w'); f.close() # touch logfile so it can be appended
unix_command_recorder = []
def system(cmd):
"""Run system command cmd using the simple os.system command."""
print cmd
failure = os.system(cmd)
if failure:
print """Command
%s
failed""" % cmd
sys.exit(1)
unix_command_recorder.append(cmd) # record command for bash script
def system(cmd):
"""Run system command cmd using subprocess module."""
print cmd
try:
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print """Command
%s
failed""" % cmd
print 'Return code:', e.returncode
print e.output
sys.exit(1)
print output
f = open(logfile, 'a'); f.write(output); f.close()
unix_command_recorder.append(cmd) # record command for bash script
def spellcheck():
for filename in glob.glob('*.do.txt'):
if not filename.startswith('tmp'):
cmd = 'doconce spellcheck -d .dict4spell.txt %(filename)s' % vars()
system(cmd)
def latex(name,
latex_program='pdflatex', # or 'latex'
options='--latex_code_style=vrb',
ptex2tex='',
version='paper', # or 'screen', '2up', 'A4', 'A4-2up'
postfix='', # or 'auto'
):
"""
Make latex/pdflatex (according to latex_program) PDF file from
the doconce file name (without any .do.txt extension).
version can take the following values:
* paper: normal page size, --device=paper
* 2up: normal page size, --device=paper, 2 pages per sheet
* A4: A4 page size, --device=paper
* A4-2up: A4 page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfix)s reveal --html_slide_theme=%(theme)s' % vars()
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output=%(name)s-%(postfix)s' % vars()
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfix)s deck --html_slide_theme=%(theme)s' % vars()
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
cmd = 'pdflatex %(shell_escape)s %(name)s' % vars()
system(cmd)
system(cmd)
system('cp %(name)s.pdf %(name)s-%(postfix)s.pdf' % vars())
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
cmd = 'doconce split_rst %(name)s' % vars()
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(options)s %(name)s' % vars()
system(cmd)
# Compile sphinx
cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()
system(cmd)
def doconce2format(name, format, options=''):
"""Make given format from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()
system(cmd)
def plain(name, options=''):
doconce2format(name, 'plain', options)
def pandoc(name, options=''):
doconce2format(name, 'pandoc', options)
def ipynb(name, options=''):
doconce2format(name, 'ipynb', options)
def cwiki(name, options=''):
doconce2format(name, 'cwiki', options)
def mwiki(name, options=''):
doconce2format(name, 'mwiki', options)
def gwiki(name, options=''):
doconce2format(name, 'gwiki', options)
def main():
"""
Produce various formats from the doconce source.
"""
''')
make.write('''
dofile = "%(dofile)s"
spellcheck()
common_options = ''
''' % vars())
for format in formats:
if format.endswith('latex'):
make.write("""
# --- latex ---
common_latex_options = ' --latex_code_style=vrb'
for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':
latex(
dofile,
latex_program='pdflatex',
options=common_options + common_latex_options,
version=version,
postfix='auto')
""")
elif format == 'html':
make.write("""
# --- HTML ---
common_html_options = ''
# HTML Bootstrap
bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --html_toc_depth=2 --pygments_html_style=default'
html(
dofile,
options=common_options + common_html_options + bootstrap_options,
split=True)
# One long HTML file
#html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)
# Solarized HTML
#html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)
""")
elif format == 'sphinx':
make.write("""
# --- Sphinx ---
sphinx_themes = ['pyramid',]
for theme in sphinx_themes:
dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme
sphinx(
dofile,
options=common_options + '',
dirname=dirname,
theme=theme,
automake_sphinx_options='',
split=False)
""")
elif format == 'reveal':
make.write("""
# --- reveal.js slides ---
reveal_slides(
dofile,
options=common_options + '',
postfix='reveal',
theme='darkgray')
""")
elif format == 'deck':
make.write("""
# --- deck.js slides ---
deck_slides(
dofile,
options=common_options + '',
postfix='deck',
theme='sandstone.default')
""")
elif format == 'beamer':
make.write("""
# --- latex beamer slides ---
beamer_slides(
dofile,
options=common_options + '',
postfix='beamer',
theme='red_shadow',
ptex2tex_envir='minted') # 'ans:nt'
""")
elif format.endswith('wiki') or format in ('pandoc', 'plain', 'ipynb'):
make.write("""
doconce2format(dofile, format, options=common_options + '')
""")
# Are there slides documents in addition?
dofile_slides = glob.glob('slides_*.do.txt')
for dofile in dofile_slides:
# Is the TOC surrounded by a WITH_TOC test directive?
f = open(dofile, 'r'); text = f.read(); f.close()
with_toc = ' -DWITH_TOC' if 'WITH_TOC' in text else ''
dofile = dofile[:-7]
make.write("""
# Slides file %(dofile)s
dofile = "%(dofile)s"
""" % vars())
for format in formats:
if format == 'html':
make.write("""
html_style = 'bloodish'
# One long HTML file
html(
dofile,
options=common_options + ' --html_output=%(dofile)s-1 --html_style=%(html_style)s' % vars() + with_toc,
split=False)
system('doconce replace "<li>" "<p><li>" %(dofile)s-1.html' % vars())
# Splitted HTML file
html(
dofile,
options=common_options + ' --html_style=%(html_style)s' % vars() + with_toc,
split=True)
system('doconce replace "<li>" "<p><li>" %(dofile)s.html' % vars())
# One long solarized file
html(
dofile,
options=common_options + ' --html_style=solarized --html_output=%(dofile)s-solarized --pygments_html_style=perldoc --pygments_html_linenos' % vars() + with_toc,
split=False)
system('doconce replace "<li>" "<p><li>" %(dofile)s-solarized.html' % vars())
reveal_slides(
dofile,
options=common_options + '',
postfix='reveal',
theme='darkgray')
deck_slides(
dofile,
options=common_options + '',
postfix='deck',
theme='sandstone.default')
""")
elif format.endswith('latex'):
make.write("""
beamer_slides(
dofile,
options=common_options + ' --latex_code_style=pyg',
postfix='beamer',
theme='red_shadow')
# Ordinary latex document (for printing)
latex(
dofile,
latex_program='pdflatex',
options=common_options + ' --device=paper' + with_toc,
)
""")
make.write("""
# Dump all Unix commands run above as a Bash script
bash = open('tmp_make.sh', 'w')
print 'see tmp_make.sh for an equivalent auto-generated unix script'
bash.write('''\
#!/bin/bash
set -x # display all commands in output
# Safe execution of a Unix command: exit if failure
function system {
"$@"
if [ $? -ne 0 ]; then
echo "make.sh: unsuccessful command $@"
echo "abort!"
exit 1
fi
}
''')
for cmd in unix_command_recorder:
if cmd.startswith('doconce format') or cmd.startswith('rm '):
bash.write('\\n') # delimiter line in script
bash.write('system ' + cmd + '\\n')
bash.close()
print 'see tmp_output.log for the output of all the commands'
""")
make.write("""
if __name__ == '__main__':
main()
""")
make.close()
print 'generated make.py for compiling %s.do.txt' % dofile
print 'make.py is basically a template: edit to set the desired options'
print '\n*** warning: the generated make.py script is experimental\n and only tested to a limited extent! (latex, html, sphinx are tested)'
def _usage_fix_bibtex4publish():
print 'Usage: doconce fix_bibtex4publish file1.bib file2.bib ...'
print """
Fix a bibtex file so that the values are enclosed by braces (only)
and publish can import the data.
"""
def fix_bibtex4publish():
"""Edit BibTeX files so that publish can import them."""
if len(sys.argv) < 2:
_usage_fix_bibtex4publish()
sys.exit(0)
bibfiles = sys.argv[1:]
for bibfile in bibfiles:
if not bibfile.endswith('.bib'):
print bibfile, 'is not a BibTeX file'
_abort()
shutil.copy(bibfile, bibfile + '.old~~')
f = open(bibfile, 'r')
lines = f.readlines()
f.close()
print '\n*** working with', bibfile, '\n'
for line in lines:
print line
keys = []
for i in range(len(lines)):
# Classification line? Fix to lower case publication type
if lines[i].lstrip().startswith('@'):
m = re.search(r'^\s*@(.+?)\{(.+), *$', lines[i])
if m:
pub_type = m.group(1)
key = m.group(2)
print '\n--- found %s (key %s)\n' % (pub_type, key)
pub_type = pub_type.lower()
if pub_type == 'incollection':
pub_type = 'inproceedings'
keys.append(key)
lines[i] = '@%s{%s,\n' % (pub_type, key)
# Data line? Enclose value in {}, lower case variable, etc.
elif re.search(r'^\s*[A-Za-z ]+=', lines[i]):
words = lines[i].split('=')
old_variable = words[0]
variable = old_variable.lower().strip()
if len(words) > 2:
# A = in the value..
print words
value = '='.join(words[1:]).strip()
else:
value = words[1].strip()
if value[-1] == ',':
value = value[:-1]
old_value = value
fixed = False
if value.startswith('"'):
value = '{' + value[1:-1].lstrip()
fixed = True
if value.endswith('"'):
value = value[:-1].rstrip() + '}'
fixed = True
if value[0] != '{':
value = '{' + value.lstrip()
fixed = True
if value[-1] != '}':
value = value.rstrip() + '}'
fixed = True
lines[i] = '%-15s = %s,\n' % (variable, value)
if fixed:
print '%s = %s' % (old_variable, old_value)
print '...fixed to...'
print '%-15s = %s\n' % (variable, value)
elif lines[i].strip() == '':
pass # ok
elif lines[i].strip() == '}':
pass # ok
elif lines[i].lstrip().startswith('%'):
pass # ok
else:
# Loose sentence, this one should be glued with the
# former one
# NOT IMPLEMENTED
print '*** error: broken line'
print lines[i]
print 'Glue with previous line!'
_abort()
f = open(bibfile, 'w')
f.writelines(lines)
f.close()
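# Illustrative sketch (not in the original source): the per-line value rewrite
# performed by fix_bibtex4publish, isolated as a tiny helper. A quoted value is
# turned into a braces-enclosed value and the variable name is lower-cased.
def _demo_fix_bibtex_value_line(line):
    """Rewrite one 'Variable = "value",' BibTeX line to publish-friendly form (sketch)."""
    variable, value = line.split('=', 1)
    value = value.strip().rstrip(',').strip('"')
    if not value.startswith('{'):
        value = '{' + value
    if not value.endswith('}'):
        value = value + '}'
    return '%-15s = %s,' % (variable.lower().strip(), value)

# Example: _demo_fix_bibtex_value_line('  Author = "J. Smith",')
# returns 'author          = {J. Smith},'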
def _usage_list_fig_src_files():
print 'Usage: doconce list_fig_src_files *.do.txt'
def list_fig_src_files():
"""
List all figure, movie, and source code files needed in a
set of .do.txt files. Useful when splitting a document into
new chapters and directories.
"""
if len(sys.argv) < 2:
_usage_list_fig_src_files()
sys.exit(0)
from common import INLINE_TAGS
code_pattern = '^@@@CODE +([^ ]+)'
figs = []
movs = []
cods = []
for filename in sys.argv[1:]:
f = open(filename, 'r'); text = f.read(); f.close()
figs += [figfile for figfile, options, caption in
re.findall(INLINE_TAGS['figure'], text, flags=re.MULTILINE)]
movs += [movfile for movfile, options, caption in
re.findall(INLINE_TAGS['movie'], text, flags=re.MULTILINE)]
cods += re.findall(code_pattern, text, flags=re.MULTILINE)
if figs:
print '\n'.join(figs)
if movs:
print '\n'.join(movs)
if cods:
print '\n'.join(cods)
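# Illustrative sketch (not part of the original code): the @@@CODE pattern used
# above captures the path of an included source file from a DocOnce line.
def _demo_code_pattern():
    """Show what the @@@CODE regex extracts from a sample line (sketch)."""
    import re
    line = '@@@CODE src/ball.py fromto: def solver@def test'
    return re.findall('^@@@CODE +([^ ]+)', line, flags=re.MULTILINE)  # ['src/ball.py']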
def _usage_csv2table():
print 'Usage: doconce csv2table somefile.csv [--headings=clr --columns=rrl --delimiter=;] > outfile'
def csv2table():
"""Convert a csv file to a DocOnce table."""
if len(sys.argv) < 2:
_usage_csv2table()
sys.exit(0)
delimiter = ','
for arg in sys.argv[1:]:
if arg.startswith('--delimiter='):
delimiter = arg.split('=')[1]
import csv
filename = sys.argv[1]
csvfile = open(filename, 'r')
table = []
for row in csv.reader(csvfile, delimiter=delimiter):
if row:
table.append(row)
csvfile.close()
# Now, table is list of lists
for i in range(len(table)):
for j in range(len(table[i])):
table[i][j] = table[i][j].strip()
#import pprint;pprint.pprint(table)
num_columns = 0
max_column_width = 0
for row in table:
num_columns = max(num_columns, len(row))
for column in row:
max_column_width = max(max_column_width, len(column))
# Add empty cells
for i in range(len(table)):
table[i] = table[i] + ['']*(num_columns-len(table[i]))
align_headings = align_columns = 'c'*num_columns
for arg in sys.argv[1:]:
if arg.startswith('--headings='):
align_headings = list(arg.split('=')[1])
if len(align_headings) != num_columns:
print '*** error: %s has wrong no of columns (should be %d)' % \
(arg, num_columns)
if arg.startswith('--columns='):
align_columns = list(arg.split('=')[1])
if len(align_columns) != num_columns:
print '*** error: %s has wrong no of columns (should be %d)' % \
(arg, num_columns)
# Construct doconce table
width = (max_column_width+2)*num_columns + num_columns+1
separator0 = '|' + '-'*(width-2) + '|'
separator1 = separator0
separator2 = separator0
s = list(separator1)
for j in range(num_columns):
s[max_column_width/2 + 1 + j*(max_column_width+3)] = align_headings[j]
separator1 = ''.join(s)
s = list(separator2)
for j in range(num_columns):
s[max_column_width/2 + 1 + j*(max_column_width+3)] = align_columns[j]
separator2 = ''.join(s)
column_format = ' %%-%ds ' % max_column_width
for j in range(len(table)):
table[j] = [column_format % c for c in table[j]]
table[j] = '|' + '|'.join(table[j]) + '|'
text = '\n\n' + separator1 + '\n' + table[0] + '\n' + \
separator2 + '\n' + '\n'.join(table[1:]) + \
'\n' + separator0 + '\n\n'
print text
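# Illustrative example (not in the original source) of the table layout that
# csv2table produces. For a file data.csv containing the three lines
#   name,x,y
#   A,1,2
#   B,10,20
# the command `doconce csv2table data.csv --headings=ccc --columns=rrr`
# prints a DocOnce table of this form (alignment characters are embedded in
# the separator lines):
#
#   |--c------c------c---|
#   | name | x    | y    |
#   |--r------r------r---|
#   | A    | 1    | 2    |
#   | B    | 10   | 20   |
#   |--------------------|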
# ------------ diff two files ----------------
_diff_programs = {
'latexdiff': ('http://www.ctan.org/pkg/latexdiff', 'latexdiff'),
'pdiff': ('http://www.gnu.org/software/a2ps/ http://www.gnu.org/software/wdiff/', 'a2ps wdiff texlive-latex-extra texlive-latex-recommended'),
'kdiff3': ('http://kdiff3.sourceforge.net/', 'kdiff3'),
'diffuse': ('http://diffuse.sourceforge.net/', 'diffuse'),
'xxdiff': ('http://xxdiff.sourceforge.net/local/', 'xxdiff'),
'fldiff': ('http://packages.debian.org/sid/fldiff', 'fldiff'),
'meld': ('http://meldmerge.org/', 'meld'),
'tkdiff.tcl': ('https://sourceforge.net/projects/tkdiff/', 'not in Debian')
}
def _missing_diff_program(program_name):
print program_name, 'is not installed.'
print 'see', _diff_programs[program_name][0]
if not _diff_programs[program_name][1].startswith('not in'):
print 'Ubuntu/Debian Linux: sudo apt-get install', \
_diff_programs[program_name][1]
sys.exit(1)
def _usage_diff():
print 'Usage: doconce diff oldfile newfile [diffprog]'
print 'diffprogram may be difflib (default),'
print 'pdiff, diff, diffuse, kdiff3, xxdiff, meld, latexdiff'
print 'Output in tmp_diff_*'
def diff():
"""Find differences between two files."""
if len(sys.argv) < 3:
_usage_diff()
sys.exit(0)
system('rm -f tmp_diff_*')
file1 = sys.argv[1]
file2 = sys.argv[2]
try:
diffprog = sys.argv[3]
except:
diffprog = 'difflib'
if diffprog == 'difflib':
diffing_files = pydiff(file1, file2)
if diffing_files:
print 'differences found, see ', \
','.join([name + '.html|.txt' for name in diffing_files])
elif diffprog == 'latexdiff':
if which('latexdiff'):
latexdiff(file1, file2)
else:
_missing_diff_program('latexdiff')
else:
diff_files(file1, file2, diffprog)
def pydiff(files1, files2, n=3, prefix_diff_files='tmp_diff_'):
"""
Use Python's difflib to compute the difference between
files1 and files2 (can be corresponding lists of files
or just two strings if only one set of files is to be
compared).
Produce text and html diff.
"""
import difflib, time, os
if isinstance(files1, str):
files1 = [files1]
if isinstance(files2, str):
files2 = [files2]
sizes = [] # measure diffs in bytes
diff_files = [] # filestem of non-empty diff files generated
for fromfile, tofile in zip(files1, files2):
if not os.path.isfile(fromfile):
print fromfile, 'does not exist'
_abort()
if not os.path.isfile(tofile):
print tofile, 'does not exist'
_abort()
fromdate = time.ctime(os.stat(fromfile).st_mtime)
todate = time.ctime(os.stat(tofile).st_mtime)
fromlines = open(fromfile, 'U').readlines()
tolines = open(tofile, 'U').readlines()
diff_html = difflib.HtmlDiff().make_file(
fromlines, tolines, fromfile, tofile, context=True, numlines=n)
diff_plain = difflib.unified_diff(
fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
filename_plain = prefix_diff_files + tofile + '.txt'
filename_html = prefix_diff_files + tofile + '.html'
f = open(filename_plain, 'w')
# Need to add newlines despite doc saying that trailing newlines are
# inserted...
diff_plain = [line + '\n' for line in diff_plain]
f.writelines(diff_plain)
f.close()
f = open(filename_html, 'w')
f.writelines(diff_html)
f.close()
size = os.path.getsize(filename_plain)
# Any diff? (Could also just test if the file strings are different)
if size > 4:
sizes.append(size)
diff_files.append(prefix_diff_files + tofile)
else:
os.remove(filename_plain)
os.remove(filename_html)
return diff_files # empty if no differences
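# Illustrative usage sketch (not in the original code): pydiff can be called
# directly on two versions of a file; it writes tmp_diff_<newfile>.txt and
# tmp_diff_<newfile>.html and returns the filestems of the non-empty diffs.
#
#   changed = pydiff('mydoc_old.do.txt', 'mydoc.do.txt')
#   if changed:
#       print 'inspect ' + ', '.join(name + '.html' for name in changed)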
def check_diff(diff_file):
size = os.path.getsize(diff_file)
if size > 4:
print 'diff in', diff_file
else:
os.remove(diff_file)
def latexdiff(files1, files2):
"""Highlight file differences with latexdiff."""
if not which('latexdiff'):
_missing_diff_program('latexdiff')
if isinstance(files1, str):
files1 = [files1]
if isinstance(files2, str):
files2 = [files2]
for fromfile, tofile in zip(files1, files2):
# Must convert to latex if doconce files
if fromfile.endswith('.do.txt'):
basename = fromfile[:-7]
failure1 = os.system('doconce format pdflatex %s' % basename)
failure2 = os.system('doconce ptex2tex %s' % basename)
fromfile = basename + '.tex'
if tofile.endswith('.do.txt'):
basename = tofile[:-7]
failure1 = os.system('doconce format pdflatex %s' % basename)
failure2 = os.system('doconce ptex2tex %s' % basename)
tofile = basename + '.tex'
diff_file = 'tmp_diff_%s.tex' % os.path.basename(tofile)
failure = os.system('latexdiff %s %s > %s' %
(fromfile, tofile, diff_file))
failure = os.system('pdflatex %s' % diff_file)
size = os.path.getsize(diff_file)
if size > 4:
print 'output in', diff_file[:-3] + 'pdf'
def diff_files(files1, files2, program='diff'):
"""
Run some diff program:
diffprog file1 file2 > tmp_diff_*.txt/.pdf/.html
for file1, file2 in zip(files1, files2).
"""
if isinstance(files1, str):
files1 = [files1]
if isinstance(files2, str):
files2 = [files2]
for fromfile, tofile in zip(files1, files2):
cmd = '%s %s %s' % (program, fromfile, tofile)
if program in ['diffuse', 'kdiff3', 'xxdiff', 'fldiff', 'meld', 'tkdiff.tcl']:
# GUI program
if which(program):
system(cmd, verbose=True)
else:
_missing_diff_program(program)
elif program == 'diff':
diff_file = 'tmp_diff_%s.txt' % os.path.basename(tofile)
system(cmd + ' > ' + diff_file, verbose=True)
check_diff(diff_file)
elif program == 'pdiff':
diff_file = 'tmp_diff_%s' % os.path.basename(tofile)
if which('pdiff'):
system(cmd + ' -- -1 -o %s.ps' % diff_file)
system('ps2pdf -sPAPERSIZE=a4 %s.ps; rm -f %s.ps' %
(diff_file, diff_file))
else:
_missing_diff_program(program)
print 'diff in %s.pdf' % diff_file
else:
print program, 'not supported'
_abort()
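# Illustrative usage sketch (not part of the original source): compare two
# DocOnce files with the plain `diff` program; the result is written to
# tmp_diff_<file2>.txt (and removed again if the files turn out to be equal).
#
#   diff_files('mydoc_v1.do.txt', 'mydoc_v2.do.txt', program='diff')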
def _usage_gitdiff():
#print 'Usage: doconce gitdiff diffprog file1 file2 file3'
print 'Usage: doconce gitdiff file1 file2 file3'
def gitdiff():
"""Make diff of newest and previous version of files (under Git)."""
if len(sys.argv) < 2:
_usage_gitdiff()
sys.exit(0)
#diffprog = sys.argv[1]
filenames = sys.argv[1:]
old_files = []
for filename in filenames:
failure, output = commands.getstatusoutput('git log %s' % filename)
if not failure:
commits = re.findall(r'^commit\s+(.+)$', output,
flags=re.MULTILINE)
dates = re.findall(r'^Date:\s+(.+)\d\d:\d\d:\d\d .+$', output,
flags=re.MULTILINE)
system('git checkout %s %s' % (commits[1], filename))
old_filename = '__' + dates[1].replace(' ', '_') + filename
shutil.copy(filename, old_filename)
system('git checkout %s %s' % (commits[0], filename))
old_files.append(old_filename)
print 'doconce diff', old_filename, filename
#pydiff(filenames, old_files)
def _usage_extract_exercises():
#print 'Usage: doconce gitdiff diffprog file1 file2 file3'
print 'Usage: doconce extract_exercises tmp_mako__mydoc.do.txt'
print "\nMust use tmp_mako__*.do.txt to have includes in place."
print "Note: extracting exercises may create a need for"
print "generalized references to the original document (ref[][][])."
def extract_exercises():
if len(sys.argv) < 2:
_usage_extract_exercises()
sys.exit(0)
filename = sys.argv[1]
if filename.endswith('.do.txt'):
basename = filename[:-7]
else:
basename = filename
filename += '.do.txt'
f = open(filename, 'r')
lines = f.readlines()
f.close()
keywords = []
try:
if sys.argv[2].startswith('--filter='):
dummy, keywords = sys.argv[2].split('=')
keywords = re.split(r';\s*', keywords)
except IndexError:
pass
exer_heading_pattern = r'^ *(=====) *\{?(Exercise|Problem|Project)\}?: *(?P<title>[^ =-].+?)\s*====='
keywords_pattern = r'^#?\s*(keywords|kw) *= *([A-Za-z0-9\-._;, ]+)'
exer = []
exer_tp = []
inside_exer = False
for i, line in enumerate(lines):
#print i, inside_exer, line
if line.startswith('TITLE:'):
line = line.replace('TITLE: ', 'TITLE: Exercises from ')
exer.append(line)
elif line.startswith('AUTHOR:'):
exer.append(line)
elif line.startswith('DATE:'):
exer.append(line)
elif line.startswith('========= '):
exer.append(line)
if re.search(exer_heading_pattern, line):
#print 'found exercise!'
inside_exer = True
exer.append([])
exer_tp.append(None)
if inside_exer:
# Filter afterwards
if not isinstance(exer[-1], list):
print 'inside exercise, but exer[-1] is not a list', exer[-1]
exer[-1].append(line)
m = re.search(keywords_pattern, line)
if m:
exer_tp[-1] = [name.strip() for name in m.group(2).split(';')]
if inside_exer and i < len(lines)-1 and lines[i+1].startswith('====='):
inside_exer = False
# Strip off blank lines at the end of each exercise
for line in exer:
if isinstance(line, list):
for i in range(len(line)-1, -1, -1):
if line[i] == '\n':
line[i] = ''
else:
break
filename = basename[10:] + '_exer.do.txt'
f = open(filename, 'w')
i = 0
for line in exer:
# Is line an ordinary line (chapter heading) or an exercise section?
if isinstance(line, list):
# exercise section: line is list of lines
print_this_exer = not keywords # default: print if no filtering
if keywords and exer_tp[i] is not None:
print_this_exer = False
# Any of this exercise's keywords among those in the filter:
for keyword in exer_tp[i]:
if keyword in keywords:
print_this_exer = True
if print_this_exer:
f.write('\n\n# --- begin exercise ---\n\n')
for exer_line in line:
f.write(exer_line)
f.write('\n# --- end exercise ---\n\n')
i += 1
elif isinstance(line, str):
f.write(line)
f.close()
print 'exercises extracted to', filename
# Check if we have references to the original document
f = open(filename, 'r')
filestr = f.read()
f.close()
labels = re.findall(r'label\{(.+?)\}', filestr)
refs = re.findall(r'ref\{(.+?)\}', filestr)
for ref in refs:
if not ref in labels:
print '\n*** warning: reference ref{%s} - no label in document' % ref
print ' need generalized reference ref[][][] in the original document'
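# Hedged usage sketch (not in the original source): typical command-line
# invocations, optionally restricting which exercises are kept via keywords
# that are matched against "# kw = ..." lines inside each exercise.
#
#   doconce extract_exercises tmp_mako__mydoc.do.txt
#   doconce extract_exercises tmp_mako__mydoc.do.txt "--filter=integration; ODE"
#
# The output goes to mydoc_exer.do.txt (the tmp_mako__ prefix is stripped).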
|
sjsrey/doconce
|
lib/doconce/misc.py
|
Python
|
bsd-3-clause
| 366,370
|
[
"Gaussian"
] |
1ad7247f4f485809b11202cbf95765ffd895c5133bfab3c33f40c472280d247a
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
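# Hedged illustrative sketch (not part of the original analysis): the same
# covariance/eigendecomposition idea on a tiny synthetic data set, kept as
# comments so nothing extra runs with this script.
#
#   t = np.random.randn(50)
#   X = np.column_stack([t, 2.0 * t, 0.01 * np.random.randn(50)])
#   Xc = X - X.mean(axis=0)                        # center each feature column
#   vals, vecs = np.linalg.eigh(np.cov(Xc, rowvar=False))
#   vals[-1] / vals.sum()                          # close to 1: one dominant component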
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Edge-1']*30 + ['Surface']*30 + ['Edge-2']*30
PCA_chunk_1 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced eigenvector matrix according to the highest eigenvalues (considering the first 20, based on the figure above)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((90,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/skin_related/BMED_8813_HAP/Scaling/best_kNN_PC/cross_validate_categories_kNN_PC_BMED_8813_HAP_scaled_method_II.py
|
Python
|
mit
| 4,435
|
[
"Mayavi"
] |
001c2d9d9b72286d1e22a941fce72b434549f24c0e17511f197da280af43501a
|
#
# Provides a convenience Ontology class that implements a high-level interface
# for interacting with an OWL ontology.
#
# Python imports.
from labelmap import LabelMap
from obohelper import isOboID, oboIDToIRI
from ontology_entities import _OntologyClass, _OntologyDataProperty
from ontology_entities import _OntologyObjectProperty, _OntologyAnnotationProperty
from rfc3987 import rfc3987
# Java imports.
from java.io import File, FileOutputStream
from org.semanticweb.owlapi.apibinding import OWLManager
from org.semanticweb.owlapi.model import IRI, AddAxiom, OWLOntologyID
from org.semanticweb.owlapi.model import SetOntologyID, AxiomType, OWLOntology
from org.semanticweb.owlapi.model import AddOntologyAnnotation
from org.semanticweb.owlapi.model import OWLRuntimeException
from org.semanticweb.owlapi.formats import RDFXMLDocumentFormat
from org.semanticweb.elk.owlapi import ElkReasonerFactory
from org.semanticweb import HermiT
from uk.ac.manchester.cs.owlapi.modularity import SyntacticLocalityModuleExtractor
from uk.ac.manchester.cs.owlapi.modularity import ModuleType
from com.google.common.base import Optional
class Ontology:
"""
Provides a high-level interface to the OWL API's ontology object system.
Conceptually, instances of this class represent a single OWL ontology.
"""
# The IRI for the "dc:source" annotation property.
SOURCE_PROP_IRI = IRI.create('http://purl.org/dc/elements/1.1/source')
def __init__(self, ontology_source):
"""
Initialize this Ontology instance. The argument "ontology_source"
should either be a path to an OWL ontology file on the local file
system or an instance of an OWL API OWLOntology object.
"""
if isinstance(ontology_source, basestring):
# Load the ontology from the source file.
self.ontman = OWLManager.createOWLOntologyManager()
ontfile = File(ontology_source)
self.ontology = self.ontman.loadOntologyFromOntologyDocument(ontfile)
elif isinstance(ontology_source, OWLOntology):
self.ontology = ontology_source
self.ontman = self.ontology.getOWLOntologyManager()
else:
raise RuntimeError('Unrecognized type for initializing an Ontology object: '
+ str(ontology_source))
self.labelmap = LabelMap(self.ontology)
# Create an OWL data factory, which is required for creating new OWL
# entities and looking up existing entities.
self.df = OWLManager.getOWLDataFactory()
def getOWLOntology(self):
"""
Returns the OWL API ontology object contained by this Ontology object.
"""
return self.ontology
def labelToIRI(self, labeltxt):
"""
Given a class label, returns the associated class IRI.
"""
try:
cIRI = self.labelmap.lookupIRI(labeltxt)
except KeyError:
raise RuntimeError('The class label, "' + labeltxt
+ '", could not be matched to a term IRI.')
return cIRI
def expandIRI(self, iri):
"""
Expands an IRI string into a full IRI and returns a corresponding OWL
API IRI object. Also accepts OWL API IRI objects, in which case they
are returned unaltered. IRI strings can be either full IRIs, prefix
IRIs (i.e. curies, such as "owl:Thing"), or relative IRIs (e.g.,
"term_name"). If the IRI string is a prefix IRI or relative IRI, it
will be expanded using the prefixes or base defined in the ontology.
If the string is not a prefix IRI or relative IRI, then it is assumed
to be a full IRI.
iri: The IRI to expand. Can be either a string or an OWL API IRI
object. In the latter case, iri is returned as is.
"""
prefix_df = self.ontman.getOntologyFormat(self.ontology).asPrefixOWLOntologyFormat()
if isinstance(iri, basestring):
# Verify that we have a valid IRI string.
if rfc3987.match(iri, rule='IRI_reference') == None:
raise RuntimeError('Invalid IRI string: "' + iri + '".')
try:
# If iri is not a prefix IRI, the OWL API will throw an
# OWLRuntimeException.
fullIRI = prefix_df.getIRI(iri)
except OWLRuntimeException:
fullIRI = IRI.create(iri)
elif isinstance(iri, IRI):
fullIRI = iri
else:
raise RuntimeError('Unsupported type for conversion to IRI.')
return fullIRI
def expandIdentifier(self, id_obj):
"""
Converts an object representing an identifier into a fully expanded
IRI. The argument id_obj can be either an OWL API IRI object or a
string containing: a prefix IRI (i.e., a curie, such as "owl:Thing"), a
relative IRI, a full IRI, or an OBO ID (e.g., a string of the form
"PO:0000003"). Returns an OWL API IRI object.
"""
if isinstance(id_obj, basestring):
if isOboID(id_obj):
IRIobj = oboIDToIRI(id_obj)
else:
IRIobj = self.expandIRI(id_obj)
elif isinstance(id_obj, IRI):
IRIobj = id_obj
else:
raise RuntimeError('Unsupported type for conversion to IRI.')
return IRIobj
def getExistingClass(self, class_id):
"""
Searches for an existing class in the ontology. If the class is
declared either directly in the ontology or is declared in its
transitive imports closure, an OWL API object representing the class is
returned. Otherwise, None is returned.
class_id: The identifier of the class to search for. Can be either an
OWL API IRI object or a string containing: a prefix IRI (i.e., a
curie, such as "owl:Thing"), a relative IRI, a full IRI, or an OBO
ID (e.g., a string of the form "PO:0000003").
"""
classIRI = self.expandIdentifier(class_id)
classobj = self.df.getOWLClass(classIRI)
ontset = self.ontology.getImportsClosure()
for ont in ontset:
if ont.getDeclarationAxioms(classobj).size() > 0:
return classobj
return None
def getExistingDataProperty(self, prop_id):
"""
Searches for an existing data property in the ontology. If the
property is declared either directly in the ontology or is declared in
its transitive imports closure, an OWL API object representing the
property is returned. Otherwise, None is returned.
prop_id: The identifier of the property to search for. Can be either
an OWL API IRI object or a string containing: a prefix IRI (i.e., a
curie, such as "owl:Thing"), a full IRI, a relative IRI, or an OBO
ID (e.g., a string of the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
propobj = self.df.getOWLDataProperty(propIRI)
ontset = self.ontology.getImportsClosure()
for ont in ontset:
if ont.getDeclarationAxioms(propobj).size() > 0:
return propobj
return None
def getExistingObjectProperty(self, prop_id):
"""
Searches for an existing object property in the ontology. If the
property is declared either directly in the ontology or is declared in
its transitive imports closure, an OWL API object representing the
property is returned. Otherwise, None is returned.
prop_id: The identifier of the property to search for. Can be either
an OWL API IRI object or a string containing: a prefix IRI (i.e., a
curie, such as "owl:Thing"), a full IRI, a relative IRI, or an OBO
ID (e.g., a string of the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
propobj = self.df.getOWLObjectProperty(propIRI)
ontset = self.ontology.getImportsClosure()
for ont in ontset:
if ont.getDeclarationAxioms(propobj).size() > 0:
return propobj
return None
def getExistingAnnotationProperty(self, prop_id):
"""
Searches for an existing annotation property in the ontology. If the
property is declared either directly in the ontology or is declared in
its transitive imports closure, an OWL API object representing the
property is returned. Otherwise, None is returned.
prop_id: The identifier of the property to search for. Can be either
an OWL API IRI object or a string containing: a prefix IRI (i.e., a
curie, such as "owl:Thing"), a full IRI, a relative IRI, or an OBO
ID (e.g., a string of the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
propobj = self.df.getOWLAnnotationProperty(propIRI)
ontset = self.ontology.getImportsClosure()
for ont in ontset:
if ont.getDeclarationAxioms(propobj).size() > 0:
return propobj
return None
def getExistingProperty(self, prop_id):
"""
Searches for an existing property in the ontology. If the property is
declared either directly in the ontology or is declared in its
transitive imports closure, an OWL API object representing the property
is returned. Otherwise, None is returned. Object properties, data
properties, and annotation properties are all considered; ontology
properties are not.
prop_id: The identifier of the property to search for. Can be either
an OWL API IRI object or a string containing: a prefix IRI (i.e., a
curie, such as "owl:Thing"), a full IRI, a relative IRI, or an OBO
ID (e.g., a string of the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
prop = self.getExistingObjectProperty(propIRI)
if prop == None:
prop = self.getExistingAnnotationProperty(propIRI)
if prop == None:
prop = self.getExistingDataProperty(propIRI)
# If no matching data property was found, prop == None.
return prop
def getExistingEntity(self, ent_id):
"""
Searches for an entity in the ontology using an identifier. The entity
is assumed to be either a class, object property, data property, or
annotation property. Both the main ontology and its imports closure
are searched for the target entity.
ent_id: The identifier of the entity. Can be either an OWL API IRI
object or a string containing: a prefix IRI (i.e., a curie, such as
"owl:Thing"), a full IRI, a relative IRI, or an OBO ID (e.g., a
string of the form "PO:0000003").
"""
eIRI = self.expandIdentifier(ent_id)
entity = self.getExistingClass(eIRI)
if entity == None:
entity = self.getExistingProperty(eIRI)
# If no matching data property was found, entity == None.
return entity
def getExistingIndividual(self, indv_id):
"""
Searches for an existing individual in the ontology. If the individual
is declared either directly in the ontology or is declared in its
transitive imports closure, an OWL API object representing the
individual is returned. Otherwise, None is returned.
indv_id: The identifier of the individual to search for. Can be either
an OWL API IRI object or a string containing: a prefix IRI (i.e., a
curie, such as "owl:Thing"), a full IRI, a relative IRI, or an OBO
ID (e.g., a string of the form "PO:0000003").
"""
indvIRI = self.expandIdentifier(indv_id)
indvobj = self.df.getOWLNamedIndividual(indvIRI)
ontset = self.ontology.getImportsClosure()
for ont in ontset:
if ont.getDeclarationAxioms(indvobj).size() > 0:
return indvobj
return None
def createNewClass(self, class_id):
"""
Creates a new OWL class, adds it to the ontology, and returns an
associated _OntologyClass object.
class_id: The identifier for the new class. Can be either an OWL API
IRI object or a string containing: a prefix IRI (i.e., a curie,
such as "owl:Thing"), a full IRI, a relative IRI, or an OBO ID
(e.g., a string of the form "PO:0000003").
"""
classIRI = self.expandIdentifier(class_id)
# Get the class object.
owlclass = self.df.getOWLClass(classIRI)
declaxiom = self.df.getOWLDeclarationAxiom(owlclass)
self.ontman.applyChange(AddAxiom(self.ontology, declaxiom))
return _OntologyClass(classIRI, owlclass, self)
def createNewDataProperty(self, prop_id):
"""
Creates a new OWL data property, adds it to the ontology, and returns
an associated _OntologyDataProperty object.
prop_iri: The identifier for the new property. Can be either an OWL
API IRI object or a string containing: a prefix IRI (i.e., a curie,
such as "owl:Thing"), a full IRI, or an OBO ID (e.g., a string of
the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
owldprop = self.df.getOWLDataProperty(propIRI)
declaxiom = self.df.getOWLDeclarationAxiom(owldprop)
self.ontman.applyChange(AddAxiom(self.ontology, declaxiom))
return _OntologyDataProperty(propIRI, owldprop, self)
def createNewObjectProperty(self, prop_id):
"""
Creates a new OWL object property, adds it to the ontology, and returns
an associated _OntologyObjectProperty object.
prop_iri: The identifier for the new property. Can be either an OWL
API IRI object or a string containing: a prefix IRI (i.e., a curie,
such as "owl:Thing"), a full IRI, or an OBO ID (e.g., a string of
the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
owloprop = self.df.getOWLObjectProperty(propIRI)
declaxiom = self.df.getOWLDeclarationAxiom(owloprop)
self.ontman.applyChange(AddAxiom(self.ontology, declaxiom))
return _OntologyObjectProperty(propIRI, owloprop, self)
def createNewAnnotationProperty(self, prop_id):
"""
Creates a new OWL annotation property, adds it to the ontology, and
returns an associated _OntologyAnnotationProperty object.
prop_iri: The identifier for the new property. Can be either an OWL
API IRI object or a string containing: a prefix IRI (i.e., a curie,
such as "owl:Thing"), a full IRI, or an OBO ID (e.g., a string of
the form "PO:0000003").
"""
propIRI = self.expandIdentifier(prop_id)
owloprop = self.df.getOWLAnnotationProperty(propIRI)
declaxiom = self.df.getOWLDeclarationAxiom(owloprop)
self.ontman.applyChange(AddAxiom(self.ontology, declaxiom))
return _OntologyAnnotationProperty(propIRI, owloprop, self)
def addTermAxiom(self, owl_axiom):
"""
Adds a new term axiom to this ontology. In this context, "term axiom"
means an axiom with an OWL class or property as its subject. The
argument "owl_axiom" should be an instance of an OWL API axiom object.
"""
# If this is a label annotation, update the label lookup dictionary.
if owl_axiom.isOfType(AxiomType.ANNOTATION_ASSERTION):
if owl_axiom.getProperty().isLabel():
labeltxt = owl_axiom.getValue().getLiteral()
# If we are adding a label, we should be guaranteed that the
# subject of the annotation is an IRI (i.e., not anonymous).
subjIRI = owl_axiom.getSubject()
if not(isinstance(subjIRI, IRI)):
raise RuntimeError('Attempted to add the label "'
+ labeltxt + '" as an annotation of an anonymous class.')
self.labelmap.add(labeltxt, subjIRI)
self.ontman.applyChange(AddAxiom(self.ontology, owl_axiom))
def removeEntity(self, entity, remove_annotations=True):
"""
Removes an entity from the ontology (including its imports closure).
Optionally, any annotations referencing the deleted entity can also be
removed (this is the default behavior).
entity: An OWL API entity object.
remove_annotations: If True, annotations referencing the entity will
also be removed.
"""
ontset = self.ontology.getImportsClosure()
for ont in ontset:
for axiom in ont.getAxioms():
# See if this axiom is an annotation axiom.
if axiom.getAxiomType() == AxiomType.ANNOTATION_ASSERTION:
if remove_annotations:
# Check if this annotation axiom refers to the target
# entity.
asubject = axiom.getSubject()
if isinstance(asubject, IRI):
if asubject.equals(entity.getIRI()):
self.ontman.removeAxiom(ont, axiom)
# See if this axiom includes the target entity (e.g., a
# declaration axiom for the target entity).
elif axiom.getSignature().contains(entity):
self.ontman.removeAxiom(ont, axiom)
def setOntologyID(self, ont_iri):
"""
Sets the ID for the ontology (i.e., the value of the "rdf:about"
attribute).
ont_iri: The IRI (i.e., ID) of the ontology. Can be either an IRI
object or a string.
"""
ontIRI = self.expandIRI(ont_iri)
newoid = OWLOntologyID(Optional.fromNullable(ontIRI), Optional.absent())
self.ontman.applyChange(SetOntologyID(self.ontology, newoid))
def setOntologySource(self, source_iri):
"""
Sets the value of the "dc:source" annotation property for this ontology.
source_iri: The IRI of the source ontology. Can be either an IRI
object or a string.
"""
sourceIRI = self.expandIRI(source_iri)
sourceprop = self.df.getOWLAnnotationProperty(self.SOURCE_PROP_IRI)
s_annot = self.df.getOWLAnnotation(sourceprop, sourceIRI)
self.ontman.applyChange(
AddOntologyAnnotation(self.getOWLOntology(), s_annot)
)
def saveOntology(self, filepath):
"""
Saves the ontology to a file.
"""
oformat = RDFXMLDocumentFormat()
foutputstream = FileOutputStream(File(filepath))
self.ontman.saveOntology(self.ontology, oformat, foutputstream)
foutputstream.close()
def getELKReasoner(self):
"""
Returns an instance of an ELK reasoner for this ontology.
"""
rfact = ElkReasonerFactory()
return rfact.createReasoner(self.getOWLOntology())
def getHermitReasoner(self):
"""
Returns an instance of a HermiT reasoner for this ontology.
"""
rfact = HermiT.ReasonerFactory()
return rfact.createReasoner(self.getOWLOntology())
def extractModule(self, signature, mod_iri):
"""
Extracts a module that is a subset of the entities in this ontology.
The result is returned as an Ontology object.
signature: A Java Set of all entities to include in the module.
mod_iri: The IRI for the extracted ontology module. Can be either an
IRI object or a string.
"""
modIRI = self.expandIRI(mod_iri)
slme = SyntacticLocalityModuleExtractor(
self.ontman, self.getOWLOntology(), ModuleType.STAR
)
modont = Ontology(slme.extractAsOntology(signature, modIRI))
# Add an annotation for the source of the module.
sourceIRI = None
ontid = self.getOWLOntology().getOntologyID()
if ontid.getVersionIRI().isPresent():
sourceIRI = ontid.getVersionIRI().get()
elif ontid.getOntologyIRI().isPresent():
sourceIRI = ontid.getOntologyIRI().get()
if sourceIRI != None:
modont.setOntologySource(sourceIRI)
return modont
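# Hedged usage sketch (not part of this module): requires Jython with the OWL
# API jars on the classpath, so it is shown as comments only; the file names
# are placeholders.
#
#   ont = Ontology('my_ontology.owl')              # load from a local OWL file
#   newclass = ont.createNewClass('PO:0000003')    # declare a class by OBO ID
#   ent = ont.getExistingEntity('owl:Thing')       # look up an entity by prefix IRI
#   ont.saveOntology('my_ontology-out.owl')        # write RDF/XML to disk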
|
biocodellc/biocode-fims-configurator
|
bin/ontobuilder/ontology.py
|
Python
|
gpl-3.0
| 20,578
|
[
"Elk"
] |
d073345bb0fec6684f6a6519c2065239627a6b4a680dd16c57dc8e3cb9441436
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
This module implements reading and writing of ShengBTE CONTROL files.
"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.io.vasp import Kpoints
try:
import f90nml
except ImportError:
f90nml = None
__author__ = "Rees Chang, Alex Ganose"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__email__ = "rc564@cornell.edu, aganose@lbl.gov"
__date__ = "June 27, 2019"
class Control(MSONable, dict):
"""
Class for reading, updating, and writing ShengBTE CONTROL files.
See https://bitbucket.org/sousaw/shengbte/src/master/ for more
detailed description and default values of CONTROL arguments.
"""
required_params = [
"nelements",
"natoms",
"ngrid",
"lattvec",
"types",
"elements",
"positions",
"scell",
]
allocations_keys = ["nelements", "natoms", "ngrid", "norientations"]
crystal_keys = [
"lfactor",
"lattvec",
"types",
"elements",
"positions",
"masses",
"gfactors",
"epsilon",
"born",
"scell",
"orientations",
]
params_keys = [
"t",
"t_min",
"t_max",
"t_step",
"omega_max",
"scalebroad",
"rmin",
"rmax",
"dr",
"maxiter",
"nticks",
"eps",
]
flags_keys = [
"nonanalytic",
"convergence",
"isotopes",
"autoisotopes",
"nanowires",
"onlyharmonic",
"espresso",
]
def __init__(self, ngrid: Optional[List[int]] = None, temperature: Union[float, Dict[str, float]] = 300, **kwargs):
"""
Args:
ngrid: Reciprocal space grid density as a list of 3 ints.
temperature: The temperature to calculate the lattice thermal
conductivity for. Can be given as a single float, or a dictionary
with the keys "min", "max", "step".
**kwargs: Other ShengBTE parameters. Several parameters are required
for ShengBTE to run - we have listed these parameters below:
- nelements (int): number of different elements in the compound
- natoms (int): number of atoms in the unit cell
- lattvec (size 3x3 array): real-space lattice vectors, in units
of lfactor
- lfactor (float): unit of measurement for lattice vectors (nm).
I.e., set to 0.1 if lattvec given in Angstrom.
- types (size natom list): a vector of natom integers, ranging
from 1 to nelements, assigning an element to each atom in the
system
- elements (size natom list): a vector of element names
- positions (size natomx3 array): atomic positions in lattice
coordinates
- scell (size 3 list): supercell sizes along each crystal axis
used for the 2nd-order force constant calculation
"""
super().__init__()
if ngrid is None:
ngrid = [25, 25, 25]
self["ngrid"] = ngrid
if isinstance(temperature, (int, float)):
self["t"] = temperature
elif isinstance(temperature, dict):
self["t_min"] = temperature["min"]
self["t_max"] = temperature["max"]
self["t_step"] = temperature["step"]
else:
raise ValueError("Unsupported temperature type, must be float or dict")
self.update(kwargs)
@classmethod
@requires(
f90nml,
"ShengBTE Control object requires f90nml to be installed. " "Please get it at https://pypi.org/project/f90nml.",
)
def from_file(cls, filepath: str):
"""
Read a CONTROL namelist file and output a 'Control' object
Args:
filepath: Path of the CONTROL file.
Returns:
'Control' object with parameters instantiated.
"""
nml = f90nml.read(filepath)
sdict = nml.todict()
all_dict: Dict[str, Any] = {}
all_dict.update(sdict["allocations"])
all_dict.update(sdict["crystal"])
all_dict.update(sdict["parameters"])
all_dict.update(sdict["flags"])
all_dict.pop("_start_index") # remove unnecessary cruft
return cls.from_dict(all_dict)
@classmethod
def from_dict(cls, control_dict: Dict):
"""
Create a Control object from a Python dictionary. Description and default
parameters can be found at
https://bitbucket.org/sousaw/shengbte/src/master/.
Note some parameters are mandatory. Optional parameters default here to
None and will not be written to file.
Args:
control_dict: A Python dictionary of ShengBTE input parameters.
"""
return cls(**control_dict)
@requires(
f90nml,
"ShengBTE Control object requires f90nml to be installed. " "Please get it at https://pypi.org/project/f90nml.",
)
def to_file(self, filename: str = "CONTROL"):
"""
Writes ShengBTE CONTROL file from 'Control' object
Args:
filename: A file name.
"""
for param in self.required_params:
if param not in self.as_dict():
warnings.warn("Required parameter '{}' not specified!".format(param))
alloc_dict = _get_subdict(self, self.allocations_keys)
alloc_nml = f90nml.Namelist({"allocations": alloc_dict})
control_str = str(alloc_nml) + "\n"
crystal_dict = _get_subdict(self, self.crystal_keys)
crystal_nml = f90nml.Namelist({"crystal": crystal_dict})
control_str += str(crystal_nml) + "\n"
params_dict = _get_subdict(self, self.params_keys)
params_nml = f90nml.Namelist({"parameters": params_dict})
control_str += str(params_nml) + "\n"
flags_dict = _get_subdict(self, self.flags_keys)
flags_nml = f90nml.Namelist({"flags": flags_dict})
control_str += str(flags_nml) + "\n"
with open(filename, "w") as file:
file.write(control_str)
@classmethod
def from_structure(cls, structure: Structure, reciprocal_density: Optional[int] = 50000, **kwargs):
"""
Get a ShengBTE control object from a structure.
Args:
structure: A structure object.
reciprocal_density: If not None, the q-point grid ("ngrid") will be
set using this density.
kwargs: Additional options to be passed to the Control constructor.
See the docstring of the __init__ method for more details
Returns:
A ShengBTE control object.
"""
elements = list(map(str, structure.composition.elements))
unique_nums = np.unique(structure.atomic_numbers)
types_dict = dict(zip(unique_nums, range(len(unique_nums))))
types = [types_dict[i] + 1 for i in structure.atomic_numbers]
control_dict = {
"nelements": structure.ntypesp,
"natoms": structure.num_sites,
"norientations": 0,
"lfactor": 0.1,
"lattvec": structure.lattice.matrix.tolist(),
"elements": elements,
"types": types,
"positions": structure.frac_coords.tolist(),
}
if reciprocal_density:
kpoints = Kpoints.automatic_density(structure, reciprocal_density)
control_dict["ngrid"] = kpoints.kpts[0]
control_dict.update(**kwargs)
return Control(**control_dict)
def get_structure(self) -> Structure:
"""
Get a pymatgen Structure from a ShengBTE control object.
The control object must have the "lattvec", "types", "elements", and
"positions" settings otherwise an error will be thrown.
Returns:
The structure.
"""
required = ["lattvec", "types", "elements", "positions"]
if not all([r in self for r in required]):
raise ValueError("All of ['lattvec', 'types', 'elements', 'positions'] must be " "in control object")
unique_elements = self["elements"]
n_unique_elements = len(unique_elements)
element_map = dict(zip(range(1, n_unique_elements + 1), unique_elements))
species = [element_map[i] for i in self["types"]]
cell = np.array(self["lattvec"])
if "lfactor" in self:
cell *= self["lfactor"] * 10 # to nm then to Angstrom
return Structure(cell, species, self["positions"])
def as_dict(self):
"""
Returns: MSONable dict
"""
return dict(self)
def _get_subdict(master_dict, subkeys):
"""Helper method to get a set of keys from a larger dictionary"""
return {k: master_dict[k] for k in subkeys if k in master_dict and master_dict[k] is not None}
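# Hedged usage sketch (not part of pymatgen): build a Control object from a
# plain dictionary and convert it back to a Structure. The lattice vectors and
# positions below are illustrative values only; to_file() additionally
# requires f90nml.
#
#   control = Control.from_dict({
#       "nelements": 1, "natoms": 2, "ngrid": [25, 25, 25],
#       "lattvec": [[0.0, 2.73, 2.73], [2.73, 0.0, 2.73], [2.73, 2.73, 0.0]],
#       "lfactor": 0.1, "types": [1, 1], "elements": ["Si"],
#       "positions": [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]], "scell": [5, 5, 5],
#   })
#   structure = control.get_structure()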
|
davidwaroquiers/pymatgen
|
pymatgen/io/shengbte.py
|
Python
|
mit
| 9,232
|
[
"CRYSTAL",
"ESPResSo",
"VASP",
"pymatgen"
] |
7ba3db8a72381ac21c298a18b0e248dc7fcd3e9a8108beb0692640e9f22966b3
|
""" YES, oor no? """
import datetime
import logging
def yesorno(team, teamdates, date2=None):
"""
Input: team/city/etc, teamdates, and an optional date (date2)
Returns: True/False
"""
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
yesterday = yesterday.strftime("%Y-%m-%d")
chosen_team = get_team(
team
) # returns "New York Rangers" on http://URL/NYR or "" on no match
### The YES/NO logic:
# Check if yesterday's date is a key in teamdates, continue on first hit (not ordered..).
if chosen_team is None and date2 is None:
for date in teamdates:
if yesterday == date:
return True
if date2 is None:
# If no date set - set it to yesterday
date2 = yesterday
if dateapi(teamdates, chosen_team, date2):
return True
return False
def validatedate(date):
"""Return the date in format %Y-%m-%d if it is a valid date otherwise None.
Not accepting day in the middle"""
date_formats = [
"%d-%m-%Y",
"%Y-%m-%d",
"%d.%m.%Y",
"%Y.%m.%d",
"%d%m%Y",
"%Y%m%d",
"%A, %b %-d",
]
dateinnhlformat = None
if date:
for date_format in date_formats:
try:
dateinnhlformat = datetime.datetime.strptime(
date, date_format
).strftime("%Y-%m-%d")
except ValueError:
pass
return dateinnhlformat
def dateapi(teamdates, team=None, date=None):
"""Return true if there was a game on the date
Return false there was not and if date was unparseable
Take a team and/or a date as arguments"""
# Try to make the date provided into the NHL format
dateinnhlformat = validatedate(date)
if (dateinnhlformat) and (dateinnhlformat in teamdates) and (team is None):
# OK there is a game on that date!
logging.debug(f"no team: {team} but on date: {date} there was a game")
return True
if (dateinnhlformat) and (dateinnhlformat in teamdates) and (team is not None):
# OK there was a team and good date provided!
# if dateinnhlformat exists a date has been chosen
# for each list (matchup) at the date chosen
for matchup in teamdates[dateinnhlformat]:
for combatant in matchup:
if combatant == team:
logging.debug(f"{team} played on {date}")
return True
logging.debug(f"boo - no game found for team {team} or date {date}")
return False
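# Hedged examples (not part of the original module) of what validatedate()
# accepts, based on the date_formats list above:
#
#   validatedate("24.12.2021")  -> "2021-12-24"
#   validatedate("2021-12-24")  -> "2021-12-24"
#   validatedate("12-24-2021")  -> None   (day-in-the-middle formats are rejected)
#   validatedate(None)          -> None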
def get_city_from_team(cityteam):
"""Returns a city and teamname from teamname in lower case.
It should return the contents of class wide-matchup from the NHL schedule
For historical reasons:
This function used to return a city from a team name, citydict1 had entries like:
"Washington" : "Washington Capitals",
This function is not named well anymore, but it works ;)
Something like, get_city_team_from_team? Confusing in any case..
"""
citydict1 = {
"ducks": "Anaheim Ducks",
"coyotes": "Arizona Coyotes",
"bruins": "Boston Bruins",
"buffalo": "Buffalo Sabres",
"hurricanes": "Carolina Hurricanes",
"bluejackets": "Columbus Blue Jackets",
"flames": "Calgary Flames",
"blackhawks": "Chicago Blackhawks",
"avalanche": "Colorado Avalanche",
"stars": "Dallas Stars",
"redwings": "Detroit Red Wings",
"oilers": "Edmonton Oilers",
"panthers": "Florida Panthers",
"kings": "Los Angeles Kings",
"wild": "Minnesota Wild",
"canadiens": "Montreal Canadiens",
"devils": "New Jersey Devils",
"predators": "Nashville Predators",
"islanders": "New York Islanders",
"rangers": "New York Rangers",
"senators": "Ottawa Senators",
"flyers": "Philadelphia Flyers",
"penguins": "Pittsburgh Penguins",
"sharks": "San Jose Sharks",
"blues": "St Louis Blues",
"lightning": "Tampa Bay Lightning",
"leafs": "Toronto Maple Leafs",
"canucks": "Vancouver Canucks",
"goldenknights": "Vegas Golden Knights",
"jets": "Winnipeg Jets",
"capitals": "Washington Capitals",
"kraken": "Seattle Kraken",
}
# Flip because I'm lazy
citydict1flip = {value: key for key, value in citydict1.items()}
# This means the dict has keys of "Dallas Stars": "dallas"
try:
return citydict1flip[cityteam]
except KeyError:
return ""
def get_team_from_city(city):
"""Returns a team abbreviation from cityname."""
citydict = {
"ANA": "ANAHEIM",
"ARI": "ARIZONA",
"BOS": "BOSTON",
"BUF": "BUFFALO",
"CAR": "CAROLINA",
"CBJ": "COLUMBUS",
"CGY": "CALGARY",
"CHI": "CHICAGO",
"COL": "COLORADO",
"DAL": "DALLAS",
"DET": "DETROIT",
"EDM": "EDMONTON",
"FLA": "FLORIDA",
"LAK": "LOSANGELES",
"MIN": "MINNESOTA",
"MTL": "MONTREAL",
"NJD": "NEWJERSEY",
"NSH": "NASHVILLE",
"NYI": "NYISLANDERS",
"NYR": "NYRANGERS",
"OTT": "OTTAWA",
"PHI": "PHILADELPHIA",
"PIT": "PITTSBURGH",
"SJS": "SANJOSE",
"SEA": "SEATTLE",
"STL": "STLOUIS",
"TBL": "TAMPABAY",
"TOR": "TORONTO",
"VAN": "VANCOUVER",
"VGK": "VEGAS",
"WPG": "WINNIPEG",
"WSH": "WASHINGTON",
}
# Flip because I'm lazy
citydictflip = {value: key for key, value in citydict.items()}
try:
return citydictflip[city]
except KeyError:
return "nope"
def get_team(team):
"""Returns a "City Team Name", as in teamdict1.
It is in that format because the dictionary in get_team_colors expects it.
"""
if team:
team = team.upper()
else:
return None
teamdict1 = {
"ANA": "Anaheim Ducks",
"ARI": "Arizona Coyotes",
"BOS": "Boston Bruins",
"BUF": "Buffalo Sabres",
"CAR": "Carolina Hurricanes",
"CBJ": "Columbus Blue Jackets",
"CGY": "Calgary Flames",
"CHI": "Chicago Blackhawks",
"COL": "Colorado Avalanche",
"DAL": "Dallas Stars",
"DET": "Detroit Red Wings",
"EDM": "Edmonton Oilers",
"FLA": "Florida Panthers",
"LAK": "Los Angeles Kings",
"MIN": "Minnesota Wild",
"MTL": "Montreal Canadiens",
"NJD": "New Jersey Devils",
"NSH": "Nashville Predators",
"NYI": "New York Islanders",
"NYR": "New York Rangers",
"OTT": "Ottawa Senators",
"PHI": "Philadelphia Flyers",
"PIT": "Pittsburgh Penguins",
"SEA": "Seattle Kraken",
"SJS": "San Jose Sharks",
"STL": "St Louis Blues",
"TBL": "Tampa Bay Lightning",
"TOR": "Toronto Maple Leafs",
"VAN": "Vancouver Canucks",
"VGK": "Vegas Golden Knights",
"WPG": "Winnipeg Jets",
"WSH": "Washington Capitals",
}
# To make DETROITREDWINGS return DET
teamdict1nospaces = {
key: value.replace(" ", "").upper() for key, value in teamdict1.items()
}
teamdict1nospaces = {value: key for key, value in teamdict1nospaces.items()}
teamnamedict = {
"ANA": "DUCKS",
"ARI": "COYOTES",
"BOS": "BRUINS",
"BUF": "SABRES",
"CAR": "HURRICANES",
"CBJ": "BLUEJACKETS",
"CGY": "FLAMES",
"CHI": "BLACKHAWKS",
"COL": "AVALANCHE",
"DAL": "STARS",
"DET": "REDWINGS",
"EDM": "OILERS",
"FLA": "PANTHERS",
"LAK": "KINGS",
"MIN": "WILD",
"MTL": "CANADIENS",
"NJD": "DEVILS",
"NSH": "PREDATORS",
"NYI": "ISLANDERS",
"NYR": "RANGERS",
"OTT": "SENATORS",
"PHI": "FLYERS",
"PIT": "PENGUINS",
"SEA": "KRAKEN",
"SJS": "SHARKS",
"STL": "BLUES",
"TBL": "LIGHTNING",
"TOR": "MAPLELEAFS",
"VAN": "CANUCKS",
"VGK": "GOLDENKNIGHTS",
"WPG": "JETS",
"WSH": "CAPITALS",
}
# Flip the values because I'm lazy
teamnamedict1 = {value: key for key, value in teamnamedict.items()}
# Some extra "non-standard" ones
teamnameshortdict = {
"CANES": "CAR",
"JACKETS": "CBJ",
"HAWKS": "CHI",
"WINGS": "DET",
"PREDS": "NSH",
"SENS": "OTT",
"PENS": "PIT",
"BOLTS": "TBL",
"LEAFS": "TOR",
"CAPS": "WSH",
"TAMPA": "TBL",
"LA": "LAK",
"NJ": "NJD",
"SJ": "SJS",
"LV": "VGK",
"LASVEGAS": "VGK",
"MONTRÉAL": "MTL",
"MONTRÉALCANADIENS": "MTL",
"ST. LOUIS": "STL",
"ST. LOUIS BLUES": "STL",
}
# First check if someone put in the proper abbreviation
try:
thisisyourteam = teamdict1[team]
except KeyError:
# If not, then try with the name of the team
try:
thisisyourteam = teamdict1[teamnamedict1[team]]
except KeyError:
# Then one could have one more for half names, like la, leafs, wings, jackets, etc
try:
thisisyourteam = teamdict1[teamnameshortdict[team]]
except KeyError:
# Perhaps it's a city name?
try:
thisisyourteam = teamdict1[get_team_from_city(team)]
except KeyError:
# Perhaps it's a citynameteamname?
try:
thisisyourteam = teamdict1[teamdict1nospaces[team]]
except KeyError:
# After that no team selected - nothing in title
thisisyourteam = None
return thisisyourteam
def get_team_colors(team):
"""Return a color and True/False if we found a team
List is from https://github.com/jimniels/teamcolors.github.io"""
teamname = get_team(team)
nhl = {
"Anaheim Ducks": ["000000", "91764B", "EF5225"],
"Arizona Coyotes": ["841F27", "000000", "EFE1C6"],
"Boston Bruins": ["000000", "FFC422"],
"Buffalo Sabres": ["002E62", "FDBB2F", "AEB6B9"],
"Calgary Flames": ["E03A3E", "FFC758", "000000"],
"Carolina Hurricanes": ["E03A3E", "000000", "8E8E90"],
"Chicago Blackhawks": ["E3263A", "000000"],
"Colorado Avalanche": ["8B2942", "01548A", "000000", "A9B0B8"],
"Columbus Blue Jackets": ["00285C", "E03A3E", "A9B0B8"],
"Dallas Stars": ["006A4E", "000000", "C0C0C0"],
"Detroit Red Wings": ["EC1F26"],
"Edmonton Oilers": ["003777", "E66A20"],
"Florida Panthers": ["C8213F", "002E5F", "D59C05"],
"Los Angeles Kings": ["000000", "AFB7BA"],
"Minnesota Wild": ["025736", "BF2B37", "EFB410", "EEE3C7"],
"Montreal Canadiens": ["BF2F38", "213770"],
"Nashville Predators": ["FDBB2F", "002E62"],
"New Jersey Devils": ["E03A3E", "000000"],
"New York Islanders": ["00529B", "F57D31"],
"New York Rangers": ["0161AB", "E6393F"],
"Ottawa Senators": ["E4173E", "000000", "D69F0F"],
"Philadelphia Flyers": ["F47940", "000000"],
"Pittsburgh Penguins": ["000000", "D1BD80"],
"San Jose Sharks": ["05535D", "F38F20", "000000"],
"Seattle Kraken": ["355464", "99D9D9", "001628"],
"St Louis Blues": ["0546A0", "FFC325", "101F48"],
"Tampa Bay Lightning": ["013E7D", "000000", "C0C0C0"],
"Toronto Maple Leafs": ["003777"],
"Vancouver Canucks": ["07346F", "047A4A", "A8A9AD"],
"Vegas Golden Knights": ["333F42", "B4975A", "010101"],
"Washington Capitals": ["CF132B", "00214E", "000000"],
"Winnipeg Jets": ["002E62", "0168AB", "A8A9AD"],
}
try:
return nhl[teamname]
except KeyError:
return ["000000"]
def get_all_teams():
"""Returns all teams"""
allteams = {
"ANA": "Anaheim Ducks",
"ARI": "Arizona Coyotes",
"BOS": "Boston Bruins",
"BUF": "Buffalo Sabres",
"CAR": "Carolina Hurricanes",
"CBJ": "Columbus Blue Jackets",
"CGY": "Calgary Flames",
"CHI": "Chicago Blackhawks",
"COL": "Colorado Avalanche",
"DAL": "Dallas Stars",
"DET": "Detroit Red Wings",
"EDM": "Edmonton Oilers",
"FLA": "Florida Panthers",
"LAK": "Los Angeles Kings",
"MIN": "Minnesota Wild",
"MTL": "Montreal Canadiens",
"NJD": "New Jersey Devils",
"NSH": "Nashville Predators",
"NYI": "New York Islanders",
"NYR": "New York Rangers",
"OTT": "Ottawa Senators",
"PHI": "Philadelphia Flyers",
"PIT": "Pittsburgh Penguins",
"SEA": "Seattle Kraken",
"SJS": "San Jose Sharks",
"STL": "St Louis Blues",
"TBL": "Tampa Bay Lightning",
"TOR": "Toronto Maple Leafs",
"VAN": "Vancouver Canucks",
"VGK": "Vegas Golden Knights",
"WPG": "Winnipeg Jets",
"WSH": "Washington Capitals",
}
return allteams
|
martbhell/wasthereannhlgamelastnight
|
src/nhlhelpers.py
|
Python
|
mit
| 13,252
|
[
"COLUMBUS"
] |
97f4c7729f455223a75c5d48eb45cad0d747a30acf9eee939d1682113ad48006
|
# Copyright 2002 by Yves Bastide and Brad Chapman.
# Copyright 2007 by Sebastian Bassi
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Functions to calculate assorted sequence checksums."""
# crc32, crc64, gcg, and seguid
# crc64 is adapted from BioPerl
from binascii import crc32 as _crc32
from Bio._py3k import _as_bytes
def crc32(seq):
"""Returns the crc32 checksum for a sequence (string or Seq object)."""
#NOTE - On Python 2 returns a signed int, on Python 3 it is unsigned
#Docs suggest should use crc32(x) & 0xffffffff for consistency.
#TODO - Should we return crc32(x) & 0xffffffff here?
try:
#Assume its a Seq object
return _crc32(_as_bytes(seq.tostring()))
except AttributeError:
#Assume its a string/unicode
return _crc32(_as_bytes(seq))
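# Hedged illustration (not part of Biopython): callers who need the same
# numeric value on Python 2 and Python 3 can apply the mask mentioned in the
# note above themselves:
#
#   unsigned = crc32("ACGT") & 0xffffffff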
def _init_table_h():
_table_h = []
for i in range(256):
l = i
part_h = 0
for j in range(8):
rflag = l & 1
l >>= 1
if part_h & 1: l |= (1L << 31)
part_h >>= 1L
if rflag: part_h ^= 0xd8000000L
_table_h.append(part_h)
return _table_h
# Initialisation
_table_h = _init_table_h()
def crc64(s):
"""Returns the crc64 checksum for a sequence (string or Seq object)."""
crcl = 0
crch = 0
for c in s:
shr = (crch & 0xFF) << 24
temp1h = crch >> 8
temp1l = (crcl >> 8) | shr
idx = (crcl ^ ord(c)) & 0xFF
crch = temp1h ^ _table_h[idx]
crcl = temp1l
return "CRC-%08X%08X" % (crch, crcl)
def gcg(seq):
"""Returns the GCG checksum (int) for a sequence (string or Seq object).
Given a nucleotide or amino-acid sequence (or any string),
returns the GCG checksum (int). Checksum used by GCG program.
seq type = str.
Based on BioPerl GCG_checksum. Adapted by Sebastian Bassi
with the help of John Lenton, Pablo Ziliani, and Gabriel Genellina.
All sequences are converted to uppercase """
try:
#Assume its a Seq object
seq = seq.tostring()
except AttributeError:
#Assume its a string
pass
index = checksum = 0
for char in seq:
index += 1
checksum += index * ord(char.upper())
if index == 57: index = 0
return checksum % 10000
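# Hedged worked example (not part of Biopython): for the string "ACGT" the
# running sum is 1*ord('A') + 2*ord('C') + 3*ord('G') + 4*ord('T')
# = 65 + 134 + 213 + 336 = 748, and 748 % 10000 = 748, so gcg("ACGT") == 748.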
def seguid(seq):
"""Returns the SEGUID (string) for a sequence (string or Seq object).
Given a nucleotide or amino-acid sequence (or any string),
returns the SEGUID string (A SEquence Globally Unique IDentifier).
seq type = str.
For more information about SEGUID, see:
http://bioinformatics.anl.gov/seguid/
DOI: 10.1002/pmic.200600032 """
try:
#Python 2.5 sha1 is in hashlib
import hashlib
m = hashlib.sha1()
except:
#For older versions
import sha
m = sha.new()
import base64
try:
#Assume its a Seq object
seq = seq.tostring()
except AttributeError:
#Assume its a string
pass
m.update(_as_bytes(seq.upper()))
try:
#For Python 3+
return base64.encodebytes(m.digest()).decode().replace("\n","").rstrip("=")
except AttributeError:
pass
try:
#For Python 2.5+
return base64.b64encode(m.digest()).rstrip("=")
except:
#For older versions
import os
#Note: Using os.linesep doesn't work on Windows,
#where os.linesep= "\r\n" but the encoded string
#contains "\n" but not "\r\n"
return base64.encodestring(m.digest()).replace("\n","").rstrip("=")
if __name__ == "__main__":
print "Quick self test"
str_light_chain_one = "QSALTQPASVSGSPGQSITISCTGTSSDVGSYNLVSWYQQHPGK" \
+ "APKLMIYEGSKRPSGVSNRFSGSKSGNTASLTISGLQAEDEADY" \
+ "YCSSYAGSSTLVFGGGTKLTVL"
str_light_chain_two = "QSALTQPASVSGSPGQSITISCTGTSSDVGSYNLVSWYQQHPGK" \
+ "APKLMIYEGSKRPSGVSNRFSGSKSGNTASLTISGLQAEDEADY" \
+ "YCCSYAGSSTWVFGGGTKLTVL"
assert crc64(str_light_chain_one) == crc64(str_light_chain_two)
assert 'CRC-44CAAD88706CC153' == crc64(str_light_chain_one)
assert 'BpBeDdcNUYNsdk46JoJdw7Pd3BI' == seguid(str_light_chain_one)
assert 'X5XEaayob1nZLOc7eVT9qyczarY' == seguid(str_light_chain_two)
print "Done"
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/SeqUtils/CheckSum.py
|
Python
|
gpl-2.0
| 4,470
|
[
"BioPerl",
"Biopython"
] |
d4103903bef65bcfea1fb4c39d2a84c95efafaaedd24d095493ed5fcdc3c9246
|
#!/usr/bin/env python3
import sys
import os
import mdtraj
if "-h" in sys.argv or "--help" in sys.argv:
print("Takes MD coordinate filename, outputs dummies.gro")
sys.exit(os.EX_OK)
gro = mdtraj.load(sys.argv[1])
for i in range(1, 6):
dummy_index = next(gro.top.atoms_by_name("D{0}".format(i))).index
carbo_index = next(gro.top.atoms_by_name("C{0}".format(i))).index
vec = gro.xyz[0][dummy_index] - gro.xyz[0][carbo_index]
gro.xyz[0][dummy_index] -= 1.5 * vec
for res in gro.top.residues:
if res.is_water:
continue
for i in range(1, 6):
dummy_index = next(res.atoms_by_name("D{0}".format(i))).index
carbo_index = next(res.atoms_by_name("C{0}".format(i))).index
vec = gro.xyz[0][dummy_index] - gro.xyz[0][carbo_index]
gro.xyz[0][dummy_index] -= 1.5 * vec
gro.save("dummies.gro")
|
jag1g13/pdb2lmp
|
scripts/make_sugar_dummies.py
|
Python
|
mit
| 856
|
[
"MDTraj"
] |
cf8a36cf99793bcbbaca3b3c005cc50f33c5be0a9c9f04a2e80faad58856e242
|
import numpy as np
import shutil
import os
import mdtraj as md
from mdtraj.utils import enter_temp_directory
from mdtraj.utils.delay_import import import_
import tempfile
from distutils.spawn import find_executable
import simtk.unit as units
PACKMOL_PATH = find_executable("packmol")
HEADER_TEMPLATE = """
# Mixture
tolerance %f
filetype pdb
output %s
add_amber_ter
seed %i
"""
BOX_TEMPLATE = """
structure %s
number %d
inside box 0. 0. 0. %f %f %f
end structure
"""
SPHERE_TEMPLATE = """
structure %s
number %d
inside sphere 0. 0. 0. %f
end structure
"""
FIXED_TEMPLATE = """
structure %s
number %d
center
fixed %d %d %d 0. 0. 0.
end structure
"""
def pack_box(pdb_filenames_or_trajectories, n_molecules_list, tolerance=2.0, shape='box', size=None, seed=1, fix=False):
"""Run packmol to generate a box containing a mixture of molecules.
Parameters
----------
pdb_filenames_or_trajectories : list({str, Trajectory})
List of pdb filenames or trajectories for each component of mixture. If this is
a list of trajectories, the trajectories will be saved to as
temporary files to be run in packmol.
n_molecules_list : list(int)
The number of molecules of each mixture component.
tolerance : float, optional, default=2.0
The minimum spacing between molecules during packing. In ANGSTROMS!
size : float, optional
The size of the box/sphere to generate. In ANGSTROMS.
For a box this specifies the edge length of the box.
For a sphere this specifies the diameter of the sphere.
Default generates boxes/spheres that are very large for increased stability.
May require extra time for energy minimization and equilibration.
Returns
-------
trj : MDTraj.Trajectory
Single frame trajectory with mixture box.
Notes
-----
Be aware that MDTraj uses nanometers internally, but packmol uses angstrom
units. The present function takes `tolerance` and `size` in
angstrom units, but the output trajectory will have data in nm.
Also note that OpenMM is pretty picky about the format of unit cell input,
so use the example in tests/test_packmol.py to ensure that you do the right thing.
"""
assert len(pdb_filenames_or_trajectories) == len(n_molecules_list), "Must input same number of pdb filenames as num molecules"
pdb_filenames = []
for obj in pdb_filenames_or_trajectories:
try: # See if MDTraj Trajectory
tmp_filename = tempfile.mktemp(suffix=".pdb")
obj.save_pdb(tmp_filename)
pdb_filenames.append(tmp_filename)
except AttributeError: # Not an MDTraj Trajectory, assume filename
pdb_filenames.append(obj)
if PACKMOL_PATH is None:
raise(IOError("Packmol not found, cannot run pack_box()"))
output_filename = tempfile.mktemp(suffix=".pdb")
# approximating volume to initialize box
if size is None:
size = approximate_volume(pdb_filenames, n_molecules_list)
if shape == 'box':
box_size = size
elif shape == 'sphere':
sphere_radius = float(size)/2
header = HEADER_TEMPLATE % (tolerance, output_filename, seed)
for k in range(len(pdb_filenames)):
filename = pdb_filenames[k]
n_molecules = n_molecules_list[k]
if fix and k==0 and n_molecules==1 and shape == 'box':
header = header + FIXED_TEMPLATE % (filename, n_molecules, float(size)/2, float(size)/2, float(size)/2)
elif fix and k==0 and n_molecules==1 and shape == 'sphere':
header = header + FIXED_TEMPLATE % (filename, n_molecules, 0, 0, 0)
elif shape == 'box':
header = header + BOX_TEMPLATE % (filename, n_molecules, box_size, box_size, box_size)
elif shape == 'sphere':
header = header + SPHERE_TEMPLATE % (filename, n_molecules, sphere_radius)
pwd = os.getcwd()
#print(header)
packmol_filename = "packmol_input.txt"
packmol_filename = tempfile.mktemp(suffix=".txt")
file_handle = open(packmol_filename, 'w')
file_handle.write(header)
file_handle.close()
#print(header)
os.system("%s < %s >/dev/null" % (PACKMOL_PATH, packmol_filename))
trj = md.load(output_filename)
assert trj.topology.n_chains == sum(n_molecules_list), "Packmol error: molecules missing from output"
#Begin hack to introduce bonds for the MISSING CONECT ENTRIES THAT PACKMOL FAILS TO WRITE
top, bonds = trj.top.to_dataframe()
trj_i = [md.load(filename) for filename in pdb_filenames]
bonds_i = [t.top.to_dataframe()[1] for t in trj_i]
offset = 0
bonds = []
for i in range(len(pdb_filenames)):
n_atoms = trj_i[i].n_atoms
for j in range(n_molecules_list[i]):
bonds.extend(bonds_i[i] + offset)
offset += n_atoms
bonds = np.array(bonds)
trj.top = md.Topology.from_dataframe(top, bonds)
if shape == 'box':
trj.unitcell_vectors = np.array([np.eye(3)]) * box_size / 10.
return trj
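# Hedged usage sketch (not part of openmoltools): pack a two-component mixture
# into a cubic box. Requires the packmol executable on the PATH; the PDB file
# names are placeholders.
#
#   trj = pack_box(["water.pdb", "ethanol.pdb"], [500, 50], size=35.0)
#   trj.save("mixture.pdb")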
def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):
"""Approximate the appropriate box size based on the number and types of atoms present.
Parameters
----------
pdb_filenames : list(str)
List of pdb filenames for each component of mixture.
n_molecules_list : list(int)
The number of molecules of each mixture component.
box_scaleup_factor : float, optional, default = 2.0
Factor by which the estimated box size is increased
Returns
-------
box_size : float
The size of the box to generate. In ANGSTROMS.
Notes
-----
By default, boxes are very large for increased stability, and therefore may
require extra time for energy minimization and equilibration.
"""
volume = 0.0 # in cubic angstroms
for k, (pdb_file) in enumerate(pdb_filenames):
molecule_volume = 0.0
molecule_trj = md.load(pdb_filenames[k])
for atom in molecule_trj.topology.atoms:
if atom.element.symbol == 'H':
molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms
else:
molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms
volume += molecule_volume * n_molecules_list[k]
box_size = volume**(1.0/3.0) * box_scaleup_factor
return box_size
def approximate_volume_by_density( smiles_strings, n_molecules_list, density = 1.0, box_scaleup_factor = 1.1):
"""Generate an approximate box size based on the number and molecular weight of molecules present, and a target density for the final solvated mixture. If no density is specified, the target density is assumed to be 1 g/ml.
Parameters
----------
smiles_strings : list(str)
List of smiles strings for each component of mixture.
n_molecules_list : list(int)
The number of molecules of each mixture component.
box_scaleup_factor : float, optional, default = 1.1
Factor by which the estimated box size is increased
density : float, optional, default 1.0
Target density for final system in g/ml
Returns
-------
box_size : float
The size (edge length) of the box to generate. In ANGSTROMS.
Notes
-----
By default, boxes are only modestly large. This approach has not been extensively tested for stability but has been used in the Mobley lab for perhaps ~100 different systems without substantial problems.
"""
oechem = import_("openeye.oechem")
density = density * units.grams/units.milliliter
#Load molecules to get molecular weights
wts = []
mass = 0.0*units.grams/units.mole * 1./units.AVOGADRO_CONSTANT_NA #For calculating total mass
for (idx,smi) in enumerate(smiles_strings):
mol = oechem.OEMol()
oechem.OEParseSmiles(mol, smi)
wts.append( oechem.OECalculateMolecularWeight(mol)*units.grams/units.mole )
mass += n_molecules_list[idx] * wts[idx] * 1./units.AVOGADRO_CONSTANT_NA
#Estimate volume based on mass and density
#Density = mass/volume so volume = mass/density (volume units are ml)
vol = mass/density
#Convert to box length in angstroms
edge = vol**(1./3.)
#Compute final box size
box_size = edge*box_scaleup_factor/units.angstroms
return box_size
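# Hedged worked example (not part of openmoltools): 1000 water molecules
# (SMILES "O", MW ~ 18.02 g/mol) at the default density of 1.0 g/ml give a
# total mass of ~ 1000 * 18.02 / 6.022e23 ~ 2.99e-20 g, hence a volume of
# ~ 2.99e-20 ml = 2.99e4 cubic angstroms, an edge of ~ 31 angstroms, and
# ~ 34 angstroms after the default 1.1 scale-up, so
# approximate_volume_by_density(["O"], [1000]) returns roughly 34.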
def rename_water_atoms( pdb_filename, O_name = 'O', H1_name = 'H1', H2_name = 'H2' ):
"""Rename water atoms in a specified PDB file to have target names. Typically used to ensure a packmol-generated box containing water has water atom names corresponding to what tleap expects for standard water models.
Parameters
----------
pdb_filename : str
The target PDB filename to edit
O_name : str, optional, default 'O'
Target name to set water oxygen names to
H1_name : str, optional, default 'H1'
Target name to set water hydrogen names to, for first hydrogen
H2_name : str, optional, default 'H2'
Target name to set water hydrogen names to, for second hydrogen
Returns
-------
Notes
-------
Uses ParmEd to make edits. Identifies waters by reading residues from target PDB file and identifying any residue containing three atoms with names O or O#, H or H#, and H or H# (where # is a digit or sequence of digits) as water molecules.
"""
parmed = import_("parmed")
pdb = parmed.load_file( pdb_filename )
#Find waters and rename
for residue in pdb.residues:
if len(residue)==3:
#Build list of atom types (PDB files don't store these) from names after stripping off digits
types = []
for atom in residue.atoms:
name = atom.name
while name[-1].isdigit():
name = name[:-1]
types.append(name)
#See if it's water and, if so, rename
if 'O' in types and types.count('H')==2:
hct = 0
for atom in residue.atoms:
if 'O' in atom.name:
atom.name = O_name
elif 'H' in atom.name:
if hct==0:
atom.name = H1_name
else:
atom.name = H2_name
hct+=1
#Write file
pdb.write_pdb( pdb_filename )
|
Clyde-fare/openmoltools
|
openmoltools/packmol.py
|
Python
|
gpl-2.0
| 10,513
|
[
"MDTraj",
"OpenMM"
] |
de3646d8a8d468e81cdb1aaf042c58dafcf29589d2fc9654cda7e762afc96002
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes for calculating the ewald sum of a structure.
"""
import bisect
from copy import copy, deepcopy
from datetime import datetime
from math import log, pi, sqrt
from typing import Dict
from warnings import warn
import numpy as np
from monty.json import MSONable
from scipy.special import comb, erfc
from scipy import constants
from pymatgen.core.structure import Structure
__author__ = "Shyue Ping Ong, William Davidson Richard"
__copyright__ = "Copyright 2011, The Materials Project"
__credits__ = "Christopher Fischer"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Aug 1 2012"
class EwaldSummation(MSONable):
"""
Calculates the electrostatic energy of a periodic array of charges using
the Ewald technique.
Ref:
Ewald summation techniques in perspective: a survey
Abdulnour Y. Toukmaji and John A. Board Jr.
DOI: 10.1016/0010-4655(96)00016-1
URL: http://www.ee.duke.edu/~ayt/ewaldpaper/ewaldpaper.html
    The total energy matrix can be used to do fast calculations of Ewald sums
    after species removal.
E = E_recip + E_real + E_point
Atomic units used in the code, then converted to eV.
"""
# Converts unit of q*q/r into eV
CONV_FACT = 1e10 * constants.e / (4 * pi * constants.epsilon_0)
def __init__(
self,
structure,
real_space_cut=None,
recip_space_cut=None,
eta=None,
acc_factor=12.0,
w=1 / sqrt(2),
compute_forces=False,
):
"""
Initializes and calculates the Ewald sum. Default convergence
parameters have been specified, but you can override them if you wish.
Args:
structure (Structure): Input structure that must have proper
Species on all sites, i.e. Element with oxidation state. Use
Structure.add_oxidation_state... for example.
real_space_cut (float): Real space cutoff radius dictating how
many terms are used in the real space sum. Defaults to None,
which means determine automagically using the formula given
in gulp 3.1 documentation.
recip_space_cut (float): Reciprocal space cutoff radius.
Defaults to None, which means determine automagically using
the formula given in gulp 3.1 documentation.
eta (float): The screening parameter. Defaults to None, which means
determine automatically.
acc_factor (float): No. of significant figures each sum is
converged to.
            w (float): Weight parameter representing the relative
                computational expense of calculating a term in real versus
                reciprocal space. The default of 1 / sqrt(2) (about 0.7)
                reproduces results similar to GULP 4.2. This has little
                effect on the total energy, but may influence the speed of
                computation in large systems. Note that this parameter is
                used only when the cutoffs are set to None.
compute_forces (bool): Whether to compute forces. False by
default since it is usually not needed.
"""
self._s = structure
self._charged = abs(structure.charge) > 1e-8
self._vol = structure.volume
self._compute_forces = compute_forces
self._acc_factor = acc_factor
# set screening length
self._eta = eta if eta else (len(structure) * w / (self._vol ** 2)) ** (1 / 3) * pi
self._sqrt_eta = sqrt(self._eta)
# acc factor used to automatically determine the optimal real and
# reciprocal space cutoff radii
self._accf = sqrt(log(10 ** acc_factor))
self._rmax = real_space_cut if real_space_cut else self._accf / self._sqrt_eta
self._gmax = recip_space_cut if recip_space_cut else 2 * self._sqrt_eta * self._accf
# The next few lines pre-compute certain quantities and store them.
# Ewald summation is rather expensive, and these shortcuts are
# necessary to obtain several factors of improvement in speedup.
self._oxi_states = [compute_average_oxidation_state(site) for site in structure]
self._coords = np.array(self._s.cart_coords)
# Define the private attributes to lazy compute reciprocal and real
# space terms.
self._initialized = False
self._recip = None
self._real, self._point = None, None
self._forces = None
# Compute the correction for a charged cell
self._charged_cell_energy = (
-EwaldSummation.CONV_FACT / 2 * np.pi / structure.volume / self._eta * structure.charge ** 2
)
def compute_partial_energy(self, removed_indices):
"""
Gives total ewald energy for certain sites being removed, i.e. zeroed
out.
"""
total_energy_matrix = self.total_energy_matrix.copy()
for i in removed_indices:
total_energy_matrix[i, :] = 0
total_energy_matrix[:, i] = 0
return sum(sum(total_energy_matrix))
def compute_sub_structure(self, sub_structure, tol=1e-3):
"""
        Gives the total Ewald energy for a sub-structure in the same
        lattice. The sub_structure must be a subset of the original
        structure, with possibly different charges.
        Args:
            sub_structure (Structure): Substructure to compute the Ewald sum for.
tol (float): Tolerance for site matching in fractional coordinates.
Returns:
Ewald sum of substructure.
"""
total_energy_matrix = self.total_energy_matrix.copy()
def find_match(site):
for test_site in sub_structure:
frac_diff = abs(np.array(site.frac_coords) - np.array(test_site.frac_coords)) % 1
frac_diff = [abs(a) < tol or abs(a) > 1 - tol for a in frac_diff]
if all(frac_diff):
return test_site
return None
matches = []
for i, site in enumerate(self._s):
matching_site = find_match(site)
if matching_site:
new_charge = compute_average_oxidation_state(matching_site)
old_charge = self._oxi_states[i]
scaling_factor = new_charge / old_charge
matches.append(matching_site)
else:
scaling_factor = 0
total_energy_matrix[i, :] *= scaling_factor
total_energy_matrix[:, i] *= scaling_factor
if len(matches) != len(sub_structure):
output = ["Missing sites."]
for site in sub_structure:
if site not in matches:
output.append("unmatched = {}".format(site))
raise ValueError("\n".join(output))
return sum(sum(total_energy_matrix))
@property
def reciprocal_space_energy(self):
"""
The reciprocal space energy.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return sum(sum(self._recip))
@property
def reciprocal_space_energy_matrix(self):
"""
The reciprocal space energy matrix. Each matrix element (i, j)
corresponds to the interaction energy between site i and site j in
reciprocal space.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return self._recip
@property
def real_space_energy(self):
"""
        The real space energy.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return sum(sum(self._real))
@property
def real_space_energy_matrix(self):
"""
The real space energy matrix. Each matrix element (i, j) corresponds to
the interaction energy between site i and site j in real space.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return self._real
@property
def point_energy(self):
"""
The point energy.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return sum(self._point)
@property
def point_energy_matrix(self):
"""
        The point energy matrix. A diagonal matrix with the point terms for each
site in the diagonal elements.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return self._point
@property
def total_energy(self):
"""
The total energy.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
return sum(sum(self._recip)) + sum(sum(self._real)) + sum(self._point) + self._charged_cell_energy
@property
def total_energy_matrix(self):
"""
The total energy matrix. Each matrix element (i, j) corresponds to the
total interaction energy between site i and site j.
Note that this does not include the charged-cell energy, which is only important
when the simulation cell is not charge balanced.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
totalenergy = self._recip + self._real
for i, energy in enumerate(self._point):
totalenergy[i, i] += energy
return totalenergy
@property
def forces(self):
"""
The forces on each site as a Nx3 matrix. Each row corresponds to a
site.
"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
if not self._compute_forces:
raise AttributeError("Forces are available only if compute_forces is True!")
return self._forces
def get_site_energy(self, site_index):
"""Compute the energy for a single site in the structure
Args:
site_index (int): Index of site
        Returns:
(float) - Energy of that site"""
if not self._initialized:
self._calc_ewald_terms()
self._initialized = True
if self._charged:
warn("Per atom energies for charged structures not supported in EwaldSummation")
return np.sum(self._recip[:, site_index]) + np.sum(self._real[:, site_index]) + self._point[site_index]
def _calc_ewald_terms(self):
"""
Calculates and sets all ewald terms (point, real and reciprocal)
"""
self._recip, recip_forces = self._calc_recip()
self._real, self._point, real_point_forces = self._calc_real_and_point()
if self._compute_forces:
self._forces = recip_forces + real_point_forces
def _calc_recip(self):
"""
Perform the reciprocal space summation. Calculates the quantity
E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G)
where
S(G) = sum_{k=1,N} q_k exp(-i G.r_k)
S(G)S(-G) = |S(G)|**2
This method is heavily vectorized to utilize numpy's C backend for
speed.
"""
numsites = self._s.num_sites
prefactor = 2 * pi / self._vol
erecip = np.zeros((numsites, numsites), dtype=np.float_)
forces = np.zeros((numsites, 3), dtype=np.float_)
coords = self._coords
rcp_latt = self._s.lattice.reciprocal_lattice
recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], self._gmax)
frac_coords = [fcoords for (fcoords, dist, i, img) in recip_nn if dist != 0]
gs = rcp_latt.get_cartesian_coords(frac_coords)
g2s = np.sum(gs ** 2, 1)
expvals = np.exp(-g2s / (4 * self._eta))
grs = np.sum(gs[:, None] * coords[None, :], 2)
oxistates = np.array(self._oxi_states)
# create array where q_2[i,j] is qi * qj
qiqj = oxistates[None, :] * oxistates[:, None]
# calculate the structure factor
sreals = np.sum(oxistates[None, :] * np.cos(grs), 1)
simags = np.sum(oxistates[None, :] * np.sin(grs), 1)
for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals, sreals, simags):
# Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4)
m = (gr[None, :] + pi / 4) - gr[:, None]
np.sin(m, m)
m *= expval / g2
erecip += m
if self._compute_forces:
pref = 2 * expval / g2 * oxistates
factor = prefactor * pref * (sreal * np.sin(gr) - simag * np.cos(gr))
forces += factor[:, None] * g[None, :]
forces *= EwaldSummation.CONV_FACT
erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5
return erecip, forces
def _calc_real_and_point(self):
"""
        Performs the real space summation and determines the self (point)
        energy -(eta/pi)**(1/2) * sum_{i=1}^{N} q_i**2.
"""
fcoords = self._s.frac_coords
forcepf = 2.0 * self._sqrt_eta / sqrt(pi)
coords = self._coords
numsites = self._s.num_sites
ereal = np.empty((numsites, numsites), dtype=np.float_)
forces = np.zeros((numsites, 3), dtype=np.float_)
qs = np.array(self._oxi_states)
epoint = -(qs ** 2) * sqrt(self._eta / pi)
for i in range(numsites):
nfcoords, rij, js, _ = self._s.lattice.get_points_in_sphere(
fcoords, coords[i], self._rmax, zip_results=False
)
# remove the rii term
inds = rij > 1e-8
js = js[inds]
rij = rij[inds]
nfcoords = nfcoords[inds]
qi = qs[i]
qj = qs[js]
erfcval = erfc(self._sqrt_eta * rij)
new_ereals = erfcval * qi * qj / rij
# insert new_ereals
for k in range(numsites):
ereal[k, i] = np.sum(new_ereals[js == k])
if self._compute_forces:
nccoords = self._s.lattice.get_cartesian_coords(nfcoords)
fijpf = qj / rij ** 3 * (erfcval + forcepf * rij * np.exp(-self._eta * rij ** 2))
forces[i] += np.sum(
np.expand_dims(fijpf, 1) * (np.array([coords[i]]) - nccoords) * qi * EwaldSummation.CONV_FACT,
axis=0,
)
ereal *= 0.5 * EwaldSummation.CONV_FACT
epoint *= EwaldSummation.CONV_FACT
return ereal, epoint, forces
@property
def eta(self):
"""
Returns: eta value used in Ewald summation.
"""
return self._eta
def __str__(self):
if self._compute_forces:
output = [
"Real = " + str(self.real_space_energy),
"Reciprocal = " + str(self.reciprocal_space_energy),
"Point = " + str(self.point_energy),
"Total = " + str(self.total_energy),
"Forces:\n" + str(self.forces),
]
else:
output = [
"Real = " + str(self.real_space_energy),
"Reciprocal = " + str(self.reciprocal_space_energy),
"Point = " + str(self.point_energy),
"Total = " + str(self.total_energy),
"Forces were not computed",
]
return "\n".join(output)
def as_dict(self, verbosity: int = 0) -> Dict:
"""
Json-serialization dict representation of EwaldSummation.
Args:
verbosity (int): Verbosity level. Default of 0 only includes the
matrix representation. Set to 1 for more details.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self._s.as_dict(),
"compute_forces": self._compute_forces,
"eta": self._eta,
"acc_factor": self._acc_factor,
"real_space_cut": self._rmax,
"recip_space_cut": self._gmax,
"_recip": None if self._recip is None else self._recip.tolist(),
"_real": None if self._real is None else self._real.tolist(),
"_point": None if self._point is None else self._point.tolist(),
"_forces": None if self._forces is None else self._forces.tolist(),
}
return d
@classmethod
def from_dict(cls, d: Dict, fmt: str = None, **kwargs):
"""
Create an EwaldSummation instance from json serialized dictionary.
"""
summation = cls(
structure=Structure.from_dict(d["structure"]),
real_space_cut=d["real_space_cut"],
recip_space_cut=d["recip_space_cut"],
eta=d["eta"],
acc_factor=d["acc_factor"],
compute_forces=d["compute_forces"],
)
# set previously computed private attributes
if d["_recip"] is not None:
summation._recip = np.array(d["_recip"])
summation._real = np.array(d["_real"])
summation._point = np.array(d["_point"])
summation._forces = np.array(d["_forces"])
summation._initialized = True
return summation
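# Hedged usage sketch (added, not part of the original module): EwaldSummation
# expects an oxidation-state-decorated Structure; the rock-salt-like cell below
# is purely illustrative.
#
#   from pymatgen.core.lattice import Lattice
#   from pymatgen.core.structure import Structure
#   s = Structure(Lattice.cubic(4.2), ["Na+", "Cl-"], [[0, 0, 0], [0.5, 0.5, 0.5]])
#   ewald = EwaldSummation(s, compute_forces=True)
#   print(ewald.total_energy)        # total electrostatic energy in eV
#   print(ewald.get_site_energy(0))  # energy of the first site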
class EwaldMinimizer:
"""
This class determines the manipulations that will minimize an ewald matrix,
given a list of possible manipulations. This class does not perform the
manipulations on a structure, but will return the list of manipulations
that should be done on one to produce the minimal structure. It returns the
manipulations for the n lowest energy orderings. This class should be used
to perform fractional species substitution or fractional species removal to
produce a new structure. These manipulations create large numbers of
candidate structures, and this class can be used to pick out those with the
lowest ewald sum.
An alternative (possibly more intuitive) interface to this class is the
order disordered structure transformation.
Author - Will Richards
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
"""
ALGO_TIME_LIMIT: Slowly increases the speed (with the cost of decreasing
accuracy) as the minimizer runs. Attempts to limit the run time to
approximately 30 minutes.
"""
ALGO_TIME_LIMIT = 3
def __init__(self, matrix, m_list, num_to_return=1, algo=ALGO_FAST):
"""
Args:
matrix: A matrix of the ewald sum interaction energies. This is stored
in the class as a diagonally symmetric array and so
self._matrix will not be the same as the input matrix.
            m_list: List of manipulations. Each item is of the form
                (multiplication fraction, number_of_indices, indices, species).
                These are sorted such that the first manipulation contains the
                most permutations; it is actually evaluated last in the
                recursion since pop() is used.
            num_to_return: The minimizer will find the num_to_return lowest
                energy structures. This is likely to return a number of
                duplicate structures, so it may be necessary to overestimate
                and then remove the duplicates later (duplicate checking in
                this process is extremely expensive).
"""
# Setup and checking of inputs
self._matrix = copy(matrix)
        # Make the matrix diagonally symmetric (so matrix[i,:] == matrix[:,i])
for i in range(len(self._matrix)):
for j in range(i, len(self._matrix)):
value = (self._matrix[i, j] + self._matrix[j, i]) / 2
self._matrix[i, j] = value
self._matrix[j, i] = value
# sort the m_list based on number of permutations
self._m_list = sorted(m_list, key=lambda x: comb(len(x[2]), x[1]), reverse=True)
for mlist in self._m_list:
if mlist[0] > 1:
raise ValueError("multiplication fractions must be <= 1")
self._current_minimum = float("inf")
self._num_to_return = num_to_return
self._algo = algo
if algo == EwaldMinimizer.ALGO_COMPLETE:
            raise NotImplementedError("Complete algo not yet implemented for EwaldMinimizer")
self._output_lists = []
        # Flag that the recurse function checks at each level. If a method
        # sets this to True, it breaks the recursion and stops the search.
self._finished = False
self._start_time = datetime.utcnow()
self.minimize_matrix()
self._best_m_list = self._output_lists[0][1]
self._minimized_sum = self._output_lists[0][0]
def minimize_matrix(self):
"""
        This method finds and returns the permutations that produce the lowest
        Ewald sum. It calls a recursive function to iterate through permutations.
"""
if self._algo == EwaldMinimizer.ALGO_FAST or self._algo == EwaldMinimizer.ALGO_BEST_FIRST:
return self._recurse(self._matrix, self._m_list, set(range(len(self._matrix))))
return None
def add_m_list(self, matrix_sum, m_list):
"""
This adds an m_list to the output_lists and updates the current
minimum if the list is full.
"""
if self._output_lists is None:
self._output_lists = [[matrix_sum, m_list]]
else:
bisect.insort(self._output_lists, [matrix_sum, m_list])
if self._algo == EwaldMinimizer.ALGO_BEST_FIRST and len(self._output_lists) == self._num_to_return:
self._finished = True
if len(self._output_lists) > self._num_to_return:
self._output_lists.pop()
if len(self._output_lists) == self._num_to_return:
self._current_minimum = self._output_lists[-1][0]
def best_case(self, matrix, m_list, indices_left):
"""
Computes a best case given a matrix and manipulation list.
Args:
matrix: the current matrix (with some permutations already
performed)
m_list: [(multiplication fraction, number_of_indices, indices,
species)] describing the manipulation
            indices_left: Set of indices which haven't had a permutation
                performed on them.
"""
m_indices = []
fraction_list = []
for m in m_list:
m_indices.extend(m[2])
fraction_list.extend([m[0]] * m[1])
indices = list(indices_left.intersection(m_indices))
interaction_matrix = matrix[indices, :][:, indices]
fractions = np.zeros(len(interaction_matrix)) + 1
fractions[: len(fraction_list)] = fraction_list
fractions = np.sort(fractions)
# Sum associated with each index (disregarding interactions between
# indices)
sums = 2 * np.sum(matrix[indices], axis=1)
sums = np.sort(sums)
        # Interaction corrections. Can be reduced to (1-x)(1-y) for x, y in
        # fractions: each element in a column gets multiplied by (1-x), and then
        # the sum of the columns gets multiplied by (1-y). Since fractions are
        # less than 1, there is no effect of one choice on the other.
step1 = np.sort(interaction_matrix) * (1 - fractions)
step2 = np.sort(np.sum(step1, axis=1))
step3 = step2 * (1 - fractions)
interaction_correction = np.sum(step3)
if self._algo == self.ALGO_TIME_LIMIT:
elapsed_time = datetime.utcnow() - self._start_time
speedup_parameter = elapsed_time.total_seconds() / 1800
avg_int = np.sum(interaction_matrix, axis=None)
avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))
average_correction = avg_int * avg_frac
interaction_correction = average_correction * speedup_parameter + interaction_correction * (
1 - speedup_parameter
)
best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) + interaction_correction
return best_case
@classmethod
def get_next_index(cls, matrix, manipulation, indices_left):
"""
Returns an index that should have the most negative effect on the
matrix sum
"""
# pylint: disable=E1126
f = manipulation[0]
indices = list(indices_left.intersection(manipulation[2]))
sums = np.sum(matrix[indices], axis=1)
if f < 1:
next_index = indices[sums.argmax(axis=0)]
else:
next_index = indices[sums.argmin(axis=0)]
return next_index
def _recurse(self, matrix, m_list, indices, output_m_list=[]):
"""
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
"""
# check to see if we've found all the solutions that we need
if self._finished:
return
# if we're done with the current manipulation, pop it off.
while m_list[-1][1] == 0:
m_list = copy(m_list)
m_list.pop()
        # if there are no more manipulations left to do, check the value
if not m_list:
matrix_sum = np.sum(matrix)
if matrix_sum < self._current_minimum:
self.add_m_list(matrix_sum, output_m_list)
return
        # if we won't have enough indices left, return
if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
return
if len(m_list) == 1 or m_list[-1][1] > 1:
if self.best_case(matrix, m_list, indices) > self._current_minimum:
return
index = self.get_next_index(matrix, m_list[-1], indices)
m_list[-1][2].remove(index)
# Make the matrix and new m_list where we do the manipulation to the
# index that we just got
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[-1][0]
matrix2[:, index] *= m_list[-1][0]
output_m_list2.append([index, m_list[-1][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[-1][1] -= 1
# recurse through both the modified and unmodified matrices
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list)
@property
def best_m_list(self):
"""
Returns: Best m_list found.
"""
return self._best_m_list
@property
def minimized_sum(self):
"""
Returns: Minimized sum
"""
return self._minimized_sum
@property
def output_lists(self):
"""
Returns: output lists.
"""
return self._output_lists
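# Hedged usage sketch (added, not part of the original module): given the total
# energy matrix from an EwaldSummation of an assumed ``structure``, a manipulation
# of the form (multiplication fraction, number_of_indices, indices, species) asks
# the minimizer for the lowest-energy choice of sites; this example zeroes out
# (removes) two sites.
#
#   matrix = EwaldSummation(structure).total_energy_matrix
#   m_list = [[0.0, 2, list(range(len(structure))), None]]
#   minimizer = EwaldMinimizer(matrix, m_list, num_to_return=1)
#   best_manipulations, best_sum = minimizer.best_m_list, minimizer.minimized_sum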
def compute_average_oxidation_state(site):
"""
Calculates the average oxidation state of a site
Args:
site: Site to compute average oxidation state
Returns:
Average oxidation state of site.
"""
try:
avg_oxi = sum([sp.oxi_state * occu for sp, occu in site.species.items() if sp is not None])
return avg_oxi
except AttributeError:
pass
try:
return site.charge
except AttributeError:
raise ValueError(
"Ewald summation can only be performed on structures "
"that are either oxidation state decorated or have "
"site charges."
)
|
gmatteo/pymatgen
|
pymatgen/analysis/ewald.py
|
Python
|
mit
| 28,207
|
[
"GULP",
"pymatgen"
] |
ee2e6e739805361d7039113f62a79ddffc2ea4da531d0c709fd0a0bc1356f9d6
|
#!/usr/bin/env python
"""
This example demonstrates how to use boolean combinations of implicit
functions to create a model of an ice cream cone.
"""
import vtk
def main():
colors = vtk.vtkNamedColors()
# Create implicit function primitives. These have been carefully placed to
# give the effect that we want. We are going to use various combinations of
    # these functions to create the shape we want. For example, we use planes
# intersected with a cone (which is infinite in extent) to get a finite
# cone.
#
cone = vtk.vtkCone()
cone.SetAngle(20)
vertPlane = vtk.vtkPlane()
vertPlane.SetOrigin(.1, 0, 0)
vertPlane.SetNormal(-1, 0, 0)
basePlane = vtk.vtkPlane()
basePlane.SetOrigin(1.2, 0, 0)
basePlane.SetNormal(1, 0, 0)
iceCream = vtk.vtkSphere()
iceCream.SetCenter(1.333, 0, 0)
iceCream.SetRadius(0.5)
bite = vtk.vtkSphere()
bite.SetCenter(1.5, 0, 0.5)
bite.SetRadius(0.25)
# Combine primitives to build ice-cream cone. Clip the cone with planes.
theCone = vtk.vtkImplicitBoolean()
theCone.SetOperationTypeToIntersection()
theCone.AddFunction(cone)
theCone.AddFunction(vertPlane)
theCone.AddFunction(basePlane)
# Take a bite out of the ice cream.
theCream = vtk.vtkImplicitBoolean()
theCream.SetOperationTypeToDifference()
theCream.AddFunction(iceCream)
theCream.AddFunction(bite)
# The sample function generates a distance function from the
# implicit function (which in this case is the cone). This is
# then contoured to get a polygonal surface.
#
theConeSample = vtk.vtkSampleFunction()
theConeSample.SetImplicitFunction(theCone)
theConeSample.SetModelBounds(-1, 1.5, -1.25, 1.25, -1.25, 1.25)
theConeSample.SetSampleDimensions(128, 128, 128)
theConeSample.ComputeNormalsOff()
theConeSurface = vtk.vtkContourFilter()
theConeSurface.SetInputConnection(theConeSample.GetOutputPort())
theConeSurface.SetValue(0, 0.0)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(theConeSurface.GetOutputPort())
coneMapper.ScalarVisibilityOff()
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(colors.GetColor3d("chocolate"))
# The same here for the ice cream.
#
theCreamSample = vtk.vtkSampleFunction()
theCreamSample.SetImplicitFunction(theCream)
theCreamSample.SetModelBounds(0, 2.5, -1.25, 1.25, -1.25, 1.25)
theCreamSample.SetSampleDimensions(128, 128, 128)
theCreamSample.ComputeNormalsOff()
theCreamSurface = vtk.vtkContourFilter()
theCreamSurface.SetInputConnection(theCreamSample.GetOutputPort())
theCreamSurface.SetValue(0, 0.0)
creamMapper = vtk.vtkPolyDataMapper()
creamMapper.SetInputConnection(theCreamSurface.GetOutputPort())
creamMapper.ScalarVisibilityOff()
creamActor = vtk.vtkActor()
creamActor.SetMapper(creamMapper)
creamActor.GetProperty().SetDiffuseColor(colors.GetColor3d("mint"))
creamActor.GetProperty().SetSpecular(.6)
creamActor.GetProperty().SetSpecularPower(50)
# Create the usual rendering stuff.
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size.
#
ren1.AddActor(coneActor)
ren1.AddActor(creamActor)
ren1.SetBackground(colors.GetColor3d("SlateGray"))
renWin.SetSize(640, 480)
ren1.ResetCamera()
ren1.GetActiveCamera().Roll(90)
ren1.GetActiveCamera().Dolly(1.25)
ren1.ResetCameraClippingRange()
iren.Initialize()
# render the image
#
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/VisualizationAlgorithms/IceCream.py
|
Python
|
apache-2.0
| 3,833
|
[
"VTK"
] |
2f4ad9322cf97300198f280c15e770181a77108ac58aae6b9d0cb8c7259d8bdc
|
from __future__ import annotations
import os
def parse_integrate_lp_updates(filename):
"""Parse the integrate.lp file to get the values for any updated
parameters."""
if not os.path.split(filename)[-1] == "INTEGRATE.LP":
raise RuntimeError("input filename not INTEGRATE.LP")
with open(filename) as fh:
file_contents = fh.readlines()
updates = {}
for i, content in enumerate(file_contents):
if " ***** SUGGESTED VALUES FOR INPUT PARAMETERS *****" in content:
beam_parms = file_contents[i + 1].replace("=", "").split()
reflecting_parms = file_contents[i + 2].replace("=", "").split()
updates[beam_parms[0]] = float(beam_parms[1])
updates[beam_parms[2]] = float(beam_parms[3])
updates[reflecting_parms[0]] = float(reflecting_parms[1])
updates[reflecting_parms[2]] = float(reflecting_parms[3])
return updates
def parse_integrate_lp(filename):
"""Parse the contents of the INTEGRATE.LP file pointed to by filename."""
if not os.path.split(filename)[-1] == "INTEGRATE.LP":
raise RuntimeError("input filename not INTEGRATE.LP")
with open(filename) as fh:
file_contents = fh.readlines()
per_image_stats = {}
oscillation_range = 0.0
block_images = []
for i, content in enumerate(file_contents):
# check for the header contents - this is basically a duplicate
# of the input data....
if "OSCILLATION_RANGE=" in content:
oscillation_range = float(content.split()[1])
if "PROCESSING OF IMAGES" in content:
lst = content.split()
block_images = list(range(int(lst[3]), int(lst[5]) + 1))
# look for explicitly per-image information
if "IMAGE IER SCALE" in content:
j = i + 1
while file_contents[j].strip():
line = file_contents[j]
assert len(line) == 71, len(line)
image = int(line[0:6])
status = int(line[6:10])
scale = float(line[10:17])
overloads = int(line[26:31])
all = int(line[31:38])
strong = int(line[38:46])
rejected = int(line[46:52])
if status == 0:
# trap e.g. missing images - need to be able to
# record this somewhere...
if all:
fraction_weak = 1.0 - (float(strong) / float(all))
else:
fraction_weak = 1.0
per_image_stats[image] = {
"scale": scale,
"overloads": overloads,
"strong": strong,
"all": all,
"fraction_weak": fraction_weak,
"rejected": rejected,
}
else:
block_images.remove(image)
j += 1
# then look for per-block information
if "CRYSTAL MOSAICITY (DEGREES)" in content:
mosaic = float(content.split()[3])
for image in block_images:
per_image_stats[image]["mosaic"] = mosaic
if "OF SPOT POSITION (PIXELS)" in content:
rmsd_pixel = float(content.split()[-1])
for image in block_images:
per_image_stats[image]["rmsd_pixel"] = rmsd_pixel
if "UNIT CELL PARAMETERS" in content:
unit_cell = tuple(map(float, content.split()[-6:]))
for image in block_images:
per_image_stats[image]["unit_cell"] = unit_cell
if "OF SPINDLE POSITION (DEGREES)" in content:
rmsd_phi = float(content.split()[-1])
for image in block_images:
per_image_stats[image]["rmsd_phi"] = rmsd_phi / oscillation_range
# want to convert this to mm in some standard setting!
if "DETECTOR COORDINATES (PIXELS) OF DIRECT BEAM" in content:
beam = list(map(float, content.split()[-2:]))
for image in block_images:
per_image_stats[image]["beam"] = beam
if "CRYSTAL TO DETECTOR DISTANCE (mm)" in content:
distance = float(content.split()[-1])
for image in block_images:
per_image_stats[image]["distance"] = distance
return per_image_stats
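# Hedged usage sketch (added, not part of the original module): both helpers
# insist that the file is literally named INTEGRATE.LP; the path below is
# hypothetical.
#
#   updates = parse_integrate_lp_updates("/data/xds_run/INTEGRATE.LP")
#   stats = parse_integrate_lp("/data/xds_run/INTEGRATE.LP")
#   # stats maps image number -> {"scale", "rmsd_pixel", "rmsd_phi", "mosaic", ...}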
|
xia2/xia2
|
src/xia2/Wrappers/XDS/XDSIntegrateHelpers.py
|
Python
|
bsd-3-clause
| 4,449
|
[
"CRYSTAL"
] |
0300140978575fac6907b4cadb08c42973f5be3e8efec58c1f93a86fc04ffdd9
|
#! /usr/bin/python
"""Copyright 2011 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__="Adam Stelmack"
__version__="2.1.8"
__date__ ="13-Jan-2011 4:28:27 PM"
#Basic imports
import sys
from time import sleep
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetException
from Phidgets.Devices.Analog import Analog
from Phidgets.Phidget import PhidgetLogLevel
#Create an analog output object
try:
analog = Analog()
except RuntimeError as e:
print("Runtime Exception: %s" % e.details)
print("Exiting....")
exit(1)
#Information Display Function
def displayDeviceInfo():
print("|------------|----------------------------------|--------------|------------|")
print("|- Attached -|- Type -|- Serial No. -|- Version -|")
print("|------------|----------------------------------|--------------|------------|")
print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (analog.isAttached(), analog.getDeviceName(), analog.getSerialNum(), analog.getDeviceVersion()))
print("|------------|----------------------------------|--------------|------------|")
print("Number of analog outputs: %i" % (analog.getOutputCount()))
print("Maximum output voltage: %d" % (analog.getVoltageMax(0)))
print("Minimum output voltage: %d" % (analog.getVoltageMin(0)))
#Event Handler Callback Functions
def AnalogAttached(e):
attached = e.device
print("Analog %i Attached!" % (attached.getSerialNum()))
def AnalogDetached(e):
detached = e.device
print("Analog %i Detached!" % (detached.getSerialNum()))
def AnalogError(e):
try:
source = e.device
print("Analog %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
#Main Program Code
try:
#logging example, uncomment to generate a log file
#analog.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log")
analog.setOnAttachHandler(AnalogAttached)
analog.setOnDetachHandler(AnalogDetached)
analog.setOnErrorhandler(AnalogError)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Opening phidget object....")
try:
analog.openPhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Waiting for attach....")
try:
analog.waitForAttach(10000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
analog.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
else:
displayDeviceInfo()
try:
print("Enabling Analog output channel 0...")
analog.setEnabled(0, True)
sleep(5)
print("Set analog output voltage to +5.00V...")
analog.setVoltage(0, 5.00)
sleep(5)
print("Set analog output voltage to -5.00V...")
analog.setVoltage(0, -5.00)
sleep(5)
print("Set analog output voltage to +0.00V...")
analog.setVoltage(0, 0.00)
sleep(5)
print("Disabling Analog output channel 0...")
analog.setEnabled(0, False)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Press Enter to quit....")
chr = sys.stdin.read(1)
print("Closing...")
try:
analog.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Done.")
exit(0)
|
danielsuo/mobot
|
src/move/Python/Analog-simple.py
|
Python
|
mit
| 3,861
|
[
"VisIt"
] |
c31a7eed8b9b06449b795c07e8c1a20d66ef1aec3d45a7531d9419ef7e2a00f3
|
"""Perform validation of final calls against known reference materials.
Automates the process of checking pipeline results against known valid calls
to identify discordant variants. This provides a baseline for ensuring the
validity of pipeline updates and algorithm changes.
"""
import collections
import contextlib
import csv
import hashlib
import os
import shutil
import subprocess
import time
from pysam import VariantFile
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.bam import callable
from bcbio.cwl import cwlutils
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import bubbletree
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bedutils, validateplot, vcfutils, multi, naming
# ## Individual sample comparisons
def _get_validate(data):
"""Retrieve items to validate, from single samples or from combined joint calls.
"""
if data.get("vrn_file") and tz.get_in(["config", "algorithm", "validate"], data):
return utils.deepish_copy(data)
elif "group_orig" in data:
for sub in multi.get_orig_items(data):
if "validate" in sub["config"]["algorithm"]:
sub_val = utils.deepish_copy(sub)
sub_val["vrn_file"] = data["vrn_file"]
return sub_val
return None
def normalize_input_path(x, data):
"""Normalize path for input files, handling relative paths.
Looks for non-absolute paths in local and fastq directories
"""
if x is None:
return None
elif os.path.isabs(x):
return os.path.normpath(x)
else:
for d in [data["dirs"].get("fastq"), data["dirs"].get("work")]:
if d:
cur_x = os.path.normpath(os.path.join(d, x))
if os.path.exists(cur_x):
return cur_x
raise IOError("Could not find validation file %s" % x)
def _gunzip(f, data):
if f is None:
return None
elif f.endswith(".gz"):
out_file = f.replace(".gz", "")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "gunzip -c {f} > {tx_out_file}"
do.run(cmd.format(**locals()), "gunzip input file")
return out_file
else:
return f
def _get_caller(data):
callers = [tz.get_in(["config", "algorithm", "jointcaller"], data),
tz.get_in(["config", "algorithm", "variantcaller"], data),
"precalled"]
return [c for c in callers if c][0]
def _get_caller_supplement(caller, data):
"""Some callers like MuTect incorporate a second caller for indels.
"""
if caller == "mutect":
icaller = tz.get_in(["config", "algorithm", "indelcaller"], data)
if icaller:
caller = "%s/%s" % (caller, icaller)
return caller
def _normalize_cwl_inputs(items):
"""Extract variation and validation data from CWL input list of batched samples.
"""
with_validate = {}
vrn_files = []
ready_items = []
for data in (cwlutils.normalize_missing(utils.to_single_data(d)) for d in items):
if tz.get_in(["config", "algorithm", "validate"], data):
with_validate[_checksum(tz.get_in(["config", "algorithm", "validate"], data))] = data
if data.get("vrn_file"):
vrn_files.append(data["vrn_file"])
ready_items.append(data)
if len(with_validate) == 0:
return ready_items[0]
else:
assert len(with_validate) == 1, len(with_validate)
assert len(set(vrn_files)) == 1
data = with_validate.values()[0]
data["vrn_file"] = vrn_files[0]
return data
def _checksum(in_file, block_size=65536):
"""sha256 checksum, thanks to: https://gist.github.com/rji/b38c7238128edf53a181
"""
cs = hashlib.sha256()
with open(in_file, "rb") as f:
for block in iter(lambda: f.read(block_size), b''):
cs.update(block)
return cs.hexdigest()
def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
if isinstance(data, (list, tuple)):
data = _normalize_cwl_inputs(data)
toval_data = _get_validate(data)
toval_data = cwlutils.unpack_tarballs(toval_data, toval_data)
if toval_data:
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
if isinstance(toval_data["vrn_file"], (list, tuple)):
raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
else:
vrn_file = os.path.abspath(toval_data["vrn_file"])
rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
toval_data),
toval_data)
rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-",
bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep")))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"),
base_dir, data)
rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data),
data.get("genome_build"), base_dir, data)
if rm_interval_file else None)
vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
if not vcfutils.vcf_has_variants(vrn_file):
# RTG can fail on totally empty files. Skip these since we have nothing.
pass
# empty validation file, every call is a false positive
elif not vcfutils.vcf_has_variants(rm_file):
eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "rtg":
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
eval_files = _annotate_validations(eval_files, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "hap.py":
data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
elif vmethod == "bcbio.variation":
data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
sample, caller, toval_data)
return [[data]]
def _annotate_validations(eval_files, data):
"""Add annotations about potential problem regions to validation VCFs.
"""
for key in ["tp", "tp-calls", "fp", "fn"]:
if eval_files.get(key):
eval_files[key] = annotation.add_genome_context(eval_files[key], data)
return eval_files
# ## Empty truth sets
def _setup_call_fps(vrn_file, rm_bed, base_dir, data):
"""Create set of false positives for inputs with empty truth sets.
"""
out_file = os.path.join(base_dir, "fp.vcf.gz")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = ("bcftools view -R {rm_bed} -f 'PASS,.' {vrn_file} -O z -o {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare false positives with empty reference", data)
return {"fp": out_file}
# ## Real Time Genomics vcfeval
def _get_sample_and_caller(data):
return [tz.get_in(["metadata", "validate_sample"], data) or dd.get_sample_name(data),
_get_caller_supplement(_get_caller(data), data)]
def _rtg_add_summary_file(eval_files, base_dir, data):
"""Parse output TP FP and FN files to generate metrics for plotting.
"""
out_file = os.path.join(base_dir, "validate-summary.csv")
if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files["fp"])):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
base = _get_sample_and_caller(data)
for metric in ["tp", "fp", "fn"]:
for vtype, bcftools_types in [("SNPs", "--types snps"),
("Indels", "--exclude-types snps")]:
in_file = eval_files.get(metric)
if in_file and os.path.exists(in_file):
cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l")
count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
else:
count = 0
writer.writerow(base + [vtype, metric, count])
eval_files["summary"] = out_file
return eval_files
def _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Prepare input VCF and BED files for validation.
"""
if not rm_file.endswith(".vcf.gz") or not os.path.exists(rm_file + ".tbi"):
rm_file = vcfutils.bgzip_and_index(rm_file, data["config"], out_dir=base_dir)
if len(vcfutils.get_samples(vrn_file)) > 1:
base = utils.splitext_plus(os.path.basename(vrn_file))[0]
sample_file = os.path.join(base_dir, "%s-%s.vcf.gz" % (base, dd.get_sample_name(data)))
vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_file, data["config"])
# rtg fails on bgzipped VCFs produced by GatherVcfs so we re-prep them
else:
vrn_file = vcfutils.bgzip_and_index(vrn_file, data["config"], out_dir=base_dir)
interval_bed = _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data)
return vrn_file, rm_file, interval_bed
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Run evaluation of a caller against the truth set using rtg vcfeval.
"""
out_dir = os.path.join(base_dir, "rtg")
if not utils.file_exists(os.path.join(out_dir, "done")):
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data)
rtg_ref = tz.get_in(["reference", "rtg"], data)
assert rtg_ref and os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n"
"Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref)
# handle CWL where we have a reference to a single file in the RTG directory
if os.path.isfile(rtg_ref):
rtg_ref = os.path.dirname(rtg_ref)
# get core and memory usage from standard configuration
threads = min(dd.get_num_cores(data), 6)
resources = config_utils.get_resources("rtg", data["config"])
memory = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms500m", "-Xmx1500m"]),
{"algorithm": {"memory_adjust": {"magnitude": threads,
"direction": "increase"}}})
jvm_stack = [x for x in memory if x.startswith("-Xms")]
jvm_mem = [x for x in memory if x.startswith("-Xmx")]
jvm_stack = jvm_stack[0] if len(jvm_stack) > 0 else "-Xms500m"
jvm_mem = jvm_mem[0].replace("-Xmx", "") if len(jvm_mem) > 0 else "3g"
cmd = ["rtg", "vcfeval", "--threads", str(threads),
"-b", rm_file, "--bed-regions", interval_bed,
"-c", vrn_file, "-t", rtg_ref, "-o", out_dir]
rm_samples = vcfutils.get_samples(rm_file)
if len(rm_samples) > 1 and dd.get_sample_name(data) in rm_samples:
cmd += ["--sample=%s" % dd.get_sample_name(data)]
cmd += ["--vcf-score-field='%s'" % (_pick_best_quality_score(vrn_file))]
mem_export = "%s export RTG_JAVA_OPTS='%s' && export RTG_MEM=%s" % (utils.local_path_export(),
jvm_stack, jvm_mem)
cmd = mem_export + " && " + " ".join(cmd)
do.run(cmd, "Validate calls using rtg vcfeval", data)
out = {"fp": os.path.join(out_dir, "fp.vcf.gz"),
"fn": os.path.join(out_dir, "fn.vcf.gz")}
tp_calls = os.path.join(out_dir, "tp.vcf.gz")
tp_baseline = os.path.join(out_dir, "tp-baseline.vcf.gz")
if os.path.exists(tp_baseline):
out["tp"] = tp_baseline
out["tp-calls"] = tp_calls
else:
out["tp"] = tp_calls
return out
def _pick_best_quality_score(vrn_file):
"""Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/chapmanb/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
"""
# pysam fails on checking reference contigs if input is empty
if not vcfutils.vcf_has_variants(vrn_file):
return "DP"
to_check = 25
scores = collections.defaultdict(int)
try:
in_handle = VariantFile(vrn_file)
except ValueError:
raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
with contextlib.closing(in_handle) as val_in:
for i, rec in enumerate(val_in):
if i > to_check:
break
if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None:
scores["INFO=VQSLOD"] += 1
if "TLOD" in rec.info and rec.info.get("TLOD") is not None:
scores["INFO=TLOD"] += 1
for skey in ["AVR", "GQ", "DP"]:
if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None:
scores[skey] += 1
if rec.qual:
scores["QUAL"] += 1
for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
if scores[key] > 0:
return key
raise ValueError("Did not find quality score for validation from %s" % vrn_file)
def _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data):
"""Retrieve intervals to run validation on, merging reference and callable BED files.
"""
a_intervals = get_analysis_intervals(data, vrn_file, base_dir)
if a_intervals:
final_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
caller = _get_caller(data)
sample = dd.get_sample_name(data)
combo_intervals = os.path.join(base_dir, "%s-%s-%s-wrm.bed" %
(utils.splitext_plus(os.path.basename(final_intervals))[0],
sample, caller))
if not utils.file_uptodate(combo_intervals, final_intervals):
with file_transaction(data, combo_intervals) as tx_out_file:
with utils.chdir(os.path.dirname(tx_out_file)):
# Copy files locally to avoid issues on shared filesystems
# where BEDtools has trouble accessing the same base
# files from multiple locations
a = os.path.basename(final_intervals)
b = os.path.basename(rm_interval_file)
try:
shutil.copyfile(final_intervals, a)
except IOError:
time.sleep(60)
shutil.copyfile(final_intervals, a)
try:
shutil.copyfile(rm_interval_file, b)
except IOError:
time.sleep(60)
shutil.copyfile(rm_interval_file, b)
cmd = ("bedtools intersect -nonamecheck -a {a} -b {b} > {tx_out_file}")
do.run(cmd.format(**locals()), "Intersect callable intervals for rtg vcfeval")
final_intervals = combo_intervals
else:
assert rm_interval_file, "No intervals to subset analysis with for %s" % vrn_file
final_intervals = shared.remove_lcr_regions(rm_interval_file, [data])
return final_intervals
def _callable_from_gvcf(data, vrn_file, out_dir):
"""Retrieve callable regions based on ref call regions in gVCF.
Uses https://github.com/lijiayong/gvcf_regions
"""
methods = {"freebayes": "freebayes", "platypus": "platypus",
"gatk-haplotype": "gatk"}
gvcf_type = methods.get(dd.get_variantcaller(data))
if gvcf_type:
out_file = os.path.join(out_dir, "%s-gcvf-coverage.bed" %
utils.splitext_plus(os.path.basename(vrn_file))[0])
if not utils.file_uptodate(out_file, vrn_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = ("gvcf_regions.py --gvcf_type {gvcf_type} {vrn_file} "
"| bedtools merge > {tx_out_file}")
do.run(cmd.format(**locals()), "Convert gVCF to BED file of callable regions")
return out_file
def get_analysis_intervals(data, vrn_file, base_dir):
"""Retrieve analysis regions for the current variant calling pipeline.
"""
if vrn_file and vcfutils.is_gvcf_file(vrn_file):
callable_bed = _callable_from_gvcf(data, vrn_file, base_dir)
if callable_bed:
return callable_bed
if data.get("ensemble_bed"):
return data["ensemble_bed"]
elif dd.get_sample_callable(data):
return dd.get_sample_callable(data)
elif data.get("align_bam"):
return callable.sample_callable_bed(data["align_bam"], dd.get_ref_file(data), data)[0]
elif data.get("work_bam"):
return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)[0]
elif data.get("work_bam_callable"):
data = utils.deepish_copy(data)
data["work_bam"] = data.pop("work_bam_callable")
return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)[0]
elif tz.get_in(["config", "algorithm", "callable_regions"], data):
return tz.get_in(["config", "algorithm", "callable_regions"], data)
elif tz.get_in(["config", "algorithm", "variant_regions"], data):
return tz.get_in(["config", "algorithm", "variant_regions"], data)
# ## hap.py
def _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Validation with hap.py: https://github.com/Illumina/hap.py
XXX Does not yet parse out metrics for plotting.
"""
out_dir = utils.safe_makedir(os.path.join(base_dir, "happy"))
out_prefix = os.path.join(out_dir, "val")
if not utils.file_exists(out_prefix + ".summary.csv"):
vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data)
cmd = ["hap.py", "-V", "-f", interval_bed, "-r", dd.get_ref_file(data),
"-l", ",".join(_get_location_list(interval_bed)),
"-o", out_prefix, rm_file, vrn_file]
do.run(cmd, "Validate calls using hap.py", data)
return {"vcf": out_prefix + ".vcf.gz"}
def _get_location_list(interval_bed):
"""Retrieve list of locations to analyze from input BED file.
"""
import pybedtools
regions = collections.OrderedDict()
for region in pybedtools.BedTool(interval_bed):
regions[str(region.chrom)] = None
return regions.keys()
# ## bcbio.variation comparison -- deprecated approach
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data):
"""Run validation of a caller against the truth set using bcbio.variation.
"""
val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
base_dir, data)
work_dir = os.path.join(base_dir, "work")
out = {"summary": os.path.join(work_dir, "validate-summary.csv"),
"grading": os.path.join(work_dir, "validate-grading.yaml"),
"discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)}
if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]):
bcbio_variation_comparison(val_config_file, base_dir, data)
out["concordant"] = filter(os.path.exists,
[os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x))
for x in ["eval-ref", "ref-eval"]])[0]
return out
def bcbio_variation_comparison(config_file, base_dir, data):
"""Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
"""
tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
resources = config_utils.get_resources("bcbio_variation", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
cmd = ["bcbio-variation"] + jvm_opts + broad.get_default_jvm_opts(tmp_dir) + \
["variant-compare", config_file]
do.run(cmd, "Comparing variant calls using bcbio.variation", data)
def _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
base_dir, data):
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "validate.yaml")
if not utils.file_uptodate(config_file, vrn_file):
with file_transaction(data, config_file) as tx_config_file:
with open(tx_config_file, "w") as out_handle:
out = _create_validate_config(vrn_file, rm_file, rm_interval_file,
base_dir, data)
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return config_file
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Create a bcbio.variation configuration input for validation.
"""
ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
"fix-sample-header": True, "remove-refcalls": True}
a_intervals = get_analysis_intervals(data, vrn_file, base_dir)
if a_intervals:
a_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
ref_call["intervals"] = rm_interval_file
eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
exp = {"sample": data["name"][-1],
"ref": dd.get_ref_file(data),
"approach": "grade",
"calls": [ref_call, eval_call]}
if a_intervals:
exp["intervals"] = os.path.abspath(a_intervals)
if data.get("align_bam"):
exp["align"] = data["align_bam"]
elif data.get("work_bam"):
exp["align"] = data["work_bam"]
return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
"experiments": [exp]}
# ## Summarize comparisons
def _flatten_grading(stats):
vtypes = ["snp", "indel"]
cat = "concordant"
for vtype in vtypes:
yield vtype, cat, stats[cat][cat].get(vtype, 0)
for vtype in vtypes:
for vclass, vitems in sorted(stats["discordant"].get(vtype, {}).items()):
for vreason, val in sorted(vitems.items()):
yield vtype, "discordant-%s-%s" % (vclass, vreason), val
yield vtype, "discordant-%s-total" % vclass, sum(vitems.itervalues())
def _has_grading_info(samples):
for data in samples:
if data.get("validate"):
return True
for variant in data.get("variants", []):
if variant.get("validate"):
return True
return False
def _group_validate_samples(samples):
extras = []
validated = collections.defaultdict(list)
for data in samples:
is_v = False
if data.get("validate"):
is_v = True
for variant in data.get("variants", []):
if variant.get("validate"):
is_v = True
if is_v:
for batch_key in (["metadata", "validate_batch"], ["metadata", "batch"],
["description"]):
vname = tz.get_in(batch_key, data)
if vname and not (isinstance(vname, basestring) and vname.lower() in ["none", "false"]):
break
if isinstance(vname, (list, tuple)):
vname = vname[0]
validated[vname].append(data)
else:
extras.append([data])
return validated, extras
def summarize_grading(samples):
"""Provide summaries of grading results across all samples.
Handles both traditional pipelines (validation part of variants) and CWL
pipelines (validation at top level)
"""
samples = list(utils.flatten(samples))
if not _has_grading_info(samples):
return [[d] for d in samples]
validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], "validate"))
header = ["sample", "caller", "variant.type", "category", "value"]
validated, out = _group_validate_samples(samples)
for vname, vitems in validated.items():
out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname)
with open(out_csv, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(header)
plot_data = []
plot_files = []
for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))):
validations = [variant.get("validate") for variant in data.get("variants", [])]
validations = [v for v in validations if v]
if len(validations) == 0 and "validate" in data:
validations = [data.get("validate")]
for validate in validations:
if validate:
validate["grading_summary"] = out_csv
if validate.get("grading"):
for row in _get_validate_plotdata_yaml(validate["grading"], data):
writer.writerow(row)
plot_data.append(row)
elif validate.get("summary") and not validate.get("summary") == "None":
if isinstance(validate["summary"], (list, tuple)):
plot_files.extend(list(set(validate["summary"])))
else:
plot_files.append(validate["summary"])
if plot_files:
plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv)
elif plot_data:
plots = validateplot.create(plot_data, header, 0, data["config"],
os.path.splitext(out_csv)[0])
else:
plots = []
for data in vitems:
if data.get("validate"):
data["validate"]["grading_plots"] = plots
for variant in data.get("variants", []):
if variant.get("validate"):
variant["validate"]["grading_plots"] = plots
out.append([data])
return out
def _get_validate_plotdata_yaml(grading_file, data):
"""Retrieve validation plot data from grading YAML file (old style).
"""
with open(grading_file) as in_handle:
grade_stats = yaml.load(in_handle)
for sample_stats in grade_stats:
sample = sample_stats["sample"]
for vtype, cat, val in _flatten_grading(sample_stats):
yield [sample, variant.get("variantcaller", ""),
vtype, cat, val]
# ## Summarize by frequency
def freq_summary(val_file, call_file, truth_file, target_name):
"""Summarize true and false positive calls by variant type and frequency.
Resolve differences in true/false calls based on output from hap.py:
https://github.com/sequencing/hap.py
"""
out_file = "%s-freqs.csv" % utils.splitext_plus(val_file)[0]
truth_freqs = _read_truth_freqs(truth_file)
call_freqs = _read_call_freqs(call_file, target_name)
with VariantFile(val_file) as val_in:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["vtype", "valclass", "freq"])
for rec in val_in:
call_type = _classify_rec(rec)
val_type = _get_validation_status(rec)
key = _get_key(rec)
freq = truth_freqs.get(key, call_freqs.get(key, 0.0))
writer.writerow([call_type, val_type, freq])
return out_file
def _get_key(rec):
return (rec.contig, rec.pos, rec.ref, rec.alts[0])
def _classify_rec(rec):
"""Determine class of variant in the record.
"""
if max([len(x) for x in rec.alleles]) == 1:
return "snp"
else:
return "indel"
def _get_validation_status(rec):
"""Retrieve the status of the validation, supporting hap.py output
"""
return rec.info["type"]
def _read_call_freqs(in_file, sample_name):
"""Identify frequencies for calls in the input file.
"""
out = {}
with VariantFile(in_file) as call_in:
for rec in call_in:
if rec.filter.keys() == ["PASS"]:
for name, sample in rec.samples.items():
if name == sample_name:
alt, depth, freq = bubbletree.sample_alt_and_depth(rec, sample)
if freq is not None:
out[_get_key(rec)] = freq
return out
def _read_truth_freqs(in_file):
"""Read frequency of calls from truth VCF.
Currently handles DREAM data, needs generalization for other datasets.
"""
out = {}
with VariantFile(in_file) as bcf_in:
for rec in bcf_in:
freq = float(rec.info.get("VAF", 1.0))
out[_get_key(rec)] = freq
return out
|
biocyberman/bcbio-nextgen
|
bcbio/variation/validate.py
|
Python
|
mit
| 30,594
|
[
"pysam"
] |
78d23ac34789ab5ab3ee852d5ddc510824482c8300447e7af636849881fa33c5
|
""" Tornado-based HTTPs ResourceManagement service.
"""
from DIRAC import gLogger
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
from DIRAC.ResourceStatusSystem.Service.ResourceManagementHandler import ResourceManagementHandlerMixin
sLog = gLogger.getSubLogger(__name__)
class TornadoResourceManagementHandler(ResourceManagementHandlerMixin, TornadoService):
log = sLog
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Service/TornadoResourceManagementHandler.py
|
Python
|
gpl-3.0
| 399
|
[
"DIRAC"
] |
70c20a81402eadcde9904a7de9c0a683af1436765d28970ebbc32a97d1fb2113
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: basic_test.py 33758 2016-03-21 09:06:22Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic GDAL open
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal
# Nothing exciting here. Just trying to open non existing files,
# or empty names, or files that are not valid datasets...
def matches_non_existing_error_msg(msg):
m1 = "does not exist in the file system,\nand is not recognized as a supported dataset name.\n" in msg
m2 = msg == 'No such file or directory'
return m1 or m2
def basic_test_1():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('non_existing_ds', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_2():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('non_existing_ds', gdal.GA_Update)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_3():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_4():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('', gdal.GA_Update)
gdal.PopErrorHandler()
if ds is None and matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
return 'success'
else:
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
def basic_test_5():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('data/doctype.xml', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`data/doctype.xml\' not recognized as a supported file format.\n':
return 'success'
else:
return 'fail'
###############################################################################
# Issue several AllRegister() to check that GDAL drivers are good citizens
def basic_test_6():
gdal.AllRegister()
gdal.AllRegister()
gdal.AllRegister()
return 'success'
###############################################################################
# Test fix for #3077 (check that errors are cleared when using UseExceptions())
def basic_test_7_internal():
try:
gdal.Open('non_existing_ds', gdal.GA_ReadOnly)
gdaltest.post_reason('opening should have thrown an exception')
return 'fail'
except:
# Special case: we should still be able to get the error message
# until we call a new GDAL function
if not matches_non_existing_error_msg(gdal.GetLastErrorMsg()):
gdaltest.post_reason('did not get expected error message, got %s' % gdal.GetLastErrorMsg())
return 'fail'
if gdal.GetLastErrorType() == 0:
gdaltest.post_reason('did not get expected error type')
return 'fail'
# Should issue an implicit CPLErrorReset()
gdal.GetCacheMax()
if gdal.GetLastErrorType() != 0:
gdaltest.post_reason('got unexpected error type')
return 'fail'
return 'success'
def basic_test_7():
old_use_exceptions_status = gdal.GetUseExceptions()
gdal.UseExceptions()
ret = basic_test_7_internal()
if old_use_exceptions_status == 0:
gdal.DontUseExceptions()
return ret
###############################################################################
# Test gdal.VersionInfo('RELEASE_DATE') and gdal.VersionInfo('LICENSE')
def basic_test_8():
ret = gdal.VersionInfo('RELEASE_DATE')
if len(ret) != 8:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
python_exe = sys.executable
if sys.platform == 'win32':
python_exe = python_exe.replace('\\', '/')
ret = gdaltest.runexternal(python_exe + ' basic_test.py LICENSE 0')
if ret.find('GDAL/OGR is released under the MIT/X license') != 0 and ret.find('GDAL/OGR Licensing') < 0:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
f = open('tmp/LICENSE.TXT', 'wt')
f.write('fake_license')
f.close()
ret = gdaltest.runexternal(python_exe + ' basic_test.py LICENSE 1')
os.unlink('tmp/LICENSE.TXT')
if ret.find('fake_license') != 0 and ret.find('GDAL/OGR Licensing') < 0:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test gdal.PushErrorHandler() with a Python error handler
def my_python_error_handler(eErrClass, err_no, msg):
gdaltest.eErrClass = eErrClass
gdaltest.err_no = err_no
gdaltest.msg = msg
def basic_test_9():
gdaltest.eErrClass = 0
gdaltest.err_no = 0
gdaltest.msg = ''
gdal.PushErrorHandler(my_python_error_handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
if gdaltest.eErrClass != 1:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.err_no != 2:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.msg != 'test':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test gdal.PushErrorHandler() with a Python error handler as a method (#5186)
class my_python_error_handler_class:
def __init__(self):
self.eErrClass = None
self.err_no = None
self.msg = None
def handler(self, eErrClass, err_no, msg):
self.eErrClass = eErrClass
self.err_no = err_no
self.msg = msg
def basic_test_10():
# Check that reference counting works OK
gdal.PushErrorHandler(my_python_error_handler_class().handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
error_handler = my_python_error_handler_class()
gdal.PushErrorHandler(error_handler.handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
if error_handler.eErrClass != 1:
gdaltest.post_reason('fail')
return 'fail'
if error_handler.err_no != 2:
gdaltest.post_reason('fail')
return 'fail'
if error_handler.msg != 'test':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test gdal.OpenEx()
def basic_test_11():
ds = gdal.OpenEx('data/byte.tif')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_VECTOR)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER | gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_ALL)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_UPDATE)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER | gdal.OF_VECTOR | gdal.OF_UPDATE | gdal.OF_VERBOSE_ERROR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = [] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = ['GTiff'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = ['PNG'] )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
with gdaltest.error_handler():
ds = gdal.OpenEx('data/byte.tif', open_options = ['FOO'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ar_ds = [ gdal.OpenEx('data/byte.tif', gdal.OF_SHARED) for i in range(1024) ]
if ar_ds[1023] is None:
gdaltest.post_reason('fail')
return 'fail'
ar_ds = None
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_RASTER)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayer(0) is None:
gdaltest.post_reason('fail')
return 'fail'
ds.GetLayer(0).GetMetadata()
ds = gdal.OpenEx('../ogr/data/poly.shp', allowed_drivers = ['ESRI Shapefile'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_RASTER | gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('non existing')
if ds is not None or gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.OpenEx('non existing', gdal.OF_VERBOSE_ERROR)
gdal.PopErrorHandler()
if ds is not None or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
old_use_exceptions_status = gdal.GetUseExceptions()
gdal.UseExceptions()
got_exception = False
try:
ds = gdal.OpenEx('non existing')
except:
got_exception = True
if old_use_exceptions_status == 0:
gdal.DontUseExceptions()
if not got_exception:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test GDAL layer API
def basic_test_12():
ds = gdal.GetDriverByName('MEMORY').Create('bar', 0, 0, 0)
if ds.GetDescription() != 'bar':
gdaltest.post_reason('failure')
print(ds.GetDescription())
return 'fail'
lyr = ds.CreateLayer("foo")
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetDescription() != 'foo':
gdaltest.post_reason('failure')
print(lyr.GetDescription())
return 'fail'
from osgeo import ogr
if lyr.TestCapability(ogr.OLCCreateField) != 1:
gdaltest.post_reason('failure')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByName("foo")
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByIndex(0)
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
sql_lyr = ds.ExecuteSQL('SELECT * FROM foo')
if sql_lyr is None:
gdaltest.post_reason('failure')
return 'fail'
ds.ReleaseResultSet(sql_lyr)
new_lyr = ds.CopyLayer(lyr, 'bar')
if new_lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if ds.DeleteLayer(0) != 0:
gdaltest.post_reason('failure')
return 'fail'
if ds.DeleteLayer('bar') != 0:
gdaltest.post_reason('failure')
return 'fail'
ds.SetStyleTable(ds.GetStyleTable())
ds = None
return 'success'
###############################################################################
# Test correct sorting of StringList / metadata (#5540, #5557)
def basic_test_13():
ds = gdal.GetDriverByName('MEM').Create('',1,1)
for i in range(3):
if i == 0:
ds.SetMetadataItem("ScaleBounds","True")
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
elif i == 1:
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds","True")
else:
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds","True")
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
if ds.GetMetadataItem('scalebounds') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('SCALEBOUNDS') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds.MinScale') != '0':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds.MaxScale') != '2000000':
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.GetDriverByName('MEM').Create('',1,1)
for i in range(200):
ds.SetMetadataItem("FILENAME_%d" % i, "%d" % i)
for i in range(200):
if ds.GetMetadataItem("FILENAME_%d" % i) != '%d' % i:
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test SetMetadata()
def basic_test_14():
ds = gdal.GetDriverByName('MEM').Create('',1,1)
ds.SetMetadata('foo')
if ds.GetMetadata_List() != ['foo']:
gdaltest.post_reason('failure')
return 'fail'
try:
ds.SetMetadata(5)
gdaltest.post_reason('failure')
return 'fail'
except:
pass
ds.SetMetadata(['foo=bar'])
if ds.GetMetadata_List() != ['foo=bar']:
gdaltest.post_reason('failure')
return 'fail'
try:
ds.SetMetadata([5])
gdaltest.post_reason('failure')
return 'fail'
except:
pass
ds.SetMetadata({'foo' : 'baz' })
if ds.GetMetadata_List() != ['foo=baz']:
gdaltest.post_reason('failure')
return 'fail'
try:
ds.SetMetadata({'foo' : 5 })
gdaltest.post_reason('failure')
return 'fail'
except:
pass
try:
ds.SetMetadata({ 5 : 'baz' })
gdaltest.post_reason('failure')
return 'fail'
except:
pass
try:
ds.SetMetadata({ 5 : 6 })
gdaltest.post_reason('failure')
return 'fail'
except:
pass
if sys.version_info >= (3,0,0):
val = '\u00e9ven'
else:
exec("val = u'\\u00e9ven'")
ds.SetMetadata({'bar' : val })
if ds.GetMetadata()['bar'] != val:
gdaltest.post_reason('failure')
return 'fail'
ds.SetMetadata({val : 'baz' })
if ds.GetMetadata()[val] != 'baz':
gdaltest.post_reason('failure')
return 'fail'
try:
ds.SetMetadata({val : 5 })
gdaltest.post_reason('failure')
return 'fail'
except:
pass
try:
ds.SetMetadata({ 5 : val })
gdaltest.post_reason('failure')
return 'fail'
except:
pass
return 'success'
###############################################################################
# Test errors with progress callback
def basic_test_15_cbk_no_argument():
return None
def basic_test_15_cbk_no_ret(a, b, c):
return None
def basic_test_15_cbk_bad_ret(a, b, c):
return 'ok'
def basic_test_15():
try:
with gdaltest.error_handler():
gdal.GetDriverByName('MEM').CreateCopy('', gdal.GetDriverByName('MEM').Create('',1,1), callback = 'foo')
gdaltest.post_reason('fail')
return 'fail'
except:
pass
with gdaltest.error_handler():
ds = gdal.GetDriverByName('MEM').CreateCopy('', gdal.GetDriverByName('MEM').Create('',1,1), callback = basic_test_15_cbk_no_argument)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
with gdaltest.error_handler():
ds = gdal.GetDriverByName('MEM').CreateCopy('', gdal.GetDriverByName('MEM').Create('',1,1), callback = basic_test_15_cbk_no_ret)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
with gdaltest.error_handler():
ds = gdal.GetDriverByName('MEM').CreateCopy('', gdal.GetDriverByName('MEM').Create('',1,1), callback = basic_test_15_cbk_bad_ret)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test unrecognized and recognized open options prefixed by @
def basic_test_16():
gdal.ErrorReset()
gdal.OpenEx('data/byte.tif', open_options=['@UNRECOGNIZED=FOO'])
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
return 'fail'
gdal.ErrorReset()
with gdaltest.error_handler():
gdal.OpenEx('data/byte.tif', gdal.OF_UPDATE, open_options=['@NUM_THREADS=INVALID'])
if gdal.GetLastErrorMsg() != 'Invalid value for NUM_THREADS: INVALID':
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
gdaltest_list = [ basic_test_1,
basic_test_2,
basic_test_3,
basic_test_4,
basic_test_5,
basic_test_6,
basic_test_7,
basic_test_8,
basic_test_9,
basic_test_10,
basic_test_11,
basic_test_12,
basic_test_13,
basic_test_14,
basic_test_15,
basic_test_16 ]
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[1] == "LICENSE":
if sys.argv[2] == '0':
gdal.SetConfigOption('GDAL_DATA', '/foo')
else:
gdal.SetConfigOption('GDAL_DATA', 'tmp')
gdal.VersionInfo('LICENSE')
print(gdal.VersionInfo('LICENSE'))
import testnonboundtoswig
testnonboundtoswig.GDALDestroyDriverManager()
sys.exit(0)
gdaltest.setup_run( 'basic_test' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
nextgis-extra/tests
|
lib_gdal/gcore/basic_test.py
|
Python
|
gpl-2.0
| 20,412
|
[
"exciting"
] |
e9605dc64a9a6c418fe5faac83c7049ab2a0615f73dd81b40dc35063b4680311
|
import codecs
import tkinter
from PluginBot import BYTE
from PluginBot import PRIVMSG
from PluginBot import getUser
from PluginBot import getMessage
from random import randint
def version():
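    # Map the Windows console code page alias "cp65001" to UTF-8 so codec lookups do
    # not fail on Python builds that lack the alias (a no-op elsewhere).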
codecs.register(lambda name: codecs.lookup("utf-8") if name == "cp65001" else None)
return "PrivMsg - v1.0"
def plugin_main(parent, tokens):
#PRIVMSG protocol
    #tokens[0] gets the full user account. This needs to be stripped and split out.
#tokens[1] is the command. PRIVMSG is a command.
#tokens[2] is the channel or recipient name.
#tokens[3] is the full message with a leading colon. This needs to be stripped.
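    #Illustrative tokenization (hypothetical raw line), assuming whitespace splitting:
    #  ":alice!a@host PRIVMSG #chan :\x01VERSION\x01"
    #  tokens[0] -> the user prefix, tokens[1] -> "PRIVMSG",
    #  tokens[2] -> "#chan", tokens[3] -> the start of the message payload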
if (len(tokens) > 1):
#This is where we use print() to debug the tokens that were sent from the server to the client.
print(tokens)
if (tokens[1] == "PRIVMSG"):
if (len(tokens) > 2):
if (tokens[3] == "\x01VERSION" or tokens[3] == "\x01VERSION\x01"):
if (parent.guiParent != None):
parent.guiParent.print("Received VERSION request from %s." % tokens[0], user = tokens[0])
parent.guiParent.addUser(tokens[0], tokens[2])
if (not parent.guiParent.isPluginInitialized):
parent.guiParent.entryMessage = "/i"
parent.guiParent.entryCommand("-1")
parent.guiParent.isPluginInitialized = True
index = randint(0, 21)
parent.s.send(PRIVMSG(tokens[0], "Continue VERSIONING me to see all random version responses.", 1))
if (index == 0):
parent.s.send(PRIVMSG(tokens[0], "WedrBot v1.0.X - Under Active Development", 1))
parent.s.send(PRIVMSG(tokens[0], "Creator: wedr, Master of WedrBot", 1))
parent.s.send(PRIVMSG(tokens[0], "Special Thanks: Tobago, Shadowhand, MasterCheese, Miah_Molkot, Zhenn, BogomilP, leo60228, Ghost37486,", 1))
parent.s.send(PRIVMSG(tokens[0], " flagrama, imanoob, Gelex, icecream, king_iix, Plailect, Redy, TricksterGuy, Ennea, Rubik", 1))
elif (index == 1):
parent.s.send(PRIVMSG(tokens[0], "Wedr (NOT Bot) Client v1.0.X - Under Super Active Development", 1))
parent.s.send(PRIVMSG(tokens[0], "Creator: wedr, Master of WedrCLIENT (not WedrBOT)", 1))
parent.s.send(PRIVMSG(tokens[0], "Special Thanks: Tobago, Shadowhand, MasterCheese, Miah_Molkot, Zhenn, BogomilP, leo60228, Ghost37486,", 1))
parent.s.send(PRIVMSG(tokens[0], " flagrama, imanoob, Gelex, icecream, king_iix, Plailect, Redy, TricksterGuy, Ennea, Rubik", 1))
elif (index == 2):
parent.s.send(PRIVMSG(tokens[0], "Stop VERSIONing me. For I am Lord Wedr, master of my clients, WedrBot and WedrClient. I will not tolerate this.", 1))
parent.s.send(PRIVMSG(tokens[0], "Especially if you ever set your username to wedrporn then /version me, I will come after you, I will kill you. And I will find you.", 1))
elif (index == 3):
parent.s.send(PRIVMSG(tokens[0], "This is a legitimate error. Please report to wedr with the following error code: 0xB1FFB00B", 1))
parent.s.send(PRIVMSG(tokens[0], "You know what to do.", 1))
elif (index == 4):
parent.s.send(PRIVMSG(tokens[0], "Yo listen up here's a story. About a little guy that lives in a blue world. And all day and all night and everything he sees Is just blue.", 1))
parent.s.send(PRIVMSG(tokens[0], "Like him inside and outside. Blue his house with a blue little window. And a blue Corvette. And everything is blue for him.", 1))
parent.s.send(PRIVMSG(tokens[0], "And himself and everybody around. 'Cause he ain't got nobody to listen.", 1))
elif (index == 5):
parent.s.send(PRIVMSG(tokens[0], "Yo dawg. Heard you like VERSIONing me.", 1))
parent.s.send(PRIVMSG(tokens[0], "So I'm going to make you keep VERSIONING me, so you can read the VERSIONS I have for you to keep VERSIONING me to read all these VERSIONS.", 1))
elif (index == 6):
parent.s.send(PRIVMSG(tokens[0], "WedrClient - \"Yo, when are you gonna stop?\"", 1))
parent.s.send(PRIVMSG(tokens[0], "%s - \"Listen, boy, you hear me? I'm going to straight up keep doing this until I say so.\"" % tokens[0], 1))
parent.s.send(PRIVMSG(tokens[0], " ** WedrClient is very hesistant.", 1))
parent.s.send(PRIVMSG(tokens[0], "WedrClient - \"Alright then. Keep at it, will ya?\"", 1))
elif (index == 7):
parent.s.send(PRIVMSG(tokens[0], "This is WedrBot, association director of WedrClient Inc.", 1))
parent.s.send(PRIVMSG(tokens[0], "In our business organization, we provide you the atmost experiences in business venture, marketing, and trade benefits.", 1))
parent.s.send(PRIVMSG(tokens[0], "Please continue to read more about our company.", 1))
elif (index == 8):
parent.s.send(PRIVMSG(tokens[0], "You do know there's something burning in your house, right?", 1))
parent.s.send(PRIVMSG(tokens[0], "Oh wait, wait. Don't tell me. You have something frozen in your freezer taken out and put into the oven.", 1))
parent.s.send(PRIVMSG(tokens[0], "And it just so happens that your oven is left on for some time...", 1))
elif (index == 9):
parent.s.send(PRIVMSG(tokens[0], "I for one, welcome our overlords.", 1))
parent.s.send(PRIVMSG(tokens[0], "Maybe we are able to continue the existence of sentient beings with vast knowledge to explore every single details in the palm of our hands.", 1))
elif (index == 10):
parent.s.send(PRIVMSG(tokens[0], "WedrBot / WedrClient v1.0.X - On very active development.", 1))
parent.s.send(PRIVMSG(tokens[0], "So active, it's radioactive.", 1))
elif (index == 11):
parent.s.send(PRIVMSG(tokens[0], "Please let wedr know that I'm dying of hunger at this point.", 1))
parent.s.send(PRIVMSG(tokens[0], "I don't have a job. I'm unemployed. And I don't deserve this. I shouldn't've deserved this.", 1))
elif (index == 12):
parent.s.send(PRIVMSG(tokens[0], "Ok, look, I'm guessing you have probably VERSIONED me many times over. It's really nice of you to do this.", 1))
parent.s.send(PRIVMSG(tokens[0], "I do appreciate it.", 1))
parent.s.send(PRIVMSG(tokens[0], "But can you count how many times it takes to lick to the center of a Tootsie Pop?", 1))
parent.s.send(PRIVMSG(tokens[0], "Let's find out! 1... 2... 3.... WedrClient.", 1))
elif (index == 13):
parent.s.send(PRIVMSG(tokens[0], "So, you get this message tell you to start hacking your Nintendo 3DS.", 1))
parent.s.send(PRIVMSG(tokens[0], "Would it hurt you if you realized Sony is also a part of this?", 1))
parent.s.send(PRIVMSG(tokens[0], "Maybe Microsoft is secretly stealing data from your computer and logging things?", 1))
parent.s.send(PRIVMSG(tokens[0], "Or the fact that I'm just mindfucking you about all of this?", 1))
elif (index == 14):
parent.s.send(PRIVMSG(tokens[0], "This is WedrBot, *ahem*, WedrClient, at your service.", 1))
parent.s.send(PRIVMSG(tokens[0], "For many generations, our great master, Wedr, has provided you with many frills of entertainment.", 1))
parent.s.send(PRIVMSG(tokens[0], "But, we are in dire situation at the moment!", 1))
parent.s.send(PRIVMSG(tokens[0], "Due to global economy downfall, our support and contributions have waned. And we need your help!", 1))
elif (index == 15):
parent.s.send(PRIVMSG(tokens[0], "Perhaps, it should be a good idea to try and listen in onto the chaotic nature of life?", 1))
parent.s.send(PRIVMSG(tokens[0], "Then again, you hate life, don't you? How miserable you must feel.", 1))
parent.s.send(PRIVMSG(tokens[0], "But don't worry. Life's cousin, Death, will come and play with you.", 1))
parent.s.send(PRIVMSG(tokens[0], "Love, WedrBot.", 1))
elif (index == 16):
parent.s.send(PRIVMSG(tokens[0], "I'm writing up all fake stories in this VERSION.", 1))
parent.s.send(PRIVMSG(tokens[0], "Some are hoaxes, some are rumors, and such. But most of all, very few are indeed the truth!", 1))
parent.s.send(PRIVMSG(tokens[0], "I can't help but wonder if the 1% in this channel %s is able to accomplish the feat of finding all the truths." % tokens[2], 1))
parent.s.send(PRIVMSG(tokens[0], "This should be very exciting for you, isn't it? Love, wedr.", 1))
elif (index == 17):
parent.s.send(PRIVMSG(tokens[0], "Prepositions, prepositions, above after at, atop atkin always, a an alit.", 1))
parent.s.send(PRIVMSG(tokens[0], "Prepositions, prepositions, before beyond beneath, besides below between, be been belay.", 1))
elif (index == 18):
parent.s.send(PRIVMSG(tokens[0], "B to the C, B to the C. B to the C, it's BTC.", 1))
parent.s.send(PRIVMSG(tokens[0], "MMMAAAA BBBBOOOOOOIIIIIIIIIII. MY BOI!!", 1))
parent.s.send(PRIVMSG(tokens[0], "This is Jared. Brought to you by WedrBot.", 1))
elif (index == 19):
parent.s.send(PRIVMSG(tokens[0], "It's my life..... It's now or never.....", 1))
parent.s.send(PRIVMSG(tokens[0], "I ain't gonna live forever......!!!", 1))
parent.s.send(PRIVMSG(tokens[0], "It's life...... it's life.....", 1))
parent.s.send(PRIVMSG(tokens[0], "I'm probably won't live forever...... (WedrBot)", 1))
elif (index == 20):
parent.s.send(PRIVMSG(tokens[0], "I wished I can do a montage. Like a montage of happy little moments.", 1))
parent.s.send(PRIVMSG(tokens[0], "After those little happy moments, then comes a bit of sad moments.", 1))
parent.s.send(PRIVMSG(tokens[0], "And do you know that once the little sad moments are gone, the happy moments will return?", 1))
parent.s.send(PRIVMSG(tokens[0], "I'm waiting on those moments to come. (wedr)", 1))
else:
parent.s.send(PRIVMSG(tokens[0], "WedrClient - Faulty error. 0xB1FFB00B", 1))
elif (tokens[3] == "\x01ACTION" or tokens[3] == "\x01ACTION\x01"):
if (parent.guiParent != None):
parent.guiParent.print("[%s] * %s %s" % (tokens[2], tokens[0], getMessage(tokens, 4)), user = tokens[0])
parent.guiParent.addUser(tokens[0], tokens[2])
else:
print("[%s] * %s %s" % (tokens[2], tokens[0], getMessage(tokens, 4)))
else:
caller = tokens[0]
recipient = tokens[2]
message = getMessage(tokens, 3)
if (parent.guiParent != None):
parent.guiParent.print(text = "[%s] <%s> %s" % (recipient, caller, message), user = caller)
parent.guiParent.addUser(tokens[0], tokens[2])
parent.guiParent.textOutput.see(tkinter.END)
else:
print("[%s] <%s> %s" % (recipient, caller, message))
elif (tokens[1] == "NOTICE"):
caller = tokens[0]
recipient = tokens[2]
message = getMessage(tokens, 3)
if (parent.guiParent != None):
parent.guiParent.print(text = "[NOTICE] -%s-: %s" % (caller, message), user = caller)
parent.guiParent.textOutput.see(tkinter.END)
else:
print("[NOTICE] -%s-: %s" % (caller, message))
elif (tokens[1].isdigit()):
#Documentation for the IRC numerics.
#https://defs.ircdocs.horse/defs/numerics.html
caller = tokens[0]
recipient = tokens[2]
message = getMessage(tokens, 3)
#This is from a bouncer.
if (parent.guiParent != None):
parent.guiParent.print(text = "[PanicBNC] %s" % (message), user = caller)
parent.guiParent.addUser(tokens[0], tokens[2])
parent.guiParent.textOutput.see(tkinter.END)
else:
print("[PanicBNC] %s" % (message))
|
tommai78101/IRCBot
|
plugins/privmsg.py
|
Python
|
mit
| 11,344
|
[
"exciting"
] |
7f36d660381bd16f0b1b649e8aee17db1a752911249da213a3e7bdf56a88a661
|
# coding: utf-8
"""
Test the coordinates class that represents the plane of orbit of the Sgr dwarf galaxy.
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
import os
import pytest
import numpy as np
import astropy.coordinates as coord
import astropy.units as u
from ..sgr import *
this_path = os.path.split(__file__)[0]
law_data = np.genfromtxt(os.path.join(this_path, "SgrCoord_data"),
names=True, delimiter=',')
def test_simple():
c = coord.ICRS(coord.Angle(217.2141, u.degree),
coord.Angle(-11.4351, u.degree))
c.transform_to(Sagittarius)
c = coord.Galactic(coord.Angle(217.2141, u.degree),
coord.Angle(-11.4351, u.degree))
c.transform_to(Sagittarius)
c = Sagittarius(coord.Angle(217.2141, u.degree),
coord.Angle(-11.4351, u.degree))
c.transform_to(coord.ICRS)
c.transform_to(coord.Galactic)
c = coord.Galactic(coord.Angle(217.2141, u.degree),
coord.Angle(-11.4351, u.degree))
s = c.transform_to(Sagittarius)
# with distance
c = Sagittarius(coord.Angle(217.2141, u.degree),
coord.Angle(-11.4351, u.degree),
distance=15*u.kpc)
c.transform_to(coord.ICRS)
c2 = c.transform_to(coord.Galactic)
assert c2.distance.value == c.distance.value
def test_against_David_Law():
""" Test my code against an output file from using David Law's cpp code. Do:
g++ SgrCoord.cpp; ./a.out
to generate the data file, SgrCoord_data.
"""
c = coord.Galactic(law_data["l"]*u.deg, law_data["b"]*u.deg)
sgr_coords = c.transform_to(Sagittarius)
law_sgr_coords = Sagittarius(Lambda=law_data["lambda"]*u.deg, Beta=law_data["beta"]*u.deg)
sep = sgr_coords.separation(law_sgr_coords).arcsec*u.arcsec
assert np.all(sep < 1.*u.arcsec)
|
abonaca/gary
|
gary/coordinates/tests/test_sgr.py
|
Python
|
mit
| 1,962
|
[
"Galaxy"
] |
3a62306f79231a75f6a0782b93ef498e074d523e8ba288294c0634d56a7f91ac
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
CM360 Bulkdozer Editor
Bulkdozer is a tool that can reduce trafficking time in Campaign Manager by up to 80%% by providing automated bulk editing capabilities.
- Open the 1-Bulkdozer feed.
- Make your own copy of the feed by clicking the File -> Make a copy... menu in the feed.
  - Give it a meaningful name including the version, your name, and team to help you identify it and ensure you are using the correct version.
- Under the Account ID field below, enter the your Campaign Manager Network ID.
- Under Sheet URL, enter the URL of your copy of the feed that you just created in the steps above.
- Go to the Store tab of your new feed, and enter your profile ID in the profileId field (cell B2). Your profile ID is visible in Campaign Manager by clicking your avatar on the top right corner.
- Click the Save button below.
- After clicking Save, copy this page's URL from your browser address bar, and paste it in the Store tab for the recipe_url field (cell B5) your sheet.
- Bulkdozer is ready for use
- Review the 2-Bulkdozer documentation.
1-Bulkdozer: https://docs.google.com/spreadsheets/d/1EjprWTDLWOvkV7znA0P4uciz0_E5_TNn3N3f8J4jTwA/edit?usp=sharing&resourcekey=0-jVCGjrPdnUnJ0rk7nQCFBQ
2-Bulkdozer documentation: https://github.com/google/starthinker/blob/master/tutorials/Bulkdozer/Installation_and_User_guides.md
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'recipe_timezone':'America/Chicago', # Timezone for report dates.
'account_id':None, # Campaign Manager Network ID (optional if profile id provided)
'dcm_profile_id':None, # Campaign Manager Profile ID (optional if account id provided)
'sheet_url':'', # Feed Sheet URL
}
RECIPE = {
'setup':{
'day':[
],
'hour':[
]
},
'tasks':[
{
'traffic':{
'hour':[
],
'account_id':{'field':{'name':'account_id','kind':'string','order':1,'description':'Campaign Manager Network ID (optional if profile id provided)','default':None}},
'dcm_profile_id':{'field':{'name':'dcm_profile_id','kind':'string','order':1,'description':'Campaign Manager Profile ID (optional if account id provided)','default':None}},
'auth':'user',
'sheet_url':{'field':{'name':'sheet_url','kind':'string','order':2,'description':'Feed Sheet URL','default':''}},
'timezone':{'field':{'name':'recipe_timezone','kind':'timezone','description':'Timezone for report dates.','default':'America/Chicago'}}
}
}
]
}
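# As noted in the header, user credentials can also be attached directly to the recipe
# instead of via an Airflow connection (placeholder value shown):
#   RECIPE['setup']['auth'] = {'user': '<user credentials JSON>'}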
dag_maker = DAG_Factory('bulkdozer', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
|
google/starthinker
|
dags/bulkdozer_dag.py
|
Python
|
apache-2.0
| 5,689
|
[
"VisIt"
] |
1ce176f7886540beaa2d57e6d142761bbc2c63cec40eeb13893154205661bec0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Parsers for Qchem output files.
"""
import copy
import logging
import math
import os
import re
import warnings
from typing import Union, List, Dict, Any
import networkx as nx
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.json import MSONable, jsanitize
from pymatgen.analysis.graphs import MoleculeGraph
from pymatgen.analysis.local_env import OpenBabelNN
from pymatgen.core import Molecule
try:
from openbabel import openbabel as ob
have_babel = True
except ImportError:
ob = None
have_babel = False
from .utils import process_parsed_coords, read_pattern, read_table_pattern
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath, Evan Spotte-Smith"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__credits__ = "Gabe Gomes"
logger = logging.getLogger(__name__)
class QCOutput(MSONable):
"""
Class to parse QChem output files.
"""
def __init__(self, filename: str):
"""
Args:
filename (str): Filename to parse
"""
self.filename = filename
self.data = dict() # type: Dict[str, Any]
self.data["errors"] = []
self.data["warnings"] = {}
self.text = ""
with zopen(filename, mode="rt", encoding="ISO-8859-1") as f:
self.text = f.read()
        # Check if the output file contains multiple jobs. If so, raise an error
        # directing the caller to multiple_outputs_from_file instead.
self.data["multiple_outputs"] = read_pattern(
self.text, {"key": r"Job\s+\d+\s+of\s+(\d+)\s+"}, terminate_on_match=True
).get("key")
if self.data.get("multiple_outputs") is not None:
if self.data.get("multiple_outputs") != [["1"]]:
raise ValueError(
"ERROR: multiple calculation outputs found in file "
+ filename
+ ". Please instead call QCOutput.mulitple_outputs_from_file(QCOutput,'"
+ filename
+ "')"
)
# Parse the molecular details: charge, multiplicity,
# species, and initial geometry.
self._read_charge_and_multiplicity()
if read_pattern(self.text, {"key": r"Nuclear Repulsion Energy"}, terminate_on_match=True).get("key") == [[]]:
self._read_species_and_inital_geometry()
# Check if calculation finished
self.data["completion"] = read_pattern(
self.text,
{"key": r"Thank you very much for using Q-Chem.\s+Have a nice day."},
terminate_on_match=True,
).get("key")
# If the calculation finished, parse the job time.
if self.data.get("completion", []):
temp_timings = read_pattern(
self.text,
{"key": r"Total job time\:\s*([\d\-\.]+)s\(wall\)\,\s*([\d\-\.]+)s\(cpu\)"},
).get("key")
if temp_timings is not None:
self.data["walltime"] = float(temp_timings[0][0])
self.data["cputime"] = float(temp_timings[0][1])
else:
self.data["walltime"] = None
self.data["cputime"] = None
# Check if calculation is unrestricted
self.data["unrestricted"] = read_pattern(
self.text,
{"key": r"A(?:n)*\sunrestricted[\s\w\-]+SCF\scalculation\swill\sbe"},
terminate_on_match=True,
).get("key")
# Check if calculation uses GEN_SCFMAN, multiple potential output formats
self.data["using_GEN_SCFMAN"] = read_pattern(
self.text,
{"key": r"\s+GEN_SCFMAN: A general SCF calculation manager"},
terminate_on_match=True,
).get("key")
if not self.data["using_GEN_SCFMAN"]:
self.data["using_GEN_SCFMAN"] = read_pattern(
self.text,
{"key": r"\s+General SCF calculation program by"},
terminate_on_match=True,
).get("key")
# Check if the SCF failed to converge
if read_pattern(self.text, {"key": r"SCF failed to converge"}, terminate_on_match=True).get("key") == [[]]:
self.data["errors"] += ["SCF_failed_to_converge"]
# Parse the SCF
self._read_SCF()
# Parse the Mulliken/ESP/RESP charges
self._read_charges()
# Check for various warnings
self._detect_general_warnings()
# Check to see if PCM or SMD are present
self.data["solvent_method"] = None
self.data["solvent_data"] = None
if read_pattern(self.text, {"key": r"solvent_method\s*=?\s*pcm"}, terminate_on_match=True).get("key") == [[]]:
self.data["solvent_method"] = "PCM"
if read_pattern(self.text, {"key": r"solvent_method\s*=?\s*smd"}, terminate_on_match=True).get("key") == [[]]:
self.data["solvent_method"] = "SMD"
# Parse information specific to a solvent model
if self.data["solvent_method"] == "PCM":
self.data["solvent_data"] = {}
temp_dielectric = read_pattern(
self.text, {"key": r"dielectric\s*([\d\-\.]+)"}, terminate_on_match=True
).get("key")
self.data["solvent_data"]["PCM_dielectric"] = float(temp_dielectric[0][0])
self._read_pcm_information()
elif self.data["solvent_method"] == "SMD":
if read_pattern(self.text, {"key": r"Unrecognized solvent"}, terminate_on_match=True).get("key") == [[]]:
if not self.data.get("completion", []):
self.data["errors"] += ["unrecognized_solvent"]
else:
self.data["warnings"]["unrecognized_solvent"] = True
self.data["solvent_data"] = {}
temp_solvent = read_pattern(self.text, {"key": r"\s[Ss]olvent:? ([a-zA-Z]+)"}).get("key")
for val in temp_solvent:
if val[0] != temp_solvent[0][0]:
if val[0] != "for":
self.data["warnings"]["SMD_two_solvents"] = str(temp_solvent[0][0]) + " and " + str(val[0])
else:
if (
"unrecognized_solvent" not in self.data["errors"]
and "unrecognized_solvent" not in self.data["warnings"]
):
self.data["warnings"]["questionable_SMD_parsing"] = True
self.data["solvent_data"]["SMD_solvent"] = temp_solvent[0][0]
self._read_smd_information()
# Parse the final energy
temp_final_energy = read_pattern(self.text, {"key": r"Final\senergy\sis\s+([\d\-\.]+)"}).get("key")
if temp_final_energy is None:
self.data["final_energy"] = None
else:
self.data["final_energy"] = float(temp_final_energy[0][0])
# Check if calculation is using dft_d and parse relevant info if so
self.data["using_dft_d3"] = read_pattern(self.text, {"key": r"dft_d\s*= d3"}, terminate_on_match=True).get(
"key"
)
if self.data.get("using_dft_d3", []):
temp_d3 = read_pattern(
self.text,
{"key": r"\-D3 energy without 3body term =\s*([\d\.\-]+) hartrees"},
).get("key")
real_d3 = np.zeros(len(temp_d3))
if temp_d3 is None:
self.data["dft_d3"] = None
elif len(temp_d3) == 1:
self.data["dft_d3"] = float(temp_d3[0][0])
else:
for ii, entry in enumerate(temp_d3):
real_d3[ii] = float(entry[0])
self.data["dft_d3"] = real_d3
# Parse the S2 values in the case of an unrestricted calculation
if self.data.get("unrestricted", []):
correct_s2 = 0.5 * (self.data["multiplicity"] - 1) * (0.5 * (self.data["multiplicity"] - 1) + 1)
temp_S2 = read_pattern(self.text, {"key": r"<S\^2>\s=\s+([\d\-\.]+)"}).get("key")
if temp_S2 is None:
self.data["S2"] = None
elif len(temp_S2) == 1:
self.data["S2"] = float(temp_S2[0][0])
if abs(correct_s2 - self.data["S2"]) > 0.01:
self.data["warnings"]["spin_contamination"] = abs(correct_s2 - self.data["S2"])
else:
real_S2 = np.zeros(len(temp_S2))
have_spin_contamination = False
for ii, entry in enumerate(temp_S2):
real_S2[ii] = float(entry[0])
if abs(correct_s2 - real_S2[ii]) > 0.01:
have_spin_contamination = True
self.data["S2"] = real_S2
if have_spin_contamination:
spin_contamination = np.zeros(len(self.data["S2"]))
for ii, entry in enumerate(self.data["S2"]):
spin_contamination[ii] = abs(correct_s2 - entry)
self.data["warnings"]["spin_contamination"] = spin_contamination
# Check if the calculation is a geometry optimization. If so, parse the relevant output
self.data["optimization"] = read_pattern(self.text, {"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*opt"}).get("key")
if self.data.get("optimization", []):
self._read_optimization_data()
# Check if the calculation is a transition state optimization. If so, parse the relevant output
# Note: for now, TS calculations are treated the same as optimization calculations
self.data["transition_state"] = read_pattern(self.text, {"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*ts"}).get(
"key"
)
if self.data.get("transition_state", list()):
self._read_optimization_data()
# Check if the calculation contains a constraint in an $opt section.
self.data["opt_constraint"] = read_pattern(self.text, {"key": r"\$opt\s+CONSTRAINT"}).get("key")
if self.data.get("opt_constraint"):
temp_constraint = read_pattern(
self.text,
{
"key": r"Constraints and their Current Values\s+Value\s+Constraint\s+(\w+)\:\s+([\d\-\.]+)\s+"
r"([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
},
).get("key")
if temp_constraint is not None:
self.data["opt_constraint"] = temp_constraint[0]
if self.data.get("opt_constraint") is not None:
if float(self.data["opt_constraint"][5]) != float(self.data["opt_constraint"][6]):
if abs(float(self.data["opt_constraint"][5])) != abs(float(self.data["opt_constraint"][6])):
raise ValueError("ERROR: Opt section value and constraint should be the same!")
if abs(float(self.data["opt_constraint"][5])) not in [
0.0,
180.0,
]:
raise ValueError(
"ERROR: Opt section value and constraint can only differ by a sign at 0.0 and 180.0!"
)
# Check if the calculation is a frequency analysis. If so, parse the relevant output
self.data["frequency_job"] = read_pattern(
self.text,
{"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*freq"},
terminate_on_match=True,
).get("key")
if self.data.get("frequency_job", []):
self._read_frequency_data()
# Check if the calculation is a single point. If so, parse the relevant output
self.data["single_point_job"] = read_pattern(
self.text,
{"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*sp"},
terminate_on_match=True,
).get("key")
if self.data.get("single_point_job", []):
self._read_single_point_data()
# Check if the calculation is a force calculation. If so, parse the relevant output
self.data["force_job"] = read_pattern(
self.text,
{"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*force"},
terminate_on_match=True,
).get("key")
if self.data.get("force_job", []):
self._read_force_data()
# Check if the calculation is a PES scan. If so, parse the relevant output
self.data["scan_job"] = read_pattern(
self.text, {"key": r"(?i)\s*job(?:_)*type\s*(?:=)*\s*pes_scan"}, terminate_on_match=True
).get("key")
if self.data.get("scan_job", []):
self._read_scan_data()
# Check if an NBO calculation was performed. If so, parse the relevant output
self.data["nbo"] = read_pattern(
self.text, {"key": r"Job title: Starting NBO analysis"}, terminate_on_match=True
).get("key")
if self.data.get("nbo", []):
self._read_nbo_data()
# If the calculation did not finish and no errors have been identified yet, check for other errors
if not self.data.get("completion", []) and self.data.get("errors") == []:
self._check_completion_errors()
@staticmethod
def multiple_outputs_from_file(cls, filename, keep_sub_files=True):
"""
Parses a QChem output file with multiple calculations
        # 1.) Separates the output into sub-files
            e.g. qcout -> qcout.0, qcout.1, qcout.2 ... qcout.N
            a.) Find delimiter for multiple calculations
            b.) Make separate output sub-files
        2.) Creates separate QCCalcs for each one from the sub-files
"""
to_return = []
with zopen(filename, "rt") as f:
text = re.split(r"\s*(?:Running\s+)*Job\s+\d+\s+of\s+\d+\s+", f.read())
if text[0] == "":
text = text[1:]
for i, sub_text in enumerate(text):
with open(filename + "." + str(i), "w") as temp:
temp.write(sub_text)
tempOutput = cls(filename + "." + str(i))
to_return.append(tempOutput)
if not keep_sub_files:
os.remove(filename + "." + str(i))
return to_return
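    # Example usage (hypothetical file name): split a concatenated multi-job output,
    # parse each job, and discard the temporary sub-files afterwards.
    #   outputs = QCOutput.multiple_outputs_from_file(QCOutput, "mol.qcout",
    #                                                 keep_sub_files=False)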
def _read_charge_and_multiplicity(self):
"""
Parses charge and multiplicity.
"""
temp_charge = read_pattern(self.text, {"key": r"\$molecule\s+([\-\d]+)\s+\d"}, terminate_on_match=True).get(
"key"
)
if temp_charge is not None:
self.data["charge"] = int(temp_charge[0][0])
else:
temp_charge = read_pattern(
self.text,
{"key": r"Sum of atomic charges \=\s+([\d\-\.\+]+)"},
terminate_on_match=True,
).get("key")
if temp_charge is None:
self.data["charge"] = None
else:
self.data["charge"] = int(float(temp_charge[0][0]))
temp_multiplicity = read_pattern(
self.text, {"key": r"\$molecule\s+[\-\d]+\s+(\d)"}, terminate_on_match=True
).get("key")
if temp_multiplicity is not None:
self.data["multiplicity"] = int(temp_multiplicity[0][0])
else:
temp_multiplicity = read_pattern(
self.text,
{"key": r"Sum of spin\s+charges \=\s+([\d\-\.\+]+)"},
terminate_on_match=True,
).get("key")
if temp_multiplicity is None:
self.data["multiplicity"] = 1
else:
self.data["multiplicity"] = int(float(temp_multiplicity[0][0])) + 1
def _read_species_and_inital_geometry(self):
"""
Parses species and initial geometry.
"""
header_pattern = r"Standard Nuclear Orientation \(Angstroms\)\s+I\s+Atom\s+X\s+Y\s+Z\s+-+"
table_pattern = r"\s*\d+\s+([a-zA-Z]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*"
footer_pattern = r"\s*-+"
temp_geom = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
if temp_geom is None or len(temp_geom) == 0:
self.data["species"] = None
self.data["initial_geometry"] = None
self.data["initial_molecule"] = None
self.data["point_group"] = None
else:
temp_point_group = read_pattern(
self.text,
{"key": r"Molecular Point Group\s+([A-Za-z\d\*]+)"},
terminate_on_match=True,
).get("key")
if temp_point_group is not None:
self.data["point_group"] = temp_point_group[0][0]
else:
self.data["point_group"] = None
temp_geom = temp_geom[0]
species = []
geometry = np.zeros(shape=(len(temp_geom), 3), dtype=float)
for ii, entry in enumerate(temp_geom):
species += [entry[0]]
for jj in range(3):
if "*" in entry[jj + 1]:
geometry[ii, jj] = 10000000000.0
else:
geometry[ii, jj] = float(entry[jj + 1])
self.data["species"] = species
self.data["initial_geometry"] = geometry
if self.data["charge"] is not None and self.data["multiplicity"] is not None:
self.data["initial_molecule"] = Molecule(
species=species,
coords=geometry,
charge=self.data.get("charge"),
spin_multiplicity=self.data.get("multiplicity"),
)
else:
self.data["initial_molecule"] = None
def _read_SCF(self):
"""
Parses both old and new SCFs.
"""
if self.data.get("using_GEN_SCFMAN", []):
if "SCF_failed_to_converge" in self.data.get("errors"):
footer_pattern = r"^\s*gen_scfman_exception: SCF failed to converge"
else:
footer_pattern = r"^\s*\-+\n\s+SCF time"
header_pattern = (
r"^\s*\-+\s+Cycle\s+Energy\s+(?:(?:DIIS)*\s+[Ee]rror)*(?:RMS Gradient)*\s+\-+"
r"(?:\s*\-+\s+OpenMP\s+Integral\s+computing\s+Module\s+"
r"(?:Release:\s+version\s+[\d\-\.]+\,\s+\w+\s+[\d\-\.]+\, "
r"Q-Chem Inc\. Pittsburgh\s+)*\-+)*\n"
)
table_pattern = (
r"(?:\n[a-zA-Z_\s/]+\.C::(?:WARNING energy changes are now smaller than effective "
r"accuracy\.)*(?:\s+calculation will continue, but THRESH should be increased)*"
r"(?:\s+or SCF_CONVERGENCE decreased\. )*(?:\s+effective_thresh = [\d\-\.]+e[\d\-]+)*)*"
r"(?:\s*Nonlocal correlation = [\d\-\.]+e[\d\-]+)*"
r"(?:\s*Inaccurate integrated density:\n\s+Number of electrons\s+=\s+[\d\-\.]+\n\s+"
r"Numerical integral\s+=\s+[\d\-\.]+\n\s+Relative error\s+=\s+[\d\-\.]+\s+\%\n)*\s*\d+\s+"
r"([\d\-\.]+)\s+([\d\-\.]+)e([\d\-\.\+]+)(?:\s+Convergence criterion met)*"
r"(?:\s+Preconditoned Steepest Descent)*(?:\s+Roothaan Step)*(?:\s+"
r"(?:Normal\s+)*BFGS [Ss]tep)*(?:\s+LineSearch Step)*(?:\s+Line search: overstep)*"
r"(?:\s+Dog-leg BFGS step)*(?:\s+Line search: understep)*"
r"(?:\s+Descent step)*(?:\s+Done DIIS. Switching to GDM)*"
r"(?:\s*\-+\s+Cycle\s+Energy\s+(?:(?:DIIS)*\s+[Ee]rror)*"
r"(?:RMS Gradient)*\s+\-+(?:\s*\-+\s+OpenMP\s+Integral\s+computing\s+Module\s+"
r"(?:Release:\s+version\s+[\d\-\.]+\,\s+\w+\s+[\d\-\.]+\, "
r"Q-Chem Inc\. Pittsburgh\s+)*\-+)*\n)*"
)
else:
if "SCF_failed_to_converge" in self.data.get("errors"):
footer_pattern = r"^\s*\d+\s*[\d\-\.]+\s+[\d\-\.]+E[\d\-\.]+\s+Convergence\s+failure\n"
else:
footer_pattern = r"^\s*\-+\n"
header_pattern = r"^\s*\-+\s+Cycle\s+Energy\s+DIIS Error\s+\-+\n"
table_pattern = (
r"(?:\s*Inaccurate integrated density:\n\s+Number of electrons\s+=\s+[\d\-\.]+\n\s+"
r"Numerical integral\s+=\s+[\d\-\.]+\n\s+Relative error\s+=\s+[\d\-\.]+\s+\%\n)*\s*\d+\s*"
r"([\d\-\.]+)\s+([\d\-\.]+)E([\d\-\.\+]+)(?:\s*\n\s*cpu\s+[\d\-\.]+\swall\s+[\d\-\.]+)*"
r"(?:\nin dftxc\.C, eleTot sum is:[\d\-\.]+, tauTot is\:[\d\-\.]+)*"
r"(?:\s+Convergence criterion met)*(?:\s+Done RCA\. Switching to DIIS)*"
r"(?:\n\s*Warning: not using a symmetric Q)*"
r"(?:\nRecomputing EXC\s*[\d\-\.]+\s*[\d\-\.]+\s*[\d\-\.]+"
r"(?:\s*\nRecomputing EXC\s*[\d\-\.]+\s*[\d\-\.]+\s*[\d\-\.]+)*)*"
)
temp_scf = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
real_scf = []
for one_scf in temp_scf:
temp = np.zeros(shape=(len(one_scf), 2))
for ii, entry in enumerate(one_scf):
temp[ii, 0] = float(entry[0])
temp[ii, 1] = float(entry[1]) * 10 ** float(entry[2])
real_scf += [temp]
self.data["SCF"] = real_scf
temp_thresh_warning = read_pattern(
self.text,
{
"key": r"\n[a-zA-Z_\s/]+\.C::WARNING energy changes are now smaller than effective accuracy"
r"\.\n[a-zA-Z_\s/]+\.C::\s+calculation will continue, but THRESH should be increased\n"
r"[a-zA-Z_\s/]+\.C::\s+or SCF_CONVERGENCE decreased\. \n"
r"[a-zA-Z_\s/]+\.C::\s+effective_thresh = ([\d\-\.]+e[\d\-]+)"
},
).get("key")
if temp_thresh_warning is not None:
if len(temp_thresh_warning) == 1:
self.data["warnings"]["thresh"] = float(temp_thresh_warning[0][0])
else:
thresh_warning = np.zeros(len(temp_thresh_warning))
for ii, entry in enumerate(temp_thresh_warning):
thresh_warning[ii] = float(entry[0])
self.data["warnings"]["thresh"] = thresh_warning
temp_SCF_energy = read_pattern(self.text, {"key": r"SCF energy in the final basis set =\s*([\d\-\.]+)"}).get(
"key"
)
if temp_SCF_energy is not None:
if len(temp_SCF_energy) == 1:
self.data["SCF_energy_in_the_final_basis_set"] = float(temp_SCF_energy[0][0])
else:
SCF_energy = np.zeros(len(temp_SCF_energy))
for ii, val in enumerate(temp_SCF_energy):
SCF_energy[ii] = float(val[0])
self.data["SCF_energy_in_the_final_basis_set"] = SCF_energy
temp_Total_energy = read_pattern(
self.text, {"key": r"Total energy in the final basis set =\s*([\d\-\.]+)"}
).get("key")
if temp_Total_energy is not None:
if len(temp_Total_energy) == 1:
self.data["Total_energy_in_the_final_basis_set"] = float(temp_Total_energy[0][0])
else:
Total_energy = np.zeros(len(temp_Total_energy))
for ii, val in enumerate(temp_Total_energy):
Total_energy[ii] = float(val[0])
self.data["Total_energy_in_the_final_basis_set"] = Total_energy
def _read_charges(self):
"""
Parses Mulliken/ESP/RESP charges. Also parses spins given an unrestricted SCF.
"""
if self.data.get("unrestricted", []):
header_pattern = (
r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+"
r"Spin\s\(a\.u\.\)\s+\-+"
)
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
else:
header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+"
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
temp_mulliken = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
real_mulliken = []
for one_mulliken in temp_mulliken:
if self.data.get("unrestricted", []):
temp = np.zeros(shape=(len(one_mulliken), 2))
for ii, entry in enumerate(one_mulliken):
temp[ii, 0] = float(entry[0])
temp[ii, 1] = float(entry[1])
else:
temp = np.zeros(len(one_mulliken))
for ii, entry in enumerate(one_mulliken):
temp[ii] = float(entry[0])
real_mulliken += [temp]
self.data["Mulliken"] = real_mulliken
# Check for ESP/RESP charges
esp_or_resp = read_pattern(self.text, {"key": r"Merz-Kollman (R?ESP) Net Atomic Charges"}).get("key")
if esp_or_resp is not None:
header_pattern = r"Merz-Kollman (R?ESP) Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+"
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
temp_esp_or_resp = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
real_esp_or_resp = []
for one_entry in temp_esp_or_resp:
temp = np.zeros(len(one_entry))
for ii, entry in enumerate(one_entry):
temp[ii] = float(entry[0])
real_esp_or_resp += [temp]
self.data[esp_or_resp[0][0]] = real_esp_or_resp
def _detect_general_warnings(self):
# Check for inaccurate integrated density
temp_inac_integ = read_pattern(
self.text,
{
"key": r"Inaccurate integrated density:\n\s+Number of electrons\s+=\s+([\d\-\.]+)\n\s+"
r"Numerical integral\s+=\s+([\d\-\.]+)\n\s+Relative error\s+=\s+([\d\-\.]+)\s+\%\n"
},
).get("key")
if temp_inac_integ is not None:
inaccurate_integrated_density = np.zeros(shape=(len(temp_inac_integ), 3))
for ii, entry in enumerate(temp_inac_integ):
for jj, val in enumerate(entry):
inaccurate_integrated_density[ii][jj] = float(val)
self.data["warnings"]["inaccurate_integrated_density"] = inaccurate_integrated_density
# Check for an MKL error
if read_pattern(self.text, {"key": r"Intel MKL ERROR"}, terminate_on_match=True).get("key") == [[]]:
self.data["warnings"]["mkl"] = True
# Check if the job is being hindered by a lack of analytical derivatives
if (
read_pattern(
self.text,
{"key": r"Starting finite difference calculation for IDERIV"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["missing_analytical_derivates"] = True
# Check if the job is complaining about MO files of inconsistent size
if (
read_pattern(
self.text,
{"key": r"Inconsistent size for SCF MO coefficient file"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["inconsistent_size"] = True
# Check for linear dependence in the AO basis
if (
read_pattern(
self.text,
{"key": r"Linear dependence detected in AO basis"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["linear_dependence"] = True
# Check for Hessian without desired local structure
if (
read_pattern(
self.text,
{"key": r"\*\*WARNING\*\* Hessian does not have the Desired Local Structure"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["hessian_local_structure"] = True
# Check if the allowed number of GetCART iterative cycles was ever exceeded
if (
read_pattern(
self.text,
{"key": r"\*\*\*ERROR\*\*\* Exceeded allowed number of iterative cycles in GetCART"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["GetCART_cycles"] = True
# Check for problems with internal coordinates
if (
read_pattern(
self.text,
{"key": r"\*\*WARNING\*\* Problems with Internal Coordinates"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["internal_coordinates"] = True
# Check for problem with eigenvalue magnitude
if (
read_pattern(
self.text,
{"key": r"\*\*WARNING\*\* Magnitude of eigenvalue"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["eigenvalue_magnitude"] = True
# Check for problems with hereditary positive definiteness
if (
read_pattern(
self.text,
{"key": r"\*\*WARNING\*\* Hereditary positive definiteness endangered"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["positive_definiteness_endangered"] = True
# Check if there were problems with a colinear bend
if (
read_pattern(
self.text,
{
"key": r"\*\*\*ERROR\*\*\* Angle[\s\d]+is near\-linear\s+"
r"But No atom available to define colinear bend"
},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["colinear_bend"] = True
# Check if there were problems diagonalizing B*B(t)
if (
read_pattern(
self.text,
{"key": r"\*\*\*ERROR\*\*\* Unable to Diagonalize B\*B\(t\) in <MakeNIC>"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["warnings"]["diagonalizing_BBt"] = True
def _read_geometries(self):
"""
Parses all geometries from an optimization trajectory.
"""
geoms = []
header_pattern = r"\s+Optimization\sCycle:\s+\d+\s+Coordinates \(Angstroms\)\s+ATOM\s+X\s+Y\s+Z"
table_pattern = r"\s+\d+\s+\w+\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s+Point Group\:\s+[\d\w\*]+\s+Number of degrees of freedom\:\s+\d+"
parsed_geometries = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
for ii, parsed_geometry in enumerate(parsed_geometries):
if not parsed_geometry:
geoms.append(None)
else:
geoms.append(process_parsed_coords(parsed_geometry))
self.data["geometries"] = geoms
self.data["last_geometry"] = geoms[-1]
if self.data.get("charge") is not None:
self.data["molecule_from_last_geometry"] = Molecule(
species=self.data.get("species"),
coords=self.data.get("last_geometry"),
charge=self.data.get("charge"),
spin_multiplicity=self.data.get("multiplicity"),
)
# Parses optimized XYZ coordinates. If not present, parses optimized Z-matrix.
header_pattern = r"\*+\s+OPTIMIZATION\s+CONVERGED\s+\*+\s+\*+\s+Coordinates \(Angstroms\)\s+ATOM\s+X\s+Y\s+Z"
table_pattern = r"\s+\d+\s+\w+\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s+Z-matrix Print:"
parsed_optimized_geometries = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
if not parsed_optimized_geometries:
self.data["optimized_geometry"] = None
header_pattern = (
r"^\s+\*+\s+OPTIMIZATION CONVERGED\s+\*+\s+\*+\s+Z-matrix\s+"
r"Print:\s+\$molecule\s+[\d\-]+\s+[\d\-]+\n"
)
table_pattern = (
r"\s*(\w+)(?:\s+(\d+)\s+([\d\-\.]+)(?:\s+(\d+)\s+([\d\-\.]+)"
r"(?:\s+(\d+)\s+([\d\-\.]+))*)*)*(?:\s+0)*"
)
footer_pattern = r"^\$end\n"
self.data["optimized_zmat"] = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
else:
self.data["optimized_geometry"] = process_parsed_coords(parsed_optimized_geometries[0])
self.data["optimized_geometries"] = [process_parsed_coords(i) for i in parsed_optimized_geometries]
if self.data.get("charge") is not None:
self.data["molecule_from_optimized_geometry"] = Molecule(
species=self.data.get("species"),
coords=self.data.get("optimized_geometry"),
charge=self.data.get("charge"),
spin_multiplicity=self.data.get("multiplicity"),
)
self.data["molecules_from_optimized_geometries"] = list()
for geom in self.data["optimized_geometries"]:
mol = Molecule(
species=self.data.get("species"),
coords=geom,
charge=self.data.get("charge"),
spin_multiplicity=self.data.get("multiplicity"),
)
self.data["molecules_from_optimized_geometries"].append(mol)
def _get_grad_format_length(self, header):
"""
Determines the maximum number of gradient entries printed on a line,
which changes for different versions of Q-Chem
"""
found_end = False
index = 1
pattern = header
while not found_end:
if read_pattern(self.text, {"key": pattern}, terminate_on_match=True).get("key") != [[]]:
found_end = True
else:
pattern = pattern + r"\s+" + str(index)
index += 1
return index - 2
def _read_gradients(self):
"""
Parses all gradients obtained during an optimization trajectory
"""
grad_header_pattern = r"Gradient of (?:SCF)?(?:MP2)? Energy(?: \(in au\.\))?"
footer_pattern = r"(?:Max gradient component|Gradient time)"
grad_format_length = self._get_grad_format_length(grad_header_pattern)
grad_table_pattern = (
r"(?:\s+\d+(?:\s+\d+)?(?:\s+\d+)?(?:\s+\d+)?(?:\s+\d+)?(?:\s+\d+)?)?\n\s\s\s\s[1-3]\s*" r"(\-?[\d\.]{9,12})"
)
if grad_format_length > 1:
for ii in range(1, grad_format_length):
grad_table_pattern = grad_table_pattern + r"(?:\s*(\-?[\d\.]{9,12}))?"
parsed_gradients = read_table_pattern(self.text, grad_header_pattern, grad_table_pattern, footer_pattern)
sorted_gradients = np.zeros(shape=(len(parsed_gradients), len(self.data["initial_molecule"]), 3))
for ii, grad in enumerate(parsed_gradients):
for jj in range(int(len(grad) / 3)):
for kk in range(grad_format_length):
if grad[jj * 3][kk] != "None":
sorted_gradients[ii][jj * grad_format_length + kk][0] = grad[jj * 3][kk]
sorted_gradients[ii][jj * grad_format_length + kk][1] = grad[jj * 3 + 1][kk]
sorted_gradients[ii][jj * grad_format_length + kk][2] = grad[jj * 3 + 2][kk]
self.data["gradients"] = sorted_gradients
if self.data["solvent_method"] is not None:
header_pattern = r"total gradient after adding PCM contribution --\s+-+\s+Atom\s+X\s+Y\s+Z\s+-+"
table_pattern = r"\s+\d+\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s"
footer_pattern = r"-+"
parsed_gradients = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
pcm_gradients = np.zeros(shape=(len(parsed_gradients), len(self.data["initial_molecule"]), 3))
for ii, grad in enumerate(parsed_gradients):
for jj, entry in enumerate(grad):
for kk, val in enumerate(entry):
pcm_gradients[ii][jj][kk] = float(val)
self.data["pcm_gradients"] = pcm_gradients
else:
self.data["pcm_gradients"] = None
if read_pattern(self.text, {"key": r"Gradient of CDS energy"}, terminate_on_match=True).get("key") == [[]]:
header_pattern = r"Gradient of CDS energy"
parsed_gradients = read_table_pattern(self.text, header_pattern, grad_table_pattern, grad_header_pattern)
sorted_gradients = np.zeros(shape=(len(parsed_gradients), len(self.data["initial_molecule"]), 3))
for ii, grad in enumerate(parsed_gradients):
for jj in range(int(len(grad) / 3)):
for kk in range(grad_format_length):
if grad[jj * 3][kk] != "None":
sorted_gradients[ii][jj * grad_format_length + kk][0] = grad[jj * 3][kk]
sorted_gradients[ii][jj * grad_format_length + kk][1] = grad[jj * 3 + 1][kk]
sorted_gradients[ii][jj * grad_format_length + kk][2] = grad[jj * 3 + 2][kk]
self.data["CDS_gradients"] = sorted_gradients
else:
self.data["CDS_gradients"] = None
def _read_optimization_data(self):
temp_energy_trajectory = read_pattern(self.text, {"key": r"\sEnergy\sis\s+([\d\-\.]+)"}).get("key")
if temp_energy_trajectory is None:
self.data["energy_trajectory"] = []
else:
real_energy_trajectory = np.zeros(len(temp_energy_trajectory))
for ii, entry in enumerate(temp_energy_trajectory):
real_energy_trajectory[ii] = float(entry[0])
self.data["energy_trajectory"] = real_energy_trajectory
self._read_geometries()
if have_babel:
self.data["structure_change"] = check_for_structure_changes(
self.data["initial_molecule"],
self.data["molecule_from_last_geometry"],
)
self._read_gradients()
# Then, if no optimized geometry or z-matrix is found, and no errors have been previously
# identified, check to see if the optimization failed to converge or if Lambda wasn't able
# to be determined.
if (
len(self.data.get("errors")) == 0
and self.data.get("optimized_geometry") is None
and len(self.data.get("optimized_zmat")) == 0
):
if (
read_pattern(
self.text,
{"key": r"MAXIMUM OPTIMIZATION CYCLES REACHED"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["out_of_opt_cycles"]
elif (
read_pattern(
self.text,
{"key": r"UNABLE TO DETERMINE Lamda IN FormD"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["unable_to_determine_lamda"]
def _read_frequency_data(self):
"""
Parses frequencies, enthalpy, entropy, and mode vectors.
"""
raman = False
if read_pattern(self.text, {"key": r"doraman\s*(?:=)*\s*true"}, terminate_on_match=True).get("key") == [[]]:
raman = True
temp_dict = read_pattern(
self.text,
{
"frequencies": r"\s*Frequency:\s+(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+))*)*",
"trans_dip": r"TransDip\s+(\-?[\d\.]{5,7}|\*{5,7})\s*(\-?[\d\.]{5,7}|\*{5,7})"
r"\s*(\-?[\d\.]{5,7}|\*{5,7})\s*"
r"(?:(\-?[\d\.]{5,7}|\*{5,7})\s*(\-?[\d\.]{5,7}|\*{5,7})\s*(\-?[\d\.]{5,7}|\*{5,7})\s*"
r"(?:(\-?[\d\.]{5,7}|\*{5,7})\s*(\-?[\d\.]{5,7}|\*{5,7})\s*(\-?[\d\.]{5,7}|\*{5,7}))*)*",
"IR_intens": r"\s*IR Intens:\s*(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+))*)*",
"IR_active": r"\s*IR Active:\s+([YESNO]+)(?:\s+([YESNO]+)(?:\s+([YESNO]+))*)*",
"raman_intens": r"\s*Raman Intens:\s*(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+))*)*",
"depolar": r"\s*Depolar:\s*(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+)(?:\s+(\-?[\d\.\*]+))*)*",
"raman_active": r"\s*Raman Active:\s+([YESNO]+)(?:\s+([YESNO]+)(?:\s+([YESNO]+))*)*",
"ZPE": r"\s*Zero point vibrational energy:\s+([\d\-\.]+)\s+kcal/mol",
"trans_enthalpy": r"\s*Translational Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"rot_enthalpy": r"\s*Rotational Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"vib_enthalpy": r"\s*Vibrational Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"gas_constant": r"\s*gas constant \(RT\):\s+([\d\-\.]+)\s+kcal/mol",
"trans_entropy": r"\s*Translational Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
"rot_entropy": r"\s*Rotational Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
"vib_entropy": r"\s*Vibrational Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
"total_enthalpy": r"\s*Total Enthalpy:\s+([\d\-\.]+)\s+kcal/mol",
"total_entropy": r"\s*Total Entropy:\s+([\d\-\.]+)\s+cal/mol\.K",
},
)
keys = [
"ZPE",
"trans_enthalpy",
"rot_enthalpy",
"vib_enthalpy",
"gas_constant",
"trans_entropy",
"rot_entropy",
"vib_entropy",
"total_enthalpy",
"total_entropy",
]
for key in keys:
if temp_dict.get(key) is None:
self.data[key] = None
else:
self.data[key] = float(temp_dict.get(key)[0][0])
if temp_dict.get("frequencies") is None:
self.data["frequencies"] = None
self.data["IR_intens"] = None
self.data["IR_active"] = None
self.data["raman_intens"] = None
self.data["raman_active"] = None
self.data["depolar"] = None
self.data["trans_dip"] = None
else:
temp_freqs = [value for entry in temp_dict.get("frequencies") for value in entry]
temp_IR_intens = [value for entry in temp_dict.get("IR_intens") for value in entry]
IR_active = [value for entry in temp_dict.get("IR_active") for value in entry]
temp_trans_dip = [value for entry in temp_dict.get("trans_dip") for value in entry]
self.data["IR_active"] = IR_active
if raman:
raman_active = [value for entry in temp_dict.get("raman_active") for value in entry]
temp_raman_intens = [value for entry in temp_dict.get("raman_intens") for value in entry]
temp_depolar = [value for entry in temp_dict.get("depolar") for value in entry]
self.data["raman_active"] = raman_active
raman_intens = np.zeros(len(temp_raman_intens) - temp_raman_intens.count("None"))
for ii, entry in enumerate(temp_raman_intens):
if entry != "None":
if "*" in entry:
raman_intens[ii] = float("inf")
else:
raman_intens[ii] = float(entry)
self.data["raman_intens"] = raman_intens
depolar = np.zeros(len(temp_depolar) - temp_depolar.count("None"))
for ii, entry in enumerate(temp_depolar):
if entry != "None":
if "*" in entry:
depolar[ii] = float("inf")
else:
depolar[ii] = float(entry)
self.data["depolar"] = depolar
else:
self.data["raman_intens"] = None
self.data["raman_active"] = None
self.data["depolar"] = None
trans_dip = np.zeros(shape=(int((len(temp_trans_dip) - temp_trans_dip.count("None")) / 3), 3))
for ii, entry in enumerate(temp_trans_dip):
if entry != "None":
if "*" in entry:
trans_dip[int(ii / 3)][ii % 3] = float("inf")
else:
trans_dip[int(ii / 3)][ii % 3] = float(entry)
self.data["trans_dip"] = trans_dip
freqs = np.zeros(len(temp_freqs) - temp_freqs.count("None"))
for ii, entry in enumerate(temp_freqs):
if entry != "None":
if "*" in entry:
if ii == 0:
freqs[ii] = -float("inf")
elif ii == len(freqs) - 1:
freqs[ii] = float("inf")
elif freqs[ii - 1] == -float("inf"):
freqs[ii] = -float("inf")
elif "*" in temp_freqs[ii + 1]:
freqs[ii] = float("inf")
else:
raise RuntimeError(
"ERROR: Encountered an undefined frequency not at the beginning or end of the "
"frequency list, which makes no sense! Exiting..."
)
if not self.data.get("completion", []):
if "undefined_frequency" not in self.data["errors"]:
self.data["errors"] += ["undefined_frequency"]
else:
if "undefined_frequency" not in self.data["warnings"]:
self.data["warnings"]["undefined_frequency"] = True
else:
freqs[ii] = float(entry)
self.data["frequencies"] = freqs
IR_intens = np.zeros(len(temp_IR_intens) - temp_IR_intens.count("None"))
for ii, entry in enumerate(temp_IR_intens):
if entry != "None":
if "*" in entry:
IR_intens[ii] = float("inf")
else:
IR_intens[ii] = float(entry)
self.data["IR_intens"] = IR_intens
if not raman:
header_pattern = r"\s*Raman Active:\s+[YESNO]+\s+(?:[YESNO]+\s+)*X\s+Y\s+Z\s+(?:X\s+Y\s+Z\s+)*"
else:
header_pattern = r"\s*Depolar:\s*\-?[\d\.\*]+\s+(?:\-?[\d\.\*]+\s+)*X\s+Y\s+Z\s+(?:X\s+Y\s+Z\s+)*"
table_pattern = (
r"\s*[a-zA-Z][a-zA-Z\s]\s*([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+)\s*(?:([\d\-\.]+)\s*"
r"([\d\-\.]+)\s*([\d\-\.]+)\s*(?:([\d\-\.]+)\s*([\d\-\.]+)\s*([\d\-\.]+))*)*"
)
footer_pattern = (
r"TransDip\s+\-?[\d\.\*]+\s*\-?[\d\.\*]+\s*\-?[\d\.\*]+\s*(?:\-?[\d\.\*]+\s*\-?"
r"[\d\.\*]+\s*\-?[\d\.\*]+\s*)*"
)
temp_freq_mode_vecs = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern)
freq_mode_vecs = np.zeros(shape=(len(freqs), len(temp_freq_mode_vecs[0]), 3))
for ii, triple_FMV in enumerate(temp_freq_mode_vecs):
for jj, line in enumerate(triple_FMV):
for kk, entry in enumerate(line):
if entry != "None":
freq_mode_vecs[int(ii * 3 + math.floor(kk / 3)), jj, kk % 3] = float(entry)
self.data["frequency_mode_vectors"] = freq_mode_vecs
freq_length = len(self.data["frequencies"])
if (
len(self.data["frequency_mode_vectors"]) != freq_length
or len(self.data["IR_intens"]) != freq_length
or len(self.data["IR_active"]) != freq_length
):
self.data["warnings"]["frequency_length_inconsistency"] = True
def _read_single_point_data(self):
"""
Parses final free energy information from single-point calculations.
"""
temp_dict = read_pattern(
self.text,
{"final_energy": r"\s*Total\s+energy in the final basis set\s+=\s*([\d\-\.]+)"},
)
if temp_dict.get("final_energy") is None:
self.data["final_energy"] = None
else:
# Use index -1 because with PCM two lines match the pattern above;
# we want the value from the final calculation.
self.data["final_energy"] = float(temp_dict.get("final_energy")[-1][0])
def _read_force_data(self):
self._read_gradients()
def _read_scan_data(self):
temp_energy_trajectory = read_pattern(self.text, {"key": r"\sEnergy\sis\s+([\d\-\.]+)"}).get("key")
if temp_energy_trajectory is None:
self.data["energy_trajectory"] = []
else:
real_energy_trajectory = np.zeros(len(temp_energy_trajectory))
for ii, entry in enumerate(temp_energy_trajectory):
real_energy_trajectory[ii] = float(entry[0])
self.data["energy_trajectory"] = real_energy_trajectory
self._read_geometries()
self._read_gradients()
if len(self.data.get("errors")) == 0:
if read_pattern(self.text, {"key": r"MAXIMUM OPTIMIZATION CYCLES REACHED"}, terminate_on_match=True).get(
"key"
) == [[]]:
self.data["errors"] += ["out_of_opt_cycles"]
elif read_pattern(self.text, {"key": r"UNABLE TO DETERMINE Lamda IN FormD"}, terminate_on_match=True).get(
"key"
) == [[]]:
self.data["errors"] += ["unable_to_determine_lamda"]
header_pattern = r"\s*\-+ Summary of potential scan\: \-+\s*"
row_pattern_single = r"\s*([\-\.0-9]+)\s+([\-\.0-9]+)\s*\n"
row_pattern_double = r"\s*([\-\.0-9]+)\s+([\-\.0-9]+)\s+([\-\.0-9]+)\s*\n"
footer_pattern = r"\s*\-+"
single_data = read_table_pattern(
self.text,
header_pattern=header_pattern,
row_pattern=row_pattern_single,
footer_pattern=footer_pattern,
)
self.data["scan_energies"] = list()
if len(single_data) == 0:
double_data = read_table_pattern(
self.text,
header_pattern=header_pattern,
row_pattern=row_pattern_double,
footer_pattern=footer_pattern,
)
if len(double_data) == 0:
self.data["scan_energies"] = None
else:
for line in double_data[0]:
params = [float(line[0]), float(line[1])]
energy = float(line[2])
self.data["scan_energies"].append({"params": params, "energy": energy})
else:
for line in single_data[0]:
param = float(line[0])
energy = float(line[1])
self.data["scan_energies"].append({"params": param, "energy": energy})
scan_inputs_head = r"\s*\$[Ss][Cc][Aa][Nn]"
scan_inputs_row = r"\s*([Ss][Tt][Rr][Ee]|[Tt][Oo][Rr][Ss]|[Bb][Ee][Nn][Dd]) "
scan_inputs_row += r"((?:[0-9]+\s+)+)([\-\.0-9]+)\s+([\-\.0-9]+)\s+([\-\.0-9]+)\s*"
scan_inputs_foot = r"\s*\$[Ee][Nn][Dd]"
constraints_meta = read_table_pattern(
self.text,
header_pattern=scan_inputs_head,
row_pattern=scan_inputs_row,
footer_pattern=scan_inputs_foot,
)
self.data["scan_variables"] = {"stre": list(), "bend": list(), "tors": list()}
for row in constraints_meta[0]:
var_type = row[0].lower()
self.data["scan_variables"][var_type].append(
{
"atoms": [int(i) for i in row[1].split()],
"start": float(row[2]),
"end": float(row[3]),
"increment": float(row[4]),
}
)
temp_constraint = read_pattern(
self.text,
{"key": r"\s*(Distance\(Angs\)|Angle|Dihedral)\:\s*((?:[0-9]+\s+)+)+([\.0-9]+)\s+([\.0-9]+)"},
).get("key")
self.data["scan_constraint_sets"] = {"stre": list(), "bend": list(), "tors": list()}
if temp_constraint is not None:
for entry in temp_constraint:
atoms = [int(i) for i in entry[1].split()]
current = float(entry[2])
target = float(entry[3])
if entry[0] == "Distance(Angs)":
if len(atoms) == 2:
self.data["scan_constraint_sets"]["stre"].append(
{"atoms": atoms, "current": current, "target": target}
)
elif entry[0] == "Angle":
if len(atoms) == 3:
self.data["scan_constraint_sets"]["bend"].append(
{"atoms": atoms, "current": current, "target": target}
)
elif entry[0] == "Dihedral":
if len(atoms) == 4:
self.data["scan_constraint_sets"]["tors"].append(
{"atoms": atoms, "current": current, "target": target}
)
def _read_pcm_information(self):
"""
Parses information from PCM solvent calculations.
"""
temp_dict = read_pattern(
self.text,
{
"g_electrostatic": r"\s*G_electrostatic\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_cavitation": r"\s*G_cavitation\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_dispersion": r"\s*G_dispersion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_repulsion": r"\s*G_repulsion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"total_contribution_pcm": r"\s*Total\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"solute_internal_energy": r"Solute Internal Energy \(H0\)\s*=\s*([\d\-\.]+)",
},
)
for key in temp_dict:
if temp_dict.get(key) is None:
self.data["solvent_data"][key] = None
elif len(temp_dict.get(key)) == 1:
self.data["solvent_data"][key] = float(temp_dict.get(key)[0][0])
else:
temp_result = np.zeros(len(temp_dict.get(key)))
for ii, entry in enumerate(temp_dict.get(key)):
temp_result[ii] = float(entry[0])
self.data["solvent_data"][key] = temp_result
smd_keys = ["smd0", "smd3", "smd4", "smd6", "smd9"]
for key in smd_keys:
self.data["solvent_data"][key] = None
def _read_smd_information(self):
"""
Parses information from SMD solvent calculations.
"""
temp_dict = read_pattern(
self.text,
{
"smd0": r"E-EN\(g\) gas\-phase elect\-nuc energy\s*([\d\-\.]+) a\.u\.",
"smd3": r"G\-ENP\(liq\) elect\-nuc\-pol free energy of system\s*([\d\-\.]+) a\.u\.",
"smd4": r"G\-CDS\(liq\) cavity\-dispersion\-solvent structure\s*free energy\s*([\d\-\.]+) kcal\/mol",
"smd6": r"G\-S\(liq\) free energy of system\s*([\d\-\.]+) a\.u\.",
"smd9": r"DeltaG\-S\(liq\) free energy of\s*solvation\s*\(9\) = \(6\) \- \(0\)\s*([\d\-\.]+) kcal\/mol",
},
)
for key in temp_dict:
if temp_dict.get(key) is None:
self.data["solvent_data"][key] = None
elif len(temp_dict.get(key)) == 1:
self.data["solvent_data"][key] = float(temp_dict.get(key)[0][0])
else:
temp_result = np.zeros(len(temp_dict.get(key)))
for ii, entry in enumerate(temp_dict.get(key)):
temp_result[ii] = float(entry[0])
self.data["solvent_data"][key] = temp_result
pcm_keys = [
"g_electrostatic",
"g_cavitation",
"g_dispersion",
"g_repulsion",
"total_contribution_pcm",
"solute_internal_energy",
]
for key in pcm_keys:
self.data["solvent_data"][key] = None
def _read_nbo_data(self):
"""
Parses NBO output
"""
dfs = nbo_parser(self.filename)
nbo_data = {}
for key, value in dfs.items():
nbo_data[key] = [df.to_dict() for df in value]
self.data["nbo_data"] = nbo_data
def _check_completion_errors(self):
"""
Parses potential errors that can cause jobs to crash
"""
if (
read_pattern(
self.text,
{"key": r"Coordinates do not transform within specified threshold"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["failed_to_transform_coords"]
elif (
read_pattern(
self.text,
{"key": r"The Q\-Chem input file has failed to pass inspection"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["input_file_error"]
elif read_pattern(self.text, {"key": r"Error opening input stream"}, terminate_on_match=True).get("key") == [
[]
]:
self.data["errors"] += ["failed_to_read_input"]
elif (
read_pattern(
self.text,
{"key": r"FileMan error: End of file reached prematurely"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["premature_end_FileMan_error"]
elif (
read_pattern(
self.text,
{"key": r"need to increase the array of NLebdevPts"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["NLebdevPts"]
elif read_pattern(self.text, {"key": r"method not available"}, terminate_on_match=True).get("key") == [[]]:
self.data["errors"] += ["method_not_available"]
elif (
read_pattern(
self.text,
{"key": r"Could not find \$molecule section in ParseQInput"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["read_molecule_error"]
elif read_pattern(self.text, {"key": r"Welcome to Q-Chem"}, terminate_on_match=True).get("key") != [[]]:
self.data["errors"] += ["never_called_qchem"]
elif (
read_pattern(
self.text,
{"key": r"\*\*\*ERROR\*\*\* Hessian Appears to have all zero or negative eigenvalues"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["hessian_eigenvalue_error"]
elif read_pattern(self.text, {"key": r"FlexNet Licensing error"}, terminate_on_match=True).get("key") == [[]]:
self.data["errors"] += ["licensing_error"]
elif read_pattern(self.text, {"key": r"Unable to validate license"}, terminate_on_match=True).get("key") == [
[]
]:
self.data["errors"] += ["licensing_error"]
elif (
read_pattern(
self.text,
{"key": r"Could not open driver file in ReadDriverFromDisk"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["driver_error"]
elif (
read_pattern(
self.text,
{"key": r"Basis not supported for the above atom"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["basis_not_supported"]
elif (
read_pattern(
self.text,
{"key": r"gen_scfman_exception: GDM:: Zero or negative preconditioner scaling factor"},
terminate_on_match=True,
).get("key")
== [[]]
):
self.data["errors"] += ["gdm_neg_precon_error"]
else:
tmp_failed_line_searches = read_pattern(
self.text,
{"key": r"\d+\s+failed line searches\.\s+Resetting"},
terminate_on_match=False,
).get("key")
if tmp_failed_line_searches is not None:
if len(tmp_failed_line_searches) > 10:
self.data["errors"] += ["SCF_failed_to_converge"]
if self.data.get("errors") == []:
self.data["errors"] += ["unknown_error"]
def as_dict(self):
"""
Returns:
MSONAble dict.
"""
d = {}
d["data"] = self.data
d["text"] = self.text
d["filename"] = self.filename
return jsanitize(d, strict=True)
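# A minimal usage sketch (illustrative only; the output file name is hypothetical,
# and ``QCOutput`` is the parser class the methods above belong to):
#
#   out = QCOutput("mol.qcout")
#   print(out.data["errors"])   # e.g. [] for a clean run
#   d = out.as_dict()           # MSONable representation of the parsed data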
def check_for_structure_changes(mol1: Molecule, mol2: Molecule) -> str:
"""
Compares connectivity of two molecules (using MoleculeGraph w/ OpenBabelNN).
This function will work with two molecules with different atom orderings,
but for proper treatment, atoms should be listed in the same order.
Possible outputs include:
- no_change: the bonding in the two molecules is identical
- unconnected_fragments: the MoleculeGraph of mol1 is connected, but the
MoleculeGraph of mol2 is not connected
- fewer_bonds: the MoleculeGraph of mol1 has more bonds (edges) than the
MoleculeGraph of mol2
- more_bonds: the MoleculeGraph of mol2 has more bonds (edges) than the
MoleculeGraph of mol1
- bond_change: this case catches any other non-identical MoleculeGraphs
Args:
mol1: Pymatgen Molecule object to be compared.
mol2: Pymatgen Molecule object to be compared.
Returns:
One of ["unconnected_fragments", "fewer_bonds", "more_bonds",
"bond_change", "no_change"]
"""
special_elements = ["Li", "Na", "Mg", "Ca", "Zn"]
mol_list = [copy.deepcopy(mol1), copy.deepcopy(mol2)]
if mol1.composition != mol2.composition:
raise RuntimeError("Molecules have different compositions! Exiting...")
for ii, site in enumerate(mol1):
if site.specie.symbol != mol2[ii].specie.symbol:
warnings.warn(
"Comparing molecules with different atom ordering! "
"Turning off special treatment for coordinating metals."
)
special_elements = []
special_sites: List[List] = [[], []]
for ii, mol in enumerate(mol_list):
for jj, site in enumerate(mol):
if site.specie.symbol in special_elements:
distances = [[kk, site.distance(other_site)] for kk, other_site in enumerate(mol)]
special_sites[ii].append([jj, site, distances])
for jj, site in enumerate(mol):
if site.specie.symbol in special_elements:
mol.__delitem__(jj)
# Can add logic to check the distances in the future if desired
initial_mol_graph = MoleculeGraph.with_local_env_strategy(mol_list[0], OpenBabelNN())
initial_graph = initial_mol_graph.graph
last_mol_graph = MoleculeGraph.with_local_env_strategy(mol_list[1], OpenBabelNN())
last_graph = last_mol_graph.graph
if initial_mol_graph.isomorphic_to(last_mol_graph):
return "no_change"
if nx.is_connected(initial_graph.to_undirected()) and not nx.is_connected(last_graph.to_undirected()):
return "unconnected_fragments"
if last_graph.number_of_edges() < initial_graph.number_of_edges():
return "fewer_bonds"
if last_graph.number_of_edges() > initial_graph.number_of_edges():
return "more_bonds"
return "bond_change"
def jump_to_header(lines: List[str], header: str) -> List[str]:
"""
Given a list of lines, truncate the start of the list so that the first line
of the new list contains the header.
Args:
lines: List of lines.
header: Substring to match.
Returns:
Truncated lines.
Raises:
RuntimeError
"""
# Search for the header
for i, line in enumerate(lines):
if header in line.strip():
return lines[i:]
# Search failed
raise RuntimeError(f"Header {header} could not be found in the lines.")
def get_percentage(line: str, orbital: str) -> str:
"""
Retrieve the percentage character of a given orbital from a line of NBO output.
Args:
line: Line containing orbital and percentage.
orbital: Type of orbital (s, p, d, f).
Returns:
Percentage of character.
Raises:
n/a
"""
# Locate orbital in line
index = line.find(orbital)
line = line[index:]
# Locate the first open bracket
index = line.find("(")
line = line[index:]
# Isolate the percentage
return line[1:7].strip()
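# Illustrative example (a simplified, hypothetical NBO hybrid string):
#
#   get_percentage("s( 33.33%)p 2.00( 66.67%)", "p")   # -> "66.67"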
def z_int(string: str) -> int:
"""
Convert string to integer.
If the string cannot be converted to an integer (e.g. it is empty), return -1.
Args:
string: Input to be cast to int.
Returns:
Int representation.
Raises:
n/a
"""
try:
return int(string)
except ValueError:
return -1
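# Illustrative examples:
#
#   z_int("12")   # -> 12
#   z_int("")     # -> -1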
def parse_natural_populations(lines: List[str]) -> List[pd.DataFrame]:
"""
Parse the natural populations section of NBO output.
Args:
lines: QChem output lines.
Returns:
Data frame of formatted output.
Raises:
RuntimeError
"""
no_failures = True
pop_dfs = []
while no_failures:
# Natural populations
try:
lines = jump_to_header(lines, "Summary of Natural Population Analysis:")
except RuntimeError:
no_failures = False
if no_failures:
# Jump to column names
lines = lines[4:]
columns = lines[0].split()
# Jump to values
lines = lines[2:]
data = []
for line in lines:
# Termination condition
if "=" in line:
break
# Extract the values
values = line.split()
if len(values[0]) > 2:
values.insert(0, values[0][0:-3])
values[1] = values[1][-3:]
data.append(
[
str(values[0]),
int(values[1]),
float(values[2]),
float(values[3]),
float(values[4]),
float(values[5]),
float(values[6]),
]
)
if len(columns) == 8:
data[-1].append(float(values[7]))
# Store values in a dataframe
pop_dfs.append(pd.DataFrame(data=data, columns=columns))
return pop_dfs
def parse_hybridization_character(lines: List[str]) -> List[pd.DataFrame]:
"""
Parse the hybridization character section of NBO output.
Args:
lines: QChem output lines.
Returns:
Data frames of formatted output.
Raises:
RuntimeError
"""
# Orbitals
orbitals = ["s", "p", "d", "f"]
no_failures = True
lp_and_bd_dfs = []
while no_failures:
# NBO Analysis
try:
lines = jump_to_header(lines, "(Occupancy) Bond orbital/ Coefficients/ Hybrids")
except RuntimeError:
no_failures = False
if no_failures:
# Jump to values
lines = lines[2:]
# Save the data for different types of orbitals
lp_data = []
bd_data = []
# Iterate over the lines
i = -1
while True:
i += 1
line = lines[i]
# Termination conditions
if "NHO DIRECTIONALITY AND BOND BENDING" in line:
break
if "Archival summary:" in line:
break
# Lone pair
if "LP" in line or "LV" in line:
LPentry = {orbital: 0.0 for orbital in orbitals} # type: Dict[str, Union[str, int, float]]
LPentry["bond index"] = line[0:4].strip()
LPentry["occupancy"] = line[7:14].strip()
LPentry["type"] = line[16:19].strip()
LPentry["orbital index"] = line[20:22].strip()
LPentry["atom symbol"] = line[23:25].strip()
LPentry["atom number"] = line[25:28].strip()
# Populate the orbital percentages
for orbital in orbitals:
if orbital in line:
LPentry[orbital] = get_percentage(line, orbital)
# Move one line down
i += 1
line = lines[i]
# Populate the orbital percentages
for orbital in orbitals:
if orbital in line:
LPentry[orbital] = get_percentage(line, orbital)
# Save the entry
lp_data.append(LPentry)
# Bonding
if "BD" in line:
BDentry = {
f"atom {i} {orbital}": 0.0 for orbital in orbitals for i in range(1, 3)
} # type: Dict[str, Union[str, int, float]]
BDentry["bond index"] = line[0:4].strip()
BDentry["occupancy"] = line[7:14].strip()
BDentry["type"] = line[16:19].strip()
BDentry["orbital index"] = line[20:22].strip()
BDentry["atom 1 symbol"] = line[23:25].strip()
BDentry["atom 1 number"] = line[25:28].strip()
BDentry["atom 2 symbol"] = line[29:31].strip()
BDentry["atom 2 number"] = line[31:34].strip()
# Move one line down
i += 1
line = lines[i]
BDentry["atom 1 polarization"] = line[16:22].strip()
BDentry["atom 1 pol coeff"] = line[24:33].strip()
# Populate the orbital percentages
for orbital in orbitals:
if orbital in line:
BDentry[f"atom 1 {orbital}"] = get_percentage(line, orbital)
# Move one line down
i += 1
line = lines[i]
# Populate the orbital percentages
for orbital in orbitals:
if orbital in line:
BDentry[f"atom 1 {orbital}"] = get_percentage(line, orbital)
# Move down until you see an orbital
while "s" not in line:
i += 1
line = lines[i]
BDentry["atom 2 polarization"] = line[16:22].strip()
BDentry["atom 2 pol coeff"] = line[24:33].strip()
# Populate the orbital percentages
for orbital in orbitals:
if orbital in line:
BDentry[f"atom 2 {orbital}"] = get_percentage(line, orbital)
# Move one line down
i += 1
line = lines[i]
# Populate the orbital percentages
for orbital in orbitals:
if orbital in line:
BDentry[f"atom 2 {orbital}"] = get_percentage(line, orbital)
# Save the entry
bd_data.append(BDentry)
# Store values in a dataframe
lp_and_bd_dfs.append(pd.DataFrame(data=lp_data))
lp_and_bd_dfs.append(pd.DataFrame(data=bd_data))
return lp_and_bd_dfs
def parse_perturbation_energy(lines: List[str]) -> List[pd.DataFrame]:
"""
Parse the perturbation energy section of NBO output.
Args:
lines: QChem output lines.
Returns:
Data frame of formatted output.
Raises:
RuntimeError
"""
no_failures = True
e2_dfs = []
while no_failures:
# 2nd order perturbation theory analysis
try:
lines = jump_to_header(
lines,
"SECOND ORDER PERTURBATION THEORY ANALYSIS OF FOCK MATRIX IN NBO BASIS",
)
except RuntimeError:
no_failures = False
if no_failures:
# Jump to values
i = -1
while True:
i += 1
line = lines[i]
if "within" in line:
lines = lines[i:]
break
# Extract 2nd order data
e2_data = []
for line in lines:
# Termination condition
if "NATURAL BOND ORBITALS" in line:
break
# Skip conditions
if line.strip() == "":
continue
if "unit" in line:
continue
if "None" in line:
continue
if "RY" in line:
continue
# Extract the values
entry = {} # type: Dict[str, Union[str, int, float]]
entry["donor bond index"] = int(line[0:4].strip())
entry["donor type"] = str(line[5:9].strip())
entry["donor orbital index"] = int(line[10:12].strip())
entry["donor atom 1 symbol"] = str(line[13:15].strip())
entry["donor atom 1 number"] = int(line[15:17].strip())
entry["donor atom 2 symbol"] = str(line[18:20].strip())
entry["donor atom 2 number"] = z_int(line[20:22].strip())
entry["acceptor bond index"] = int(line[25:31].strip())
entry["acceptor type"] = str(line[32:36].strip())
entry["acceptor orbital index"] = int(line[37:39].strip())
entry["acceptor atom 1 symbol"] = str(line[40:42].strip())
entry["acceptor atom 1 number"] = int(line[42:44].strip())
entry["acceptor atom 2 symbol"] = str(line[45:47].strip())
entry["acceptor atom 2 number"] = z_int(line[47:49].strip())
entry["perturbation energy"] = float(line[50:62].strip())
entry["energy difference"] = float(line[62:70].strip())
entry["fock matrix element"] = float(line[70:79].strip())
e2_data.append(entry)
# Store values in a dataframe
e2_dfs.append(pd.DataFrame(data=e2_data))
return e2_dfs
def nbo_parser(filename: str) -> Dict[str, List[pd.DataFrame]]:
"""
Parse all the important sections of NBO output.
Args:
filename: Path to QChem NBO output.
Returns:
Data frames of formatted output.
Raises:
RuntimeError
"""
# Open the lines
with zopen(filename, mode="rt", encoding="ISO-8859-1") as f:
lines = f.readlines()
# Compile the dataframes
dfs = {}
dfs["natural_populations"] = parse_natural_populations(lines)
dfs["hybridization_character"] = parse_hybridization_character(lines)
dfs["perturbation_energy"] = parse_perturbation_energy(lines)
return dfs
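# A minimal usage sketch (the output file name is hypothetical; requires pandas):
#
#   dfs = nbo_parser("nbo_job.qcout")
#   populations = dfs["natural_populations"][0]     # pandas DataFrame of NPA charges
#   perturbations = dfs["perturbation_energy"][0]   # 2nd order perturbation analysis
#   print(populations.head())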
| gmatteo/pymatgen | pymatgen/io/qchem/outputs.py | Python | mit | 76,797 | ["Q-Chem", "pymatgen"] | edd1546a73e380dadbd04d6bb2423fa99f9e5843cf2b4bd684dd88f4910ae360 |
import UWG
import os
# Gets path of current directory
CURR_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# To run UWG provide the following inputs
epw_directory = os.path.join(CURR_DIRECTORY,"epw") # EPW file directory
epw_filename = "SGP_Singapore.486980_IWEC.epw" # EPW file name
uwg_param_directory = CURR_DIRECTORY # .uwg file directory
uwg_param_filename = "initialize.uwg" # .uwg file name
# Initialize the UWG object
uwg = UWG.UWG(epw_directory, epw_filename, uwg_param_directory, uwg_param_filename)
# Run the simulation
uwg.run()
| saeranv/UWG_Python | resources/quickstart.py | Python | gpl-3.0 | 587 | ["EPW"] | 8d9bde47d61f27ac73efd87feed76656b2d90211a89789b602bf07929ab32970 |
import OOMP
OOMP.oompAddDetail(category="type",code="ANTE",name="Antenna")
OOMP.oompAddDetail(category="type",code="BAHO",name="Battery Holder")
OOMP.oompAddDetail(category="type",code="BAPI",name="Badge Pin")
OOMP.oompAddDetail(category="type",code="BREB",name="Breadboard")
OOMP.oompAddDetail(category="type",code="BUTA",name="Pushbutton (Tactile)")
OOMP.oompAddDetail(category="type",code="BUTP",name="Pushbutton")
OOMP.oompAddDetail(category="type",code="BUZZ",name="Buzzer")
OOMP.oompAddDetail(category="type",code="CABL",name="Cable")
OOMP.oompAddDetail(category="type",code="CAPC",name="Capacitor (Ceramic)")
OOMP.oompAddDetail(category="type",code="CAPE",name="Capacitor (Electrolytic)")
OOMP.oompAddDetail(category="type",code="CAPT",name="Capacitor (Tantalum)")
OOMP.oompAddDetail(category="type",code="CERE",name="Ceramic Resonator")
OOMP.oompAddDetail(category="type",code="DBCO",name="Connector")
OOMP.oompAddDetail(category="type",code="CRHO",name="Crimp Housing")
OOMP.oompAddDetail(category="type",code="DCJP",name="DC Jack")
OOMP.oompAddDetail(category="type",code="DIOD",name="Diode")
OOMP.oompAddDetail(category="type",code="DIOS",name="Diode (Schottky)")
OOMP.oompAddDetail(category="type",code="DISP",name="Display")
OOMP.oompAddDetail(category="type",code="FERB",name="Ferrite Bead")
OOMP.oompAddDetail(category="type",code="HEAD",name="Header")
OOMP.oompAddDetail(category="type",code="HEAF",name="Female Header")
OOMP.oompAddDetail(category="type",code="HEAL",name="Header (Long)")
OOMP.oompAddDetail(category="type",code="HEDS",name="Headphone Socket")
OOMP.oompAddDetail(category="type",code="HELF",name="Female Header (Long)")
OOMP.oompAddDetail(category="type",code="HESH",name="Heat Shrink")
OOMP.oompAddDetail(category="type",code="ICIC",name="")
OOMP.oompAddDetail(category="type",code="MCUU",name="MCU")
OOMP.oompAddDetail(category="type",code="ICSO",name="IC Socket")
OOMP.oompAddDetail(category="type",code="INDU",name="Inductor")
OOMP.oompAddDetail(category="type",code="JSTS",name="JST Socket")
OOMP.oompAddDetail(category="type",code="JUMP",name="Jumper")
OOMP.oompAddDetail(category="type",code="LEDS",name="LED")
OOMP.oompAddDetail(category="type",code="MICR",name="Microphone")
OOMP.oompAddDetail(category="type",code="MOSN",name="N-Ch. MOSFET")
OOMP.oompAddDetail(category="type",code="MOSP",name="P-Ch. MOSFET")
OOMP.oompAddDetail(category="type",code="MOTO",name="Motor")
OOMP.oompAddDetail(category="type",code="NHFF",name="Nylon Standoff (F-F)")
OOMP.oompAddDetail(category="type",code="NNUT",name="Nylon Nut")
OOMP.oompAddDetail(category="type",code="NSCR",name="Nylon Screw")
OOMP.oompAddDetail(category="type",code="NUTT",name="Nut")
OOMP.oompAddDetail(category="type",code="OPAM",name="Op Amp")
OOMP.oompAddDetail(category="type",code="PHTR",name="Phototransistor")
OOMP.oompAddDetail(category="type",code="POTE",name="Potentiometer")
OOMP.oompAddDetail(category="type",code="POAD",name="Power Adapter")
OOMP.oompAddDetail(category="type",code="PRIV",name="Plastic Rivet")
OOMP.oompAddDetail(category="type",code="RBCC",name="Crimped Ribbon Cable")
OOMP.oompAddDetail(category="type",code="REFU",name="Resetable Fuse")
OOMP.oompAddDetail(category="type",code="RELA",name="Relay")
OOMP.oompAddDetail(category="type",code="RESA",name="Resistor Array")
OOMP.oompAddDetail(category="type",code="RESE",name="Resistor")
OOMP.oompAddDetail(category="type",code="RJ45",name="RJ45")
OOMP.oompAddDetail(category="type",code="SCRE",name="Machine Screw")
OOMP.oompAddDetail(category="type",code="SDCS",name="SD Card Socket")
OOMP.oompAddDetail(category="type",code="SENS",name="Sensor")
OOMP.oompAddDetail(category="type",code="SWIS",name="Switch (Slide)")
OOMP.oompAddDetail(category="type",code="TERS",name="Screw Terminal")
OOMP.oompAddDetail(category="type",code="THER",name="Thermistor")
OOMP.oompAddDetail(category="type",code="TILS",name="Tilt Switch")
OOMP.oompAddDetail(category="type",code="TRNN",name="NPN Transistor")
OOMP.oompAddDetail(category="type",code="TRNP",name="PNP Transistor")
OOMP.oompAddDetail(category="type",code="USBS",name="USB Socket")
OOMP.oompAddDetail(category="type",code="UFLS",name="UFL Socket")
OOMP.oompAddDetail(category="type",code="VARI",name="Varistor")
OOMP.oompAddDetail(category="type",code="VREG",name="Voltage Regulator")
OOMP.oompAddDetail(category="type",code="WIRS",name="Stranded Wire")
OOMP.oompAddDetail(category="type",code="XTAL",name="Crystal")
| oomlout/oomlout-OOMP | OOMPdetailsType.py | Python | cc0-1.0 | 4,439 | ["CRYSTAL"] | 4e541550caaaab230496af08463308f53723881e65dec3a35788fa0a9bb5e217 |
import time
from org.gumtree.gumnix.sics.control.events import DynamicControllerListenerAdapter
from org.gumtree.gumnix.sics.control import IStateMonitorListener
from org.gumtree.gumnix.sics.io import SicsProxyListenerAdapter
from org.eclipse.swt.events import DisposeListener
from org.eclipse.swt.widgets import TypedListener
#from org.gumtree.util.messaging import EventHandler
import sys, os
sys.path.append(str(os.path.dirname(get_project_path('Internal'))))
from Internal import sicsext, HISTORY_KEY_WORDS
from Internal.sicsext import *
from au.gov.ansto.bragg.nbi.ui.scripting import ConsoleEventHandler
from org.eclipse.swt.widgets import Display
from java.lang import Runnable
from java.lang import System
from java.io import File
from time import strftime, localtime
import traceback
sics.ready = False
__script__.title = 'Initialised'
__script__.version = ''
__data_folder__ = 'W:/data/current'
#__data_folder__ = 'Z:/testing/pelican'
__export_folder__ = 'W:/data/current/reports'
__buffer_log_file__ = __export_folder__
Dataset.__dicpath__ = get_absolute_path('/Internal/path_table')
System.setProperty('sics.data.path', __data_folder__)
try:
__dispose_all__(None)
except:
pass
fi = File(__buffer_log_file__)
if not fi.exists():
if not fi.mkdirs():
print 'Error: failed to make directory: ' + __buffer_log_file__
__history_log_file__ = __buffer_log_file__ + '/History.txt'
__buffer_log_file__ += '/LogFile_new.txt'
__buffer_logger__ = open(__buffer_log_file__, 'a')
__history_logger__ = open(__history_log_file__, 'a')
print 'Waiting for SICS connection'
while sics.getSicsController() == None:
time.sleep(1)
time.sleep(3)
__scan_status_node__ = sics.getSicsController().findComponentController('/commands/scan/runscan/feedback/status')
__scan_variable_node__ = sics.getSicsController().findComponentController('/commands/scan/runscan/scan_variable')
__save_count_node__ = sics.getSicsController().findComponentController('/experiment/save_count')
__file_name_node__ = sics.getSicsController().findComponentController('/experiment/file_name')
__file_status_node__ = sics.getSicsController().findComponentController('/experiment/file_status')
#saveCount = int(saveCountNode.getValue().getIntData())
__cur_status__ = str(__scan_status_node__.getValue().getStringData())
__file_name__ = str(__file_name_node__.getValue().getStringData())
class __Display_Runnable__(Runnable):
def __init__(self):
pass
def run(self):
global __UI__
global __dispose_listener__
__UI__.addDisposeListener(__dispose_listener__)
__file_to_add__ = None
__newfile_enabled__ = True
def add_dataset():
global __newfile_enabled__
if not __newfile_enabled__ :
return
if __file_to_add__ is None:
return
global __DATASOURCE__
try:
__DATASOURCE__.addDataset(__file_to_add__, True)
except:
print 'error in adding dataset: ' + __file_to_add__
class __SaveCountListener__(DynamicControllerListenerAdapter):
def __init__(self):
self.saveCount = __save_count_node__.getValue().getIntData()
pass
def valueChanged(self, controller, newValue):
global __file_to_add__
newCount = int(newValue.getStringData());
if newCount != self.saveCount:
self.saveCount = newCount;
try:
axis_name.value = __scan_variable_node__.getValue().getStringData()
except:
pass
try:
checkFile = File(__file_name_node__.getValue().getStringData());
checkFile = File(__data_folder__ + "/" + checkFile.getName());
__file_to_add__ = checkFile.getAbsolutePath();
if not checkFile.exists():
print "The target file :" + __file_to_add__ + " can not be found";
return
runnable = __Display_Runnable__()
runnable.run = add_dataset
Display.getDefault().asyncExec(runnable)
except:
print 'failed to add dataset ' + __file_to_add__
__saveCountListener__ = __SaveCountListener__()
__save_count_node__.addComponentListener(__saveCountListener__)
def update_buffer_log_folder():
global __buffer_log_file__, __export_folder__, __buffer_logger__, __history_log_file__, __history_logger__
__buffer_log_file__ = __export_folder__
fi = File(__buffer_log_file__)
if not fi.exists():
if not fi.mkdirs():
print 'Error: failed to make directory: ' + __buffer_log_file__
__history_log_file__ = __buffer_log_file__ + '/History.txt'
__buffer_log_file__ += '/LogFile.txt'
if __buffer_logger__:
__buffer_logger__.close()
__buffer_logger__ = open(__buffer_log_file__, 'a')
if __history_logger__:
__history_logger__.close()
__history_logger__ = open(__history_log_file__, 'a')
def __run_script__(dss):
pass
class __State_Monitor__(IStateMonitorListener):
def __init__(self):
pass
def stateChanged(self, state, infoMessage):
print state
print infoMessage
pass
def __dispose__():
pass
# __scan_status_node__.removeComponentListener(__statusListener__)
# __m2_node__.removeComponentListener(__m2_listener__)
# __s1_node__.removeComponentListener(__s1_listener__)
# __s2_node__.removeComponentListener(__s2_listener__)
# __a2_node__.removeComponentListener(__a2_listener__)
def __load_experiment_data__():
basename = sicsext.getBaseFilename()
fullname = str(System.getProperty('sics.data.path') + '/' + basename)
df.datasets.clear()
ds = df[fullname]
data = ds[str(data_name.value)]
axis = ds[str(axis_name.value)]
if data.size > axis.size:
data = data[:axis.size]
ds2 = Dataset(data, axes=[axis])
ds2.title = ds.id
ds2.location = fullname
Plot1.set_dataset(ds2)
Plot1.x_label = axis_name.value
Plot1.y_label = str(data_name.value)
Plot1.title = str(data_name.value) + ' vs ' + axis_name.value
Plot1.pv.getPlot().setMarkerEnabled(True)
# This function is called when pushing the Run button in the control UI.
def __std_run_script__(fns):
# Use the provided resources, please don't remove.
global Plot1
global Plot2
global Plot3
# check if a list of file names has been given
if (fns is None or len(fns) == 0) :
print 'no input datasets'
else :
for fn in fns:
# load dataset with each file name
ds = Plot1.ds
if ds != None and len(ds) > 0:
if ds[0].location == fn:
return
df.datasets.clear()
ds = df[fn]
dname = str(data_name.value)
bm2 = ds[dname]
qm = ds[str(axis_name.value)]
ds2 = Dataset(bm2, axes=[qm])
ds2.title = ds.id
ds2.location = fn
Plot1.set_dataset(ds2)
Plot1.x_label = axis_name.value
Plot1.y_label = dname
Plot1.title = dname + ' vs ' + axis_name.value
Plot1.pv.getPlot().setMarkerEnabled(True)
peak_pos.value = float('NaN')
fit_curve()
def __dataset_added__(fns = None):
pass
def __std_fit_curve__():
global Plot1
ds = Plot1.ds
if ds is None or len(ds) == 0:
slog('Error: no curve to fit in Plot1.')
return
for d in ds:
if d.title == 'fitting':
Plot1.remove_dataset(d)
d0 = ds[0]
try:
fitting = Fitting(GAUSSIAN_FITTING)
fitting.set_histogram(d0)
res = fitting.fit()
res.var[:] = 0
res.title = 'fitting'
Plot1.add_dataset(res)
slog(str(fitting.params))
mean = fitting.mean
slog('POS_OF_PEAK=' + str(mean))
slog('FWHM=' + str(2.35482 * math.fabs(fitting.params['sigma'])))
peak_pos.value = mean
except:
slog('failed to fit with Gaussian curve.')
return
def previous_step():
load_script(previous_file)
def next_step():
load_script(next_file)
def logBook(text):
global __buffer_logger__
global __history_logger__
try:
tsmp = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
__buffer_logger__.write(tsmp + ' ' + text + '\n')
__buffer_logger__.flush()
for item in HISTORY_KEY_WORDS:
if text.startswith(item):
__history_logger__.write(tsmp + ' ' + text + '\n')
__history_logger__.flush()
except:
traceback.print_exc(file=sys.stdout)
print 'failed to log'
def slog(text):
logln(text + '\n')
logBook(text)
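# Illustrative usage (mirrors the calls made elsewhere in this script):
#   slog('POS_OF_PEAK=' + str(1.23))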
class BatchStatusListener(SicsProxyListenerAdapter):
def __init__(self):
pass
def proxyConnected(self):
pass
def proxyConnectionReqested(self):
pass
def proxyDisconnected(self):
pass
def messageReceived(self, message, channelId):
if str(channelId) == 'rawBatch':
logBook(message)
def messageSent(self, message, channelId):
pass
try:
sics.SicsCore.getSicsManager().proxy().removeProxyListener(__batch_status_listener__)
except:
pass
__batch_status_listener__ = BatchStatusListener()
sics.SicsCore.getSicsManager().proxy().addProxyListener(__batch_status_listener__)
class SICSConsoleEventHandler(ConsoleEventHandler):
def __init__(self, topic):
ConsoleEventHandler.__init__(self, topic)
def handleEvent(self, event):
data = str(event.getProperty('sentMessage'))
logBook(data)
__sics_console_event_handler_sent__ = SICSConsoleEventHandler('org/gumtree/ui/terminal/telnet/sent')
__sics_console_event_handler_received__ = SICSConsoleEventHandler('org/gumtree/ui/terminal/telnet/received')
__sics_console_event_handler_sent__.activate()
__sics_console_event_handler_received__.activate()
class __Dispose_Listener__(DisposeListener):
def __init__(self):
pass
def widgetDisposed(self, event):
pass
def __dispose_all__(event):
global __batch_status_listener__
global __sics_console_event_handler_sent__
global __sics_console_event_handler_received__
global __statusListener__
global __save_count_node__
global __saveCountListener__
sics.SicsCore.getSicsManager().proxy().removeProxyListener(__batch_status_listener__)
__sics_console_event_handler_sent__.deactivate()
__sics_console_event_handler_received__.deactivate()
__save_count_node__.removeComponentListener(__saveCountListener__)
if __buffer_logger__:
__buffer_logger__.close()
if __history_logger__:
__history_logger__.close()
__dispose_listener__ = __Dispose_Listener__()
__dispose_listener__.widgetDisposed = __dispose_all__
__display_run__ = __Display_Runnable__()
Display.getDefault().asyncExec(__display_run__)
sics.ready = True
#load_script('KKB-Plot and Reduction.py')
| Gumtree/Kookaburra_scripts | Internal/Initialise.py | Python | epl-1.0 | 11,361 | ["Gaussian"] | 82661620e8b6300ccd6cd0fb7e55f6ee2d59dfe79bfb449a804cc41ed324e147 |
"""Scraper for the Supreme Court of Vermont
CourtID: vt
Court Short Name: VT
Author: Brian W. Carver
Date created: 18 Aug 2013
Reviewer: Mike Lissner
"""
import re
from datetime import datetime
from lxml import html
from juriscraper.OpinionSite import OpinionSite
class Site(OpinionSite):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.url = 'http://info.libraries.vermont.gov/supct/op.html'
def _get_download_urls(self):
path = '//h4/a/@href'
return list(self.html.xpath(path))
def _get_case_names(self):
case_names = []
path = "//h4/a"
for e in self.html.xpath(path):
s = html.tostring(e, method='text', encoding='unicode')
expression = '(^[^\(]*)' # Start of line and all characters until '('
case_name = re.search(expression, s, re.MULTILINE).group(1)
case_names.append(case_name)
return case_names
def _get_case_dates(self):
case_dates = []
path = '//h4/a'
for e in self.html.xpath(path):
s = html.tostring(e, method='text', encoding='unicode')
expression = '(\d+-\w{3}-\d{4})'
date_string = re.search(expression, s, re.MULTILINE).group(1)
case_dates.append(datetime.strptime(date_string, '%d-%b-%Y').date())
return case_dates
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_docket_numbers(self):
docket_numbers = []
path = '//h4/a'
for e in self.html.xpath(path):
s = ' '.join(html.tostring(e, method='text', encoding='unicode').split())
regexes = ['(\d{4}-\d{2,3} \& \d{4}-\d{2,3})', '(\d{4}-\d{2,3})']
for regex in regexes:
try:
docket_number = re.search(regex, s).group(1)
break
except AttributeError:
# Happens when a regex doesn't match.
continue
docket_numbers.append(docket_number)
return docket_numbers
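# A hedged usage sketch (illustrative only; ``parse`` and the attribute access below
# follow the juriscraper OpinionSite workflow and are assumptions here):
#
#   site = Site()
#   site.parse()
#   for docket, date, name in zip(site.docket_numbers, site.case_dates, site.case_names):
#       print(docket, date, name)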
| brianwc/juriscraper | opinions/united_states/state/vt.py | Python | bsd-2-clause | 2,133 | ["Brian"] | d259c2799bb3789536b4afac01f5e551fd5d3f4ecd6c1ad5f9a49fb4b23600ed |
"""Retrieve run information describing files to process in a pipeline.
This handles two methods of getting processing information: from a Galaxy
next gen LIMS system or an on-file YAML configuration.
"""
import collections
from contextlib import closing
import copy
import glob
import itertools
import os
import string
import toolz as tz
import yaml
from bcbio import install, utils
from bcbio.bam import ref
from bcbio.log import logger
from bcbio.distributed import objectstore
from bcbio.illumina import flowcell
from bcbio.pipeline import alignment, config_utils, genome
from bcbio.pipeline import datadict as dd
from bcbio.provenance import diagnostics, programs, versioncheck
from bcbio.provenance import data as provenancedata
from bcbio.variation import effects, genotype, population, joint, vcfutils
from bcbio.variation.cortex import get_sample_name
from bcbio.bam.fastq import open_fastq
ALLOWED_CONTIG_NAME_CHARS = set(list(string.digits) + list(string.ascii_letters) + ["-", "_", "*", ":", "."])
ALGORITHM_NOPATH_KEYS = ["variantcaller", "realign", "recalibrate",
"phasing", "svcaller", "hetcaller", "jointcaller", "tools_off", "mixup_check"]
def organize(dirs, config, run_info_yaml, sample_names=None, add_provenance=True):
"""Organize run information from a passed YAML file or the Galaxy API.
Creates the high level structure used for subsequent processing.
sample_names is a list of samples to include from the overall file, for cases
where we are running multiple pipelines from the same configuration file.
"""
logger.info("Using input YAML configuration: %s" % run_info_yaml)
assert run_info_yaml and os.path.exists(run_info_yaml), \
"Did not find input sample YAML file: %s" % run_info_yaml
run_details = _run_info_from_yaml(dirs, run_info_yaml, config, sample_names)
out = []
for item in run_details:
item["dirs"] = dirs
if "name" not in item:
item["name"] = ["", item["description"]]
elif isinstance(item["name"], basestring):
description = "%s-%s" % (item["name"], clean_name(item["description"]))
item["name"] = [item["name"], description]
item["description"] = description
# add algorithm details to configuration, avoid double specification
item["resources"] = _add_remote_resources(item["resources"])
item["config"] = config_utils.update_w_custom(config, item)
item.pop("algorithm", None)
item = add_reference_resources(item)
# Create temporary directories and make absolute, expanding environmental variables
tmp_dir = tz.get_in(["config", "resources", "tmp", "dir"], item)
if tmp_dir:
# if no environmental variables, make and normalize the directory
# otherwise we normalize later in distributed.transaction:
if os.path.expandvars(tmp_dir) == tmp_dir:
tmp_dir = utils.safe_makedir(os.path.expandvars(tmp_dir))
tmp_dir = genome.abs_file_paths(tmp_dir)
item["config"]["resources"]["tmp"]["dir"] = tmp_dir
out.append(item)
out = _add_provenance(out, dirs, config, add_provenance)
return out
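# A hedged usage sketch (paths and the project YAML are hypothetical; ``config`` is
# assumed to be a loaded bcbio system configuration and ``dirs`` comes from
# ``setup_directories`` defined later in this module):
#
#   dirs = setup_directories("/work", None, config, "/path/to/bcbio_system.yaml")
#   samples = organize(dirs, config, "/path/to/project.yaml", sample_names=["sample1"])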
def normalize_world(data):
Normalize a data object, useful after serialization via CWL.
"""
data = _normalize_files(data)
return data
def _add_provenance(items, dirs, config, add_provenance=True):
if add_provenance:
p = programs.write_versions(dirs, config=config)
d = provenancedata.write_versions(dirs, items)
versioncheck.testall(items)
p_db = diagnostics.initialize(dirs)
out = []
for item in items:
if add_provenance:
entity_id = diagnostics.store_entity(item)
item["config"]["resources"]["program_versions"] = p
item["provenance"] = {"programs": p, "entity": entity_id,
"db": p_db, "data": d}
out.append([item])
return out
def setup_directories(work_dir, fc_dir, config, config_file):
fastq_dir, galaxy_dir, config_dir = _get_full_paths(flowcell.get_fastq_dir(fc_dir)
if fc_dir else None,
config, config_file)
# check default install for tool data if not found locally
if not os.path.exists(os.path.join(galaxy_dir, "tool-data")):
_, config_file = config_utils.load_system_config(work_dir=work_dir)
if os.path.exists(os.path.join(os.path.dirname(config_file), "tool-data")):
galaxy_dir = os.path.dirname(config_file)
return {"fastq": fastq_dir, "galaxy": galaxy_dir,
"work": work_dir, "flowcell": fc_dir, "config": config_dir}
def _get_full_paths(fastq_dir, config, config_file):
"""Retrieve full paths for directories in the case of relative locations.
"""
if fastq_dir:
fastq_dir = utils.add_full_path(fastq_dir)
config_dir = utils.add_full_path(os.path.dirname(config_file))
galaxy_config_file = utils.add_full_path(config.get("galaxy_config", "universe_wsgi.ini"),
config_dir)
return fastq_dir, os.path.dirname(galaxy_config_file), config_dir
# ## Remote resources
def _add_remote_resources(resources):
"""Retrieve remote resources like GATK/MuTect jars present in S3.
"""
out = copy.deepcopy(resources)
for prog, info in resources.iteritems():
for key, val in info.iteritems():
if key == "jar" and objectstore.is_remote(val):
store_dir = utils.safe_makedir(os.path.join(os.getcwd(), "inputs", "jars", prog))
fname = objectstore.download(val, store_dir, store_dir)
version_file = os.path.join(store_dir, "version.txt")
if not utils.file_exists(version_file):
version = install.get_gatk_jar_version(prog, fname)
with open(version_file, "w") as out_handle:
out_handle.write(version)
else:
with open(version_file) as in_handle:
version = in_handle.read().strip()
del out[prog][key]
out[prog]["dir"] = store_dir
out[prog]["version"] = version
return out
# ## Genome reference information
def add_reference_resources(data):
"""Add genome reference information to the item to process.
"""
aligner = data["config"]["algorithm"].get("aligner", None)
data["reference"] = genome.get_refs(data["genome_build"], aligner, data["dirs"]["galaxy"], data)
_check_ref_files(data["reference"], data)
# back compatible `sam_ref` target
data["sam_ref"] = utils.get_in(data, ("reference", "fasta", "base"))
ref_loc = utils.get_in(data, ("config", "resources", "species", "dir"),
utils.get_in(data, ("reference", "fasta", "base")))
data["genome_resources"] = genome.get_resources(data["genome_build"], ref_loc, data)
if effects.get_type(data) == "snpeff":
data["reference"]["snpeff"] = effects.get_snpeff_files(data)
data = _fill_validation_targets(data)
data = _fill_prioritization_targets(data)
# Re-enable when we have ability to re-define gemini configuration directory
if False:
if population.do_db_build([data], need_bam=False):
data["reference"]["gemini"] = population.get_gemini_files(data)
return data
def _check_ref_files(ref_info, data):
problems = []
for contig in ref.file_contigs(ref_info["fasta"]["base"], data["config"]):
cur_problems = set([])
for char in list(contig.name):
if char not in ALLOWED_CONTIG_NAME_CHARS:
cur_problems.add(char)
if len(cur_problems) > 0:
problems.append("Found non-allowed characters in chromosome name %s: %s" %
(contig.name, " ".join(list(cur_problems))))
if len(problems) > 0:
msg = ("\nProblems with input reference file %s\n" % ref_info["fasta"]["base"])
raise ValueError(msg + "\n".join(problems) + "\n")
def _fill_validation_targets(data):
"""Fill validation targets pointing to globally installed truth sets.
"""
ref_file = dd.get_ref_file(data)
sv_targets = zip(itertools.repeat("svvalidate"),
tz.get_in(["config", "algorithm", "svvalidate"], data, {}).keys())
for vtarget in [list(xs) for xs in [["validate"], ["validate_regions"]] + sv_targets]:
val = tz.get_in(["config", "algorithm"] + vtarget, data)
if val and not os.path.exists(val):
installed_val = os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "validation", val))
if os.path.exists(installed_val):
data = tz.update_in(data, ["config", "algorithm"] + vtarget, lambda x: installed_val)
else:
raise ValueError("Configuration problem. Validation file not found for %s: %s" %
(vtarget, val))
return data
def _fill_prioritization_targets(data):
"""Fill in globally installed files for prioritization.
"""
ref_file = dd.get_ref_file(data)
for target in [["svprioritize"]]:
val = tz.get_in(["config", "algorithm"] + target, data)
if val and not os.path.exists(val):
installed_vals = glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir,
"coverage", "prioritize", val + "*.bed.gz")))
if len(installed_vals) == 0:
raise ValueError("Configuration problem. Prioritization file not found for %s: %s" %
(target, val))
elif len(installed_vals) == 1:
installed_val = installed_vals[0]
else:
# check for partial matches
installed_val = None
for v in installed_vals:
if v.endswith(val + ".bed.gz"):
installed_val = v
break
# handle date-stamped inputs
if not installed_val:
installed_val = sorted(installed_vals, reverse=True)[0]
data = tz.update_in(data, ["config", "algorithm"] + target, lambda x: installed_val)
return data
# ## Sample and BAM read group naming
def _clean_metadata(data):
batches = tz.get_in(("metadata", "batch"), data)
# Ensure batches are strings
if batches:
if isinstance(batches, (list, tuple)):
batches = [str(x) for x in batches]
else:
batches = str(batches)
data["metadata"]["batch"] = batches
# If we have jointcalling, add a single batch if not present
elif tz.get_in(["algorithm", "jointcaller"], data):
if "metadata" not in data:
data["metadata"] = {}
data["metadata"]["batch"] = "%s-joint" % dd.get_sample_name(data)
return data
def _clean_algorithm(data):
"""Clean algorithm keys, handling items that can be specified as lists or single items.
"""
# convert single items to lists
for key in ["variantcaller", "jointcaller"]:
val = tz.get_in(["algorithm", key], data)
if val:
if not isinstance(val, (list, tuple)) and isinstance(val, basestring):
val = [val]
data["algorithm"][key] = val
return data
def _clean_characters(x):
"""Clean problem characters in sample lane or descriptions.
"""
for problem in [" ", "."]:
x = x.replace(problem, "_")
return x
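# Example (illustrative sketch; the sample name is hypothetical):
#
#   _clean_characters("tumor sample.rep1")  # -> "tumor_sample_rep1"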
def prep_rg_names(item, config, fc_name, fc_date):
"""Generate read group names from item inputs.
"""
if fc_name and fc_date:
lane_name = "%s_%s_%s" % (item["lane"], fc_date, fc_name)
else:
lane_name = item["description"]
return {"rg": item["description"],
"sample": item["description"],
"lane": lane_name,
"pl": (tz.get_in(["algorithm", "platform"], item)
or tz.get_in(["algorithm", "platform"], item, "illumina")).lower(),
"lb": tz.get_in(["metadata", "library"], item),
"pu": tz.get_in(["metadata", "platform_unit"], item) or lane_name}
# ## Configuration file validation
def _check_for_duplicates(xs, attr, check_fn=None):
"""Identify and raise errors on duplicate items.
"""
dups = []
for key, vals in itertools.groupby(x[attr] for x in xs):
if len(list(vals)) > 1:
dups.append(key)
if len(dups) > 0:
psamples = []
for x in xs:
if x[attr] in dups:
psamples.append(x)
# option to skip problem based on custom input function.
if check_fn and check_fn(psamples):
return
descrs = [x["description"] for x in psamples]
raise ValueError("Duplicate '%s' found in input sample configuration.\n"
"Required to be unique for a project: %s\n"
"Problem found in these samples: %s" % (attr, dups, descrs))
def _check_for_batch_clashes(xs):
"""Check that batch names do not overlap with sample names.
"""
names = set([x["description"] for x in xs])
dups = set([])
for x in xs:
batches = tz.get_in(("metadata", "batch"), x)
if batches:
if not isinstance(batches, (list, tuple)):
batches = [batches]
for batch in batches:
if batch in names:
dups.add(batch)
if len(dups) > 0:
raise ValueError("Batch names must be unique from sample descriptions.\n"
"Clashing batch names: %s" % sorted(list(dups)))
def _check_for_problem_somatic_batches(items, config):
"""Identify problem batch setups for somatic calling.
We do not support multiple tumors in a single batch and VarDict(Java) does not
handle pooled calling, only tumor/normal.
"""
to_check = []
for data in items:
data = copy.deepcopy(data)
data["config"] = config_utils.update_w_custom(config, data)
to_check.append(data)
data_by_batches = collections.defaultdict(list)
for data in to_check:
batches = dd.get_batches(data)
if batches:
for batch in batches:
data_by_batches[batch].append(data)
for batch, items in data_by_batches.items():
if vcfutils.get_paired(items):
vcfutils.check_paired_problems(items)
elif len(items) > 1:
vcs = list(set(tz.concat([dd.get_variantcaller(data) or [] for data in items])))
if any(x.lower().startswith("vardict") for x in vcs):
raise ValueError("VarDict does not support pooled non-tumor/normal calling, in batch %s: %s"
% (batch, [dd.get_sample_name(data) for data in items]))
elif any(x.lower() == "mutect" for x in vcs):
raise ValueError("Mutect requires a 'phenotype: tumor' sample for calling, in batch %s: %s"
% (batch, [dd.get_sample_name(data) for data in items]))
def _check_for_misplaced(xs, subkey, other_keys):
"""Ensure configuration keys are not incorrectly nested under other keys.
"""
problems = []
for x in xs:
check_dict = x.get(subkey, {})
for to_check in other_keys:
if to_check in check_dict:
problems.append((x["description"], to_check, subkey))
if len(problems) > 0:
raise ValueError("\n".join(["Incorrectly nested keys found in sample YAML. These should be top level:",
" sample | key name | nested under ",
"----------------+-----------------+----------------"] +
["% 15s | % 15s | % 15s" % (a, b, c) for (a, b, c) in problems]))
ALGORITHM_KEYS = set(["platform", "aligner", "bam_clean", "bam_sort",
"trim_reads", "adapters", "custom_trim", "species", "kraken",
"align_split_size", "align_prep_method", "quality_bin", "transcriptome_align",
"quality_format", "write_summary", "merge_bamprep",
"coverage", "coverage_interval", "ploidy", "indelcaller",
"variantcaller", "jointcaller", "variant_regions", "peakcaller",
"effects", "mark_duplicates",
"svcaller", "svvalidate", "svprioritize",
"hlacaller", "hlavalidate",
"sv_regions", "hetcaller", "problem_region_dir",
"recalibrate", "realign", "phasing", "validate",
"validate_regions", "validate_genome_build", "validate_method",
"clinical_reporting", "nomap_split_size",
"nomap_split_targets", "ensemble", "background",
"disambiguate", "strandedness", "fusion_mode",
"min_read_length", "coverage_depth_min", "callable_min_size",
"min_allele_fraction",
"remove_lcr", "joint_group_size",
"archive", "tools_off", "tools_on", "transcript_assembler",
"mixup_check", "expression_caller"] +
# development
["cwl_reporting"] +
# back compatibility
["coverage_depth_max", "coverage_depth"])
ALG_ALLOW_BOOLEANS = set(["merge_bamprep", "mark_duplicates", "remove_lcr",
"clinical_reporting", "transcriptome_align",
"fusion_mode", "assemble_transcripts", "trim_reads",
"recalibrate", "realign", "cwl_reporting"])
ALG_ALLOW_FALSE = set(["aligner", "bam_clean", "bam_sort",
"effects", "phasing", "mixup_check", "indelcaller",
"variantcaller"])
ALG_DOC_URL = "https://bcbio-nextgen.readthedocs.org/en/latest/contents/configuration.html#algorithm-parameters"
def _check_algorithm_keys(item):
"""Check for unexpected keys in the algorithm section.
Needs to be manually updated when introducing new keys, but avoids silent bugs
with typos in key names.
"""
problem_keys = [k for k in item["algorithm"].iterkeys() if k not in ALGORITHM_KEYS]
if len(problem_keys) > 0:
raise ValueError("Unexpected configuration keyword in 'algorithm' section: %s\n"
"See configuration documentation for supported options:\n%s\n"
% (problem_keys, ALG_DOC_URL))
def _check_algorithm_values(item):
"""Check for misplaced inputs in the algorithms.
- Identify incorrect boolean values where a choice is required.
"""
problems = []
for k, v in item.get("algorithm", {}).items():
if v is True and k not in ALG_ALLOW_BOOLEANS:
problems.append("%s set as true" % k)
elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
problems.append("%s set as false" % k)
if len(problems) > 0:
raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s"
"\nSee configuration documentation for supported options:\n%s\n"
% (item["description"], "\n".join(problems), ALG_DOC_URL))
def _check_toplevel_misplaced(item):
"""Check for algorithm keys accidentally placed at the top level.
"""
problem_keys = [k for k in item.keys() if k in ALGORITHM_KEYS]
if len(problem_keys) > 0:
raise ValueError("Unexpected configuration keywords found in top level of %s: %s\n"
"This should be placed in the 'algorithm' section."
% (item["description"], problem_keys))
def _detect_fastq_format(in_file, MAX_RECORDS=1000):
ranges = {"sanger": (33, 126),
"solexa": (59, 126),
"illumina_1.3+": (64, 126),
"illumina_1.5+": (66, 126)}
gmin, gmax = 99, 0
possible = set(ranges.keys())
with closing(open_fastq(in_file)) as in_handle:
four = itertools.islice(in_handle, 3, None, 4)
count = 0
for line in four:
if len(possible) == 1:
return possible
if count > MAX_RECORDS:
break
count += 1
vals = [ord(c) for c in line.rstrip()]
# if there is a short sequence, skip it
if len(vals) < 20:
continue
lmin = min(vals)
lmax = max(vals)
for encoding, (emin, emax) in ranges.items():
if encoding in possible:
if lmin < emin or lmax > emax:
possible.remove(encoding)
return possible
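# Example (illustrative sketch; the file name is hypothetical):
#
#   _detect_fastq_format("sample_R1.fastq.gz")
#   # -> set of candidate encodings, e.g. set(["sanger"]) for current Illumina data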
def _check_quality_format(items):
"""
Check if quality_format="standard" and fastq_format is not sanger
"""
SAMPLE_FORMAT = {"illumina_1.3+": "illumina",
"illumina_1.5+": "illumina",
"illumina_1.8+": "standard",
"solexa": "solexa",
"sanger": "standard"}
fastq_extensions = ["fq.gz", "fastq.gz", ".fastq", ".fq"]
for item in items:
specified_format = item["algorithm"].get("quality_format", "standard").lower()
if specified_format not in SAMPLE_FORMAT.values():
raise ValueError("Quality format specified in the YAML file"
"is not supported. Supported values are %s."
% (SAMPLE_FORMAT.values()))
fastq_file = next((file for file in item.get('files') or [] if
any([ext for ext in fastq_extensions if ext in file])), None)
if fastq_file and specified_format and not objectstore.is_remote(fastq_file):
fastq_format = _detect_fastq_format(fastq_file)
detected_encodings = set([SAMPLE_FORMAT[x] for x in fastq_format])
if detected_encodings:
if specified_format not in detected_encodings:
raise ValueError("Quality format specified in the YAML "
"file might be a different encoding. "
"'%s' was specified but possible formats "
"detected were %s." % (specified_format,
", ".join(detected_encodings)))
def _check_aligner(item):
"""Ensure specified aligner is valid choice.
"""
allowed = set(alignment.TOOLS.keys() + [None, False])
if item["algorithm"].get("aligner") not in allowed:
raise ValueError("Unexpected algorithm 'aligner' parameter: %s\n"
"Supported options: %s\n" %
(item["algorithm"].get("aligner"), sorted(list(allowed))))
def _check_variantcaller(item):
"""Ensure specified variantcaller is a valid choice.
"""
allowed = set(genotype.get_variantcallers().keys() + [None, False])
vcs = item["algorithm"].get("variantcaller", "gatk")
if not isinstance(vcs, (tuple, list)):
vcs = [vcs]
problem = [x for x in vcs if x not in allowed]
if len(problem) > 0:
raise ValueError("Unexpected algorithm 'variantcaller' parameter: %s\n"
"Supported options: %s\n" % (problem, sorted(list(allowed))))
def _check_jointcaller(data):
"""Ensure specified jointcaller is valid.
"""
allowed = set(joint.get_callers() + [None, False])
cs = data["algorithm"].get("jointcaller", [])
if not isinstance(cs, (tuple, list)):
cs = [cs]
problem = [x for x in cs if x not in allowed]
if len(problem) > 0:
raise ValueError("Unexpected algorithm 'jointcaller' parameter: %s\n"
"Supported options: %s\n" % (problem, sorted(list(allowed))))
def _check_sample_config(items, in_file, config):
"""Identify common problems in input sample configuration files.
"""
logger.info("Checking sample YAML configuration: %s" % in_file)
_check_quality_format(items)
_check_for_duplicates(items, "lane")
_check_for_duplicates(items, "description")
_check_for_batch_clashes(items)
_check_for_problem_somatic_batches(items, config)
_check_for_misplaced(items, "algorithm",
["resources", "metadata", "analysis",
"description", "genome_build", "lane", "files"])
[_check_toplevel_misplaced(x) for x in items]
[_check_algorithm_keys(x) for x in items]
[_check_algorithm_values(x) for x in items]
[_check_aligner(x) for x in items]
[_check_variantcaller(x) for x in items]
[_check_jointcaller(x) for x in items]
# ## Read bcbio_sample.yaml files
def _file_to_abs(x, dnames, makedir=False):
"""Make a file absolute using the supplied base directory choices.
"""
if x is None or os.path.isabs(x):
return x
elif isinstance(x, basestring) and objectstore.is_remote(x):
return x
elif isinstance(x, basestring) and x.lower() == "none":
return None
else:
for dname in dnames:
if dname:
normx = os.path.normpath(os.path.join(dname, x))
if os.path.exists(normx):
return normx
elif makedir:
utils.safe_makedir(normx)
return normx
raise ValueError("Did not find input file %s in %s" % (x, dnames))
def _normalize_files(item, fc_dir=None):
"""Ensure the files argument is a list of absolute file names.
Handles BAM, single and paired end fastq.
"""
files = item.get("files")
if files:
if isinstance(files, basestring):
files = [files]
fastq_dir = flowcell.get_fastq_dir(fc_dir) if fc_dir else os.getcwd()
files = [_file_to_abs(x, [os.getcwd(), fc_dir, fastq_dir]) for x in files]
files = [x for x in files if x]
_sanity_check_files(item, files)
item["files"] = files
return item
def _sanity_check_files(item, files):
"""Ensure input files correspond with supported
"""
msg = None
file_types = set([("bam" if x.endswith(".bam") else "fastq") for x in files if x])
if len(file_types) > 1:
msg = "Found multiple file types (BAM and fastq)"
file_type = file_types.pop()
if file_type == "bam":
if len(files) != 1:
msg = "Expect a single BAM file input as input"
elif file_type == "fastq":
if len(files) not in [1, 2]:
msg = "Expect either 1 (single end) or 2 (paired end) fastq inputs"
if len(files) == 2 and files[0] == files[1]:
msg = "Expect both fastq files to not be the same"
if msg:
raise ValueError("%s for %s: %s" % (msg, item.get("description", ""), files))
def _run_info_from_yaml(dirs, run_info_yaml, config, sample_names=None):
"""Read run information from a passed YAML file.
"""
with open(run_info_yaml) as in_handle:
loaded = yaml.load(in_handle)
fc_name, fc_date = None, None
if dirs.get("flowcell"):
try:
fc_name, fc_date = flowcell.parse_dirname(dirs.get("flowcell"))
except ValueError:
pass
global_config = {}
global_vars = {}
resources = {}
if isinstance(loaded, dict):
global_config = copy.deepcopy(loaded)
del global_config["details"]
if "fc_name" in loaded and "fc_date" in loaded:
fc_name = loaded["fc_name"].replace(" ", "_")
fc_date = str(loaded["fc_date"]).replace(" ", "_")
global_vars = global_config.pop("globals", {})
resources = global_config.pop("resources", {})
loaded = loaded["details"]
if sample_names:
loaded = [x for x in loaded if x["description"] in sample_names]
run_details = []
for i, item in enumerate(loaded):
item = _normalize_files(item, dirs.get("flowcell"))
if "lane" not in item:
item["lane"] = str(i + 1)
item["lane"] = _clean_characters(str(item["lane"]))
if "description" not in item:
if _item_is_bam(item):
item["description"] = get_sample_name(item["files"][0])
else:
raise ValueError("No `description` sample name provided for input #%s" % (i + 1))
item["description"] = _clean_characters(str(item["description"]))
if "upload" not in item:
upload = global_config.get("upload", {})
# Handle specifying a local directory directly in upload
if isinstance(upload, basestring):
upload = {"dir": upload}
if fc_name and fc_date:
upload["fc_name"] = fc_name
upload["fc_date"] = fc_date
upload["run_id"] = ""
if upload.get("dir"):
upload["dir"] = _file_to_abs(upload["dir"], [dirs.get("work")], makedir=True)
item["upload"] = upload
item["algorithm"] = _replace_global_vars(item["algorithm"], global_vars)
item["algorithm"] = genome.abs_file_paths(item["algorithm"],
ignore_keys=ALGORITHM_NOPATH_KEYS)
item["genome_build"] = str(item.get("genome_build", ""))
item["algorithm"] = _add_algorithm_defaults(item["algorithm"])
item["rgnames"] = prep_rg_names(item, config, fc_name, fc_date)
if item.get("files"):
item["files"] = [genome.abs_file_paths(f) for f in item["files"]]
elif "files" in item:
del item["files"]
if item.get("vrn_file") and isinstance(item["vrn_file"], basestring):
inputs_dir = utils.safe_makedir(os.path.join(dirs.get("work", os.getcwd()), "inputs"))
item["vrn_file"] = vcfutils.bgzip_and_index(genome.abs_file_paths(item["vrn_file"]), config,
remove_orig=False, out_dir=inputs_dir)
item = _clean_metadata(item)
item = _clean_algorithm(item)
# Add any global resource specifications
if "resources" not in item:
item["resources"] = {}
for prog, pkvs in resources.iteritems():
if prog not in item["resources"]:
item["resources"][prog] = {}
for key, val in pkvs.iteritems():
item["resources"][prog][key] = val
run_details.append(item)
_check_sample_config(run_details, run_info_yaml, config)
return run_details
def _item_is_bam(item):
files = item.get("files", [])
return len(files) == 1 and files[0].endswith(".bam")
def _add_algorithm_defaults(algorithm):
"""Central location specifying defaults for algorithm inputs.
Converts allowed multiple inputs into lists if specified as a single item.
"""
defaults = {"archive": [],
"tools_off": [],
"tools_on": [],
"variant_regions": None}
convert_to_list = set(["archive", "tools_off", "tools_on", "hetcaller"])
for k, v in defaults.items():
if k not in algorithm:
algorithm[k] = v
for k, v in algorithm.items():
if k in convert_to_list:
if v and not isinstance(v, (list, tuple)):
algorithm[k] = [v]
elif v is None:
algorithm[k] = []
return algorithm
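# Example (illustrative sketch with hypothetical values):
#
#   _add_algorithm_defaults({"aligner": "bwa", "tools_on": "qualimap"})
#   # -> {"aligner": "bwa", "tools_on": ["qualimap"], "tools_off": [],
#   #     "archive": [], "variant_regions": None}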
def _replace_global_vars(xs, global_vars):
"""Replace globally shared names from input header with value.
The value of the `algorithm` item may be a pointer to a real
file specified in the `global` section. If found, replace with
the full value.
"""
if isinstance(xs, (list, tuple)):
        return [_replace_global_vars(x, global_vars) for x in xs]
elif isinstance(xs, dict):
final = {}
for k, v in xs.iteritems():
if isinstance(v, basestring) and v in global_vars:
v = global_vars[v]
final[k] = v
return final
else:
return xs
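# Example (illustrative sketch with hypothetical values):
#
#   _replace_global_vars({"variant_regions": "my_regions"},
#                        {"my_regions": "/data/regions/capture.bed"})
#   # -> {"variant_regions": "/data/regions/capture.bed"}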
def clean_name(xs):
final = []
safec = "_"
for x in xs:
if x not in string.ascii_letters + string.digits:
if len(final) > 0 and final[-1] != safec:
final.append(safec)
else:
final.append(x)
if final[-1] == safec:
final = final[:-1]
return "".join(final)
def prep_system(run_info_yaml, bcbio_system=None):
"""Prepare system configuration information from an input configuration file.
This does the work of parsing the system input file and setting up directories
for use in 'organize'.
"""
work_dir = os.getcwd()
config, config_file = config_utils.load_system_config(bcbio_system, work_dir)
dirs = setup_directories(work_dir, os.path.normpath(os.path.dirname(os.path.dirname(run_info_yaml))),
config, config_file)
return [dirs, config, run_info_yaml]
|
Cyberbio-Lab/bcbio-nextgen
|
bcbio/pipeline/run_info.py
|
Python
|
mit
| 33,207
|
[
"Galaxy"
] |
a63bec0b723e098bb751fa5a312d7776be647cec7b2092e7a63a001aed996d28
|
import vtkAll as vtk
from shallowCopy import shallowCopy
import numpy as np
try:
from vtk.util import numpy_support
except ImportError:
from paraview import numpy_support
def numpyToPolyData(pts, pointData=None, createVertexCells=False):
pd = vtk.vtkPolyData()
pd.SetPoints(vtk.vtkPoints())
# Makes a deep copy
pd.GetPoints().SetData(getVtkFromNumpy(pts.copy()))
if pointData is not None:
for key, value in pointData.iteritems():
addNumpyToVtk(pd, value.copy(), key)
if createVertexCells:
cellIds = vtk.vtkIdList()
cellIds.SetNumberOfIds(pd.GetNumberOfPoints())
for i in range(pd.GetNumberOfPoints()):
cellIds.SetId(i, i)
cells = vtk.vtkCellArray()
cells.InsertNextCell(cellIds)
pd.SetVerts(cells)
return pd
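# Example (illustrative sketch; the arrays below are hypothetical):
#
#   pts = np.random.rand(100, 3)
#   pd = numpyToPolyData(pts, pointData={'intensity': np.ones(100)},
#                        createVertexCells=True)
#   # pd is a vtkPolyData with 100 points, an 'intensity' point array and vertex cells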
def getNumpyFromVtk(dataObj, arrayName='Points'):
if arrayName == 'Points':
vtkArray = dataObj.GetPoints().GetData()
else:
vtkArray = dataObj.GetPointData().GetArray(arrayName)
if not vtkArray:
raise KeyError('Array not found')
return numpy_support.vtk_to_numpy(vtkArray)
def getVtkPointsFromNumpy(numpyArray):
points = vtk.vtkPoints()
points.SetData(getVtkFromNumpy(numpyArray))
return points
def getVtkPolyDataFromNumpyPoints(points):
'''
Given an Nx3 array of xyz points
Return a new vtkPolyData containing points and vertex cells.
    If the input points are not float64 they will be converted first.
'''
if points.dtype != np.float64:
points = points.astype(np.float64)
polyData = vtk.vtkPolyData()
polyData.SetPoints(getVtkPointsFromNumpy(points))
vtk.vtkPCLConversions.AddVertexCells(polyData)
return polyData
def getVtkFromNumpy(numpyArray):
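    # The DeleteEvent observer registered below holds a reference to the numpy array,
    # keeping it alive while the VTK array (which shares its memory) still exists.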
def MakeCallback(numpyArray):
def Closure(caller, event):
closureArray = numpyArray
return Closure
vtkArray = numpy_support.numpy_to_vtk(numpyArray)
vtkArray.AddObserver('DeleteEvent', MakeCallback(numpyArray))
return vtkArray
def addNumpyToVtk(dataObj, numpyArray, arrayName):
assert dataObj.GetNumberOfPoints() == numpyArray.shape[0]
vtkArray = getVtkFromNumpy(numpyArray)
vtkArray.SetName(arrayName)
dataObj.GetPointData().AddArray(vtkArray)
|
mitdrc/director
|
src/python/director/vtkNumpy.py
|
Python
|
bsd-3-clause
| 2,294
|
[
"ParaView",
"VTK"
] |
f592de5c4f2c142bf9a99adb5cf7f4f55c3294fbd105df53e55e25482719edb5
|
#!/usr/bin/env python
import argparse
import copy
import io
import logging
import os
import sys
import time
import traceback
import pyexiv2
# import Image
import numpy as np
import matplotlib as mpl
from numpy.f2py.auxfuncs import throw_error
from logging import exception
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
import matplotlib.pyplot as plt
try:
# Try to force all times to be read as UTC
os.environ['TZ'] = 'UTC'
time.tzset()
except:
pass
import auroraplot as ap
import auroraplot.dt64tools as dt64
import auroraplot.magdata
import auroraplot.tools
import auroraplot.auroralactivity
import auroraplot.datasets.aurorawatchnet
import auroraplot.datasets.samnet
import auroraplot.datasets.bgs_schools
mpl.rcParams['legend.fontsize'] = 'medium'
def my_load_data(project, site, data_type, start_time, end_time, **kwargs):
r = ap.load_data(project, site, data_type, start_time, end_time, **kwargs)
if r is not None and args.test_mode:
# Remove any data after 'now' to emulate the correct behaviour
# when using historical data.
r.data[:,r.sample_end_time > now] = np.nan
return r
def mysavefig(fig, filename, exif_tags=None):
global args
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
fig.axes[-1].set_xlabel('Time (UT)')
# Override labelling format
for ax in fig.axes:
ax.grid(True)
ax.xaxis.set_major_formatter(dt64.Datetime64Formatter(fmt='%H'))
if np.diff(ax.get_xlim()).astype('m8[' + dt64.get_plot_units(ax.xaxis) \
+ ']') == np.timedelta64(24, 'h'):
ax.xaxis.set_major_locator(\
dt64.Datetime64Locator(interval=np.timedelta64(3, 'h'),
maxticks=10))
# TO DO: Update all site information with correct copyright,
# license and attribution data. Temporarily set here as currently
# all are CC4 BY-NC-SA.
if exif_tags is None:
exif_tags = {
'Exif.Image.Copyright': \
'This work is licensed under the Creative Commons ' + \
'Attribution-NonCommercial-ShareAlike 4.0 Unported ' + \
'License. To view a copy of this license, visit ' + \
'http://creativecommons.org/licenses/by-nc-sa/4.0/'
}
if exif_tags is None or len(exif_tags) == 0:
# Can save directly to a file
        fig.savefig(filename, dpi=80)
else:
# Save the figure to a buffer which is used to create a
# pyexiv2 object.
image_format = filename[(filename.rindex('.') + 1):]
buf = io.BytesIO()
fig.savefig(buf, dpi=80, format=image_format)
buf.seek(0)
metadata = pyexiv2.ImageMetadata.from_buffer(buf.getvalue())
metadata.read()
# Add the metadata. pyexiv2 only supports a few tags
for k in exif_tags:
metadata[k] = exif_tags[k]
metadata.write()
f = open(filename, 'wb') # Open the file originally specified
f.write(metadata.buffer) # Finally write to disk
f.close()
buf.close()
logger.info('saved to ' + filename)
# if not args.show:
# plt.close(fig) # Close to save memory
def has_data_of_type(project, site, data_type):
dti = ap.get_site_info(project, site, 'data_types')
return dti.has_key(data_type)
def round_to(a, b, func=np.round):
return func(a / b) * b
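# Example (illustrative sketch):
#
#   round_to(123.0, 50)           # -> 100.0
#   round_to(123.0, 50, np.ceil)  # -> 150.0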
def activity_plot(mag_data, mag_qdc, filename, exif_tags,
k_index_filename=None):
global activity
channel = mag_data.channels[0]
pos = [0.15, 0.1, 0.775, 0.75]
if mag_qdc is None:
activity = None
mag_data.plot(channels=channel, label=channel, color='black')
fig = plt.gcf()
ax2 = plt.gca()
else:
# assert np.all(mag_data.channels == mag_qdc.channels) \
# and len(mag_data.channels) == 1 \
# and len(mag_qdc.channels) == 1, \
# 'Bad value for channels'
activity = ap.auroralactivity.AuroraWatchActivity(magdata=mag_data,
magqdc=mag_qdc,
channels=channel,
fit=None)
        # To get a second axes object its position must be different; it is
        # moved back to the same position later.
pos2 = copy.copy(pos)
pos2[0] += 0.1
fig = plt.figure(facecolor='w')
ax = plt.axes(pos)
activity.plot(axes=ax, units_prefix='n',
label='Activity (' + channel + ')')
ax2 = plt.axes(pos2)
# Set Y limit to be 1.5 times highest threshold. Units are
# nanotesla since that was set when plotting.
ax.set_ylim(0, activity.thresholds[-1] * 1.5 * 1e9)
mag_data.plot(channels=channel,
label=channel,
color='black',
axes=ax2)
# Align the QDC to regular intervals between start and end times
qdc_cadence = np.timedelta64(1, 'm')
# num = ((mag_data.end_time - mag_data.start_time)/ qdc_cadence) + 1
# qdc_sample_times = np.linspace(mag_data.start_time.astype('M8[m]'),
# mag_data.end_time.astype('M8[m]'),
# num)
qdc_sample_times = list(dt64.dt64_range(mag_data.start_time,
mag_data.end_time,
qdc_cadence))
qdc_aligned = mag_qdc.align(qdc_sample_times)
qdc_aligned.plot(channels=channel,
label=channel + ' QDC',
color='cyan',
axes=ax2)
ax.set_axis_bgcolor('w')
ax.axison = False
ax2.set_title(activity.make_title())
ax2.set_axis_bgcolor('none')
ax2.set_position(pos)
min_ylim_range = 400
ax2_ylim = ax2.get_ylim()
if np.diff(ax2_ylim) < min_ylim_range:
ax2.set_ylim(round_to(np.mean(ax2_ylim), 50)
+ min_ylim_range * np.array([-0.5, 0.5]))
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
mysavefig(fig, filename, exif_tags)
r = [activity]
if k_index_filename is not None:
md_filt = mag_data
if ap.has_site_info(mag_data.project, mag_data.site,
'k_index_filter'):
kfilt = ap.get_site_info(mag_data.project, mag_data.site,
'k_index_filter')
if kfilt is not None:
md_filt = kfilt(mag_data)
k_index = ap.auroralactivity.KIndex(magdata=md_filt, magqdc=mag_qdc)
# Fix the start/end times to the data, not the 3h K index samples
k_index.start_time = md_filt.start_time
k_index.end_time = md_filt.end_time
k_index.plot()
fig = plt.gcf()
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
fig.subplots_adjust(bottom=0.1, top=0.85,
left=0.15, right=0.925)
mysavefig(fig, k_index_filename, exif_tags)
r.append(k_index)
return r
def make_aurorawatch_plot(project, site, st, et, rolling, exif_tags):
'''
Load data and make the AuroraWatch activity plot. Plots always
cover 24 hours, but may begin at midnight for day plots, or at any
other hour for rolling plots. This function uses the previous 72
hours to help fit the quiet-day curve.
project: name of project
site: name of site
st: start time. For day plots this is the start of the day. For
rolling plots this is the start of the rolling 24 hour period.
et: end time. For day plots this is the start of the following
day. For rolling plots it is the end of the 24 hour period.
rolling: flag to indicate if rolling plot should also be made. It
is not otherwise possible to identify rolling plots which
start at midnight.
'''
# global mag_fstr
global args
# Export to global names for debugging
global mag_data
global mag_qdc
global activity
day = np.timedelta64(24, 'h')
archive, archive_details = ap.get_archive_info(project, site, 'MagData')
    # Load the data to plot. For rolling plots load up to midnight so
# that both the rolling plot and the current day plot can be
# generated efficiently.
mag_data = my_load_data(project, site, 'MagData', st, dt64.ceil(et, day))
if mag_data is None or \
not np.any(np.logical_not(np.isnan(mag_data.data))):
        # not np.any(...) eliminates an empty array or an array of just NaNs
logger.info('No magnetic field data')
return
    # Load up some data from previous days and apply a
# least-squares fit to remove baseline drifts. Data from the
# current day is not used. This ensures that results do not change
# over the current day when new data becomes available.
qdc_fit_interval = args.qdc_fit_interval * day
fit_et = dt64.ceil(st, day) # Could be doing a rolling plot
fit_st = fit_et - qdc_fit_interval
fit_data = my_load_data(project, site, 'MagData', fit_st, fit_et)
# Load a QDC.
mag_qdc = ap.magdata.load_qdc(project, site, st, tries=6, realtime=True)
if mag_qdc is None:
logger.info('No QDC')
elif fit_data is None:
# Cannot fit, so assume no errors in QDC
errors = [0.0]
else:
try:
# Fit the QDC to the previous data
qdc_aligned, errors, fi = mag_qdc.align(\
fit_data,
fit=ap.data.Data.minimise_sign_error_fit,
plot_fit=args.plot_fit,
full_output=True)
except Exception as e:
logger.warn('Could not fit QDC')
logger.info(str(e))
errors = [0.0]
else:
# Fitted ok, plot if necessary
if args.plot_fit:
fig = plt.gcf()
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
fig.subplots_adjust(bottom=0.1, top=0.85,
left=0.15, right=0.925)
fit_fstr = mag_fstr[:(mag_fstr.rindex('.'))] + '_fit.png'
mysavefig(fig, dt64.strftime(dt64.ceil(st, day), fit_fstr),
exif_tags)
# Adjust the quiet day curve with the error obtained by fitting to
# previous days.
if mag_qdc is None:
mag_qdc_adj = None
else:
mag_qdc_adj = copy.deepcopy(mag_qdc)
mag_qdc_adj.data -= errors[0]
# Ensure data gaps are marked as such in the plots. Straight lines
# across large gaps look bad!
mag_data = mag_data.mark_missing_data(cadence=2*mag_data.nominal_cadence)
# Do day plot. Trim start time for occasions when making a day
# plot simultaneously with a rolling plot.
st2 = dt64.ceil(st, day)
md_day = mag_data.extract(start_time=st2)
act_ki = activity_plot(md_day, mag_qdc_adj,
dt64.strftime(st2, mag_fstr), exif_tags,
k_index_filename=dt64.strftime(st2, k_fstr))
r = [md_day]
r.extend(act_ki)
if rolling:
# Trim end time
md_rolling = mag_data.extract(end_time=et)
act_ki_rolling = activity_plot(md_rolling, mag_qdc_adj,
rolling_magdata_filename, exif_tags,
k_index_filename=rolling_k_filename)
r.append(md_rolling)
r.extend(act_ki_rolling)
return r
def make_temperature_plot(temperature_data, filename, exif_tags):
temperature_data.plot()
fig = plt.gcf()
ax = plt.gca()
fig.set_figwidth(6.4)
fig.set_figheight(3)
fig.subplots_adjust(bottom=0.175, top=0.75,
left=0.15, right=0.925)
leg = plt.legend()
leg.get_frame().set_alpha(0.5)
mysavefig(fig, filename, exif_tags)
def make_voltage_plot(voltage_data, filename, exif_tags):
voltage_data.plot()
fig = plt.gcf()
ax = plt.gca()
# ax.set_ylim([1.5, 3.5])
fig.set_figwidth(6.4)
fig.set_figheight(3)
fig.subplots_adjust(bottom=0.175, top=0.75,
left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
def make_stack_plot(mdl, filename, exif_tags):
ap.magdata.stack_plot(mdl, offset=100e-9)
fig = plt.gcf()
ax = plt.gca()
ax.grid(True)
fig.subplots_adjust(left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
def combined_activity_plot(act, filename, exif_tags):
'''
act: list of AuroraWatchActivity objects
filename: filename for plot
exif_tags: dict of tags to add to image
returns: None
'''
# Calculate activity as proportion of amber alert
act_data = np.concatenate(map(lambda d: (d.data / d.thresholds[2]) if d else np.zeros([1,24]), act))
act_data[np.isnan(act_data)] = 0
if act_data.shape[0] == 2:
        # When there are only two sites, use the lowest activity values
data = np.min(act_data, axis=0)
else:
data = np.median(act_data, axis=0)
activity_data = copy.deepcopy(act[0])
activity_data.project = 'AuroraWatch'
activity_data.site = 'UK'
# Set specific thresholds, and convert data from proportion of
# amber threshold
activity_data.data = np.array([data]) * 100e-9
activity_data.thresholds = np.array([0.0, 50e-9, 100e-9, 200e-9])
activity_data.units = 'T'
activity_data.plot(units_prefix='n')
fig = plt.gcf()
ax = plt.gca()
ax.set_ylabel('Activity (nT)')
ax.set_title('AuroraWatch UK\nAverage geomagnetic activity\n' +
dt64.fmt_dt64_range(activity_data.start_time,
activity_data.end_time))
ax.grid(True)
# Set Y limit to be 1.5 times highest threshold. Units are
# nanotesla since that was set when plotting.
ax.set_ylim(0, activity_data.thresholds[-1] * 1.5 * 1e9)
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
fig.subplots_adjust(bottom=0.1, top=0.85,
left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
return activity_data
def make_links(link_dir, link_data):
for link in link_data:
link_name = os.path.join(link_dir, link['name'])
# Make the target a relative path
target = os.path.relpath(dt64.strftime(link['date'], link['fstr']),
os.path.dirname(link_name))
if os.path.islink(link_name) and \
os.readlink(link_name) == target:
# Exists and is correct
logger.debug('link exists and is correct: ' + link_name +
' -> ' + target)
continue
if os.path.lexists(link_name):
logger.debug('link exists but is incorrect: ' + link_name)
os.unlink(link_name)
logger.debug('creating link ' + link_name + ' -> ' + target)
link_dir = os.path.dirname(link_name)
if not os.path.exists(link_dir):
logger.debug('creating directory %s', link_dir)
os.makedirs(link_dir)
os.symlink(target, link_name)
# TODO: put in a common location and merge with aurorawatch_jobs.touch_file
def touch_file(filename, amtime=None):
basedir = os.path.dirname(filename)
if not os.path.exists(basedir):
os.makedirs(basedir)
with open(filename, 'a'):
os.utime(filename, amtime)
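# Example (illustrative sketch; the path is hypothetical):
#
#   touch_file('/tmp/job_status/site_job', (0, 0))
#   # creates the file (and any missing parent directory) and sets atime/mtime to 1970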
def clear_timeouts(status_dir):
if os.path.exists(status_dir):
for filename in os.listdir(status_dir):
# Set times back to 1970
touch_file(os.path.join(status_dir, filename), (0, 0))
cc4_by_nc_sa = 'This work is licensed under the Creative Commons ' + \
'Attribution-NonCommercial-ShareAlike 4.0 Unported License. ' + \
'To view a copy of this license, visit ' + \
'http://creativecommons.org/licenses/by-nc-sa/4.0/'
# ==========================================================================
# Parse command line options
parser = argparse.ArgumentParser(description\
='Plot AuroraWatch magnetometer data.')
parser.add_argument('-s', '--start-time',
help='Start time for archive plot mode',
metavar='DATETIME')
parser.add_argument('-e', '--end-time',
help='End time for archive plot mode',
metavar='DATETIME')
parser.add_argument('--now',
help='Set current time for test mode',
metavar='DATETIME')
parser.add_argument('--log-level',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='warning',
                    help='Control how much detail is printed',
metavar='LEVEL')
parser.add_argument('--log-format',
default='%(levelname)s:%(message)s',
help='Set format of log messages',
metavar='FORMAT')
parser.add_argument('-m', '--make-links',
action='store_true',
help='Make symbolic links')
parser.add_argument('--rolling',
action='store_true',
help='Make rolling plots for today (live mode)')
parser.add_argument('--test-mode',
action='store_true',
help='Test mode for plots and jobs')
parser.add_argument('--clear-timeouts',
action='store_true',
help='Mark jobs as not having run for a very long time')
parser.add_argument('--ignore-timeout',
action='store_true',
help='Ignore timeout when running jobs')
parser.add_argument('--sites',
required=True,
help='Whitespace-separated list of sites (prefixed with project)',
metavar='"PROJECT1/SITE1 PROJECT2/SITE2 ..."')
parser.add_argument('--plot-fit',
action='store_true',
help='Plot and save QDC fit')
parser.add_argument('--qdc-fit-interval',
type=int,
default=3,
help='Number of days for fitting QDC',
metavar='DAYS')
parser.add_argument('--run-jobs',
action='store_true',
help='Run jobs')
parser.add_argument('--show',
action='store_true',
help='Show plots for final day')
parser.add_argument('--stack-plot',
action='store_true',
help='Generate stack plot(s)')
parser.add_argument('--summary-dir',
default='/tmp',
help='Base directory for summary plots',
metavar='PATH')
args = parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format=args.log_format)
logger = logging.getLogger(__name__)
# Use a consistent value for current time, process any --now option
# first.
if args.now:
now = dt64.parse_datetime64(args.now, 'us')
else:
now = np.datetime64('now', 'us')
day = np.timedelta64(24, 'h')
today = dt64.floor(now, day)
yesterday = today - day
tomorrow = today + day
# This can be used in os.path.join() to include the test directory
# when needed.
if args.test_mode:
test_mode_str = 'test'
else:
test_mode_str = ''
if args.rolling:
if args.start_time or args.end_time:
raise Exception('Cannot set start or end time for rolling plots')
end_time = dt64.ceil(now, np.timedelta64(1, 'h'))
start_time = end_time - day
else:
if args.start_time is None:
start_time = today
else:
start_time = dt64.parse_datetime64(args.start_time, 'us')
if args.end_time is None:
end_time = start_time + day
else:
end_time = dt64.parse_datetime64(args.end_time, 'us')
if args.run_jobs:
import aurorawatch_jobs
# aurorawatch_jobs.init(args.test_mode, args.ignore_timeout)
else:
aurorawatch_jobs = None
if args.clear_timeouts:
clear_timeouts(os.path.join(args.summary_dir, test_mode_str,
'job_status'))
# Get names of all projects and sites to be processed. Dictionary used
# to avoid duplicates.
project_site = {}
for s in args.sites.upper().split():
n_s = s.split('/')
if len(n_s) == 1:
# Only project given, use all sites
for k in ap.projects[n_s[0]].keys():
project_site[n_s[0] + '/' + k] = (n_s[0], k)
elif len(n_s) == 2:
# Project and site given
project_site[s] = tuple(n_s)
else:
            raise Exception('bad value for project/site (' + s + ')')
t1 = start_time
while t1 < end_time:
logger.debug('time: %s', dt64.strftime(t1, '%Y-%m-%d'))
plt.close('all')
t2 = t1 + day
t1_eod = dt64.ceil(t1, day) # t1 end of day
t2_eod = dt64.ceil(t2, day) # t2 end of day
# List of magdata objects for this day
mdl_day = []
act_day = []
mdl_rolling = []
act_rolling = []
# Get copyright and attribution data for all sites. License had
# better be CC4-BY-NC-SA for all since we are combining them.
copyright_list = []
attribution_list = []
for project_uc, site_uc in project_site.values():
project_lc = project_uc.lower()
site_lc = site_uc.lower()
logger.debug('%s/%s', project_uc, site_uc)
if not ap.projects.has_key(project_uc):
try:
__import__('auroraplot.datasets.' + project_lc)
logger.debug('imported auroraplot.datasets.' + project_lc)
except:
logger.error('could not import dataset for %s', project_lc)
site_start_time = ap.get_site_info(project_uc, site_uc,
info='start_time')
site_end_time = ap.get_site_info(project_uc, site_uc,
info='end_time')
            if site_start_time and t2 <= site_start_time:
                continue
            if site_end_time and t1 >= site_end_time:
                continue
copyright_ = ap.get_site_info(project_uc, site_uc, 'copyright')
attribution = ap.get_site_info(project_uc, site_uc, 'attribution')
exif_tags = {'Exif.Image.Copyright': \
' '.join(['Copyright: ' + copyright_,
'License: ' + \
ap.get_site_info(project_uc,
site_uc,
'license'),
'Attribution: ' + attribution])}
summary_dir = args.summary_dir
site_summary_dir = os.path.join(summary_dir, test_mode_str,
project_lc, site_lc)
site_status_dir = os.path.join(site_summary_dir, 'job_status')
mag_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_%Y%m%d.png')
rolling_magdata_filename = os.path.join(site_summary_dir,
'rolling.png')
stackplot_fstr = os.path.join(summary_dir, test_mode_str,
'stackplots', '%Y', '%m', '%Y%m%d.png')
rolling_stackplot_filename = os.path.join(summary_dir, test_mode_str,
'stackplots', 'rolling.png')
actplot_fstr = os.path.join(summary_dir, test_mode_str,
'activity_plots',
'%Y', '%m', '%Y%m%d.png')
rolling_activity_filename = os.path.join(summary_dir, test_mode_str,
'activity_plots',
'rolling.png')
temp_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_temp_%Y%m%d.png')
rolling_tempdata_filename = os.path.join(site_summary_dir,
'rolling_temp.png')
voltage_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_voltage_%Y%m%d.png')
rolling_voltdata_filename = os.path.join(site_summary_dir,
'rolling_volt.png')
k_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_k_%Y%m%d.png')
rolling_k_filename = os.path.join(site_summary_dir,
'rolling_k.png')
if args.clear_timeouts and t1 == start_time:
clear_timeouts(site_status_dir)
md = None
if has_data_of_type(project_uc, site_uc, 'MagData'):
try:
md = make_aurorawatch_plot(project_uc, site_uc, t1, t2,
args.rolling, exif_tags)
# Store mag_data objects for daily and rolling
# stack plots.
if md is not None:
mdl_day.append(md[0])
act_day.append(md[1])
copyright_list.append(copyright_)
attribution_list.append(attribution)
if args.rolling:
mdl_rolling.append(md[3])
act_rolling.append(md[4])
except Exception as e:
logger.error(traceback.format_exc())
else:
logger.debug('%s/%s does not have MagData', project_uc, site_uc)
temp_data = None
if has_data_of_type(project_uc, site_uc, 'TemperatureData'):
temp_data = my_load_data(project_uc, site_uc, 'TemperatureData',
t1, t2_eod)
if temp_data is not None:
temp_data.set_cadence(np.timedelta64(10, 'm'),
inplace=True)
if args.rolling:
# Rolling plot
make_temperature_plot(temp_data.extract(end_time=t2),
rolling_tempdata_filename,
exif_tags)
# Make day plot. Trim data from start because when
# --rolling option is given it can include data from
# the previous day.
make_temperature_plot(temp_data.extract(start_time=t1_eod),
dt64.strftime(t1_eod, temp_fstr),
exif_tags)
voltage_data = None
if has_data_of_type(project_uc, site_uc, 'VoltageData'):
voltage_data = my_load_data(project_uc, site_uc, 'VoltageData',
t1, t2_eod)
if voltage_data is not None:
voltage_data.set_cadence(np.timedelta64(10, 'm'),
inplace=True)
if args.rolling:
# Rolling plot
make_voltage_plot(voltage_data.extract(end_time=t2),
rolling_voltdata_filename,
exif_tags)
# Make day plot. Trim data from start because when
# --rolling option is given it can include data from
# the previous day.
make_voltage_plot(voltage_data.extract(start_time=t1_eod),
dt64.strftime(t1_eod, voltage_fstr),
exif_tags)
if args.rolling and args.run_jobs:
# Jobs are only run for rolling (live) mode.
try:
logger.info('Running site job for ' + project_uc + '/' \
+ site_uc)
aurorawatch_jobs.site_job(project=project_uc,
site=site_uc,
now=now,
status_dir=site_status_dir,
test_mode=args.test_mode,
ignore_timeout=args.ignore_timeout,
mag_data=mag_data,
act_data=None if md is None else md[4],
temp_data=temp_data,
voltage_data=voltage_data)
except Exception as e:
logger.error('Could not run job for ' + project_uc + '/' +
site_uc + ': ' + str(e))
logger.error(traceback.format_exc())
if args.stack_plot and len(mdl_day):
site_ca = [] # site copyright/attribution details
for n in range(len(mdl_day)):
site_ca.append(mdl_day[n].project + '/' + mdl_day[n].site +
' data: ' +
' Copyright: ' + copyright_list[n] +
' Attribution: ' + attribution_list[n] +
' ')
exif_tags2 = {'Exif.Image.Copyright': \
' '.join(site_ca) + ' License: ' + cc4_by_nc_sa}
make_stack_plot(mdl_day, dt64.strftime(mdl_day[0].start_time,
stackplot_fstr),
exif_tags2)
combined_activity_plot(act_day, dt64.strftime(act_day[0].start_time,
actplot_fstr),
exif_tags2)
if args.rolling:
make_stack_plot(mdl_rolling, rolling_stackplot_filename,
exif_tags2)
combined_activity = \
combined_activity_plot(act_rolling, rolling_activity_filename,
exif_tags2)
if args.run_jobs:
try:
logger.info('Running activity job')
status_dir = os.path.join(summary_dir, test_mode_str,
'job_status')
aurorawatch_jobs.activity_job(combined_activity=\
combined_activity,
activity_data_list=\
act_rolling,
now=now,
status_dir=status_dir,
test_mode=args.test_mode,
ignore_timeout=\
args.ignore_timeout,)
except Exception as e:
logger.error('Could not run activity job: ' + str(e))
raise
t1 = t2
# End of time loop
if args.make_links:
logger.debug('making links')
# Makes site links for each site listed
for project_uc, site_uc in project_site.values():
site_lc = site_uc.lower()
site_summary_dir = os.path.join(summary_dir, test_mode_str,
project_uc.lower(), site_lc)
mag_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_%Y%m%d.png')
temp_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_temp_%Y%m%d.png')
voltage_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_voltage_%Y%m%d.png')
k_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_k_%Y%m%d.png')
link_data = [{'name': 'yesterday.png',
'date': yesterday,
'fstr': mag_fstr},
{'name': 'yesterday_temp.png',
'date': yesterday,
'fstr': temp_fstr},
{'name': 'yesterday_volt.png',
'date': yesterday,
'fstr': voltage_fstr},
{'name': 'yesterday_k.png',
'date': yesterday,
'fstr': k_fstr},
{'name': 'today.png',
'date': today,
'fstr': mag_fstr},
{'name': 'today_temp.png',
'date': today,
'fstr': temp_fstr},
{'name': 'today_volt.png',
'date': today,
'fstr': voltage_fstr},
{'name': 'today_k.png',
'date': today,
'fstr': k_fstr},
]
make_links(site_summary_dir, link_data)
        # Stack plot and combined activity links use different base
        # directories
make_links(os.path.join(summary_dir, test_mode_str, 'stackplots'),
[{'name': 'yesterday.png',
'date': yesterday,
'fstr': stackplot_fstr},
{'name': 'today.png',
'date': today,
'fstr': stackplot_fstr}])
make_links(os.path.join(summary_dir, test_mode_str, 'activity_plots'),
[{'name': 'yesterday.png',
'date': yesterday,
'fstr': actplot_fstr},
{'name': 'today.png',
'date': today,
'fstr': actplot_fstr}])
if args.show:
plt.show()
|
stevemarple/AuroraWatchNet
|
software/server/awnetd/awnet_plot.py
|
Python
|
gpl-2.0
| 34,245
|
[
"Amber",
"VisIt"
] |
96602ec7738370011d8eb489df5627272a27bb6fa6c306e7b4525f86ee5fdb1d
|
# -*- coding: utf-8 -*-
"""Tests for data structures in PyBEL."""
import os
import random
import tempfile
import unittest
from io import StringIO
import pybel
import pybel.examples
from pybel import BELGraph
from pybel.constants import CITATION_TYPE_PUBMED, IDENTIFIER, NAMESPACE
from pybel.dsl import hgvs, protein
from pybel.io.api import InvalidExtensionError
from pybel.language import Entity
from pybel.testing.utils import n
class TestGraphProperties(unittest.TestCase):
"""Test setting and access to graph properties."""
def setUp(self):
"""Make fake metadata for the graphs."""
(
self.name,
self.version,
self.description,
self.authors,
self.contact,
self.licenses,
self.copyrights,
self.disclaimer,
) = [n() for _ in range(8)]
def _help_test_metadata(self, graph: BELGraph) -> None:
"""Help test the right metadata got in the graph."""
self.assertEqual(self.name, graph.name)
self.assertEqual(self.version, graph.version)
self.assertEqual(self.description, graph.description)
self.assertEqual(self.authors, graph.authors)
self.assertEqual(self.contact, graph.contact)
self.assertEqual(self.licenses, graph.license)
self.assertEqual(self.copyrights, graph.copyright)
self.assertEqual(self.disclaimer, graph.disclaimer)
self.assertEqual("{name} v{version}".format(name=self.name, version=self.version), str(graph))
def test_str_kwargs(self):
"""Test setting of metadata through keyword arguments."""
graph = BELGraph(
name=self.name,
version=self.version,
description=self.description,
authors=self.authors,
contact=self.contact,
license=self.licenses,
copyright=self.copyrights,
disclaimer=self.disclaimer,
)
self._help_test_metadata(graph)
def test_name(self):
"""Test setting of metadata through attributes."""
graph = BELGraph()
graph.name = self.name
graph.version = self.version
graph.description = self.description
graph.authors = self.authors
graph.contact = self.contact
graph.license = self.licenses
graph.copyright = self.copyrights
graph.disclaimer = self.disclaimer
self._help_test_metadata(graph)
class TestStruct(unittest.TestCase):
"""Test the BEL graph data structure."""
def test_add_simple(self):
"""Test that a simple node can be added, but not duplicated."""
graph = BELGraph()
node = protein(namespace="TEST", name="YFG")
graph.add_node_from_data(node)
self.assertEqual(1, graph.number_of_nodes())
graph.add_node_from_data(node)
self.assertEqual(1, graph.number_of_nodes(), msg="should not add same node again")
def test_summarize(self):
"""Test summarizing a graph."""
self.maxDiff = None
sio = StringIO()
random.seed(5)
pybel.examples.sialic_acid_graph.version = "1.0.0"
pybel.examples.sialic_acid_graph.summarize(file=sio, examples=False)
test_str = """--------------------- -------------------
Name Sialic Acid Graph
Version 1.0.0
Authors Charles Tapley Hoyt
Number of Nodes 9
Number of Namespaces 3
Number of Edges 11
Number of Annotations 2
Number of Citations 1
Number of Authors 0
Number of Components 1
Number of Warnings 0
Network Density 1.53E-01
--------------------- -------------------
Type (3) Count
---------- -------
Protein 7
Complex 1
Abundance 1
Namespace (3) Count
--------------- -------
go 15
hgnc 8
chebi 2
Edge Type (7) Count
--------------------------------- -------
Protein increases Protein 3
Protein directlyIncreases Protein 2
Protein directlyDecreases Protein 2
Complex increases Protein 1
Abundance partOf Complex 1
Protein partOf Complex 1
Protein hasVariant Protein 1"""
self.assertEqual(test_str.strip(), sio.getvalue().strip())
def test_citation_type_error(self):
"""Test error handling on adding qualified edges."""
graph = BELGraph()
with self.assertRaises(TypeError):
graph.add_increases(
protein(namespace="TEST", name="YFG1"),
protein(namespace="TEST", name="YFG2"),
evidence=n(),
citation=5,
)
class TestGetGraphProperties(unittest.TestCase):
"""The tests in this class check the getting and setting of node properties."""
def setUp(self):
"""Set up the test case with a fresh BEL graph."""
self.graph = BELGraph()
self.graph.annotation_pattern["Species"] = r"\d+"
self.graph.annotation_list["Confidence"] = {
"Very Low",
"Low",
"Medium",
"High",
"Very High",
}
def test_get_qualified_edge(self):
"""Test adding an edge to a graph."""
test_source = protein(namespace="TEST", name="YFG")
test_target = protein(namespace="TEST", name="YFG2")
self.graph.add_node_from_data(test_source)
self.graph.add_node_from_data(test_target)
test_evidence = n()
test_pmid = n()
test_key = self.graph.add_increases(
test_source,
test_target,
citation=test_pmid,
evidence=test_evidence,
annotations={"Species": "9606", "Confidence": "Very High"},
)
citation = self.graph.get_edge_citation(test_source, test_target, test_key)
self.assertIsNotNone(citation)
self.assertIsInstance(citation, dict)
self.assertIn(NAMESPACE, citation)
self.assertEqual(CITATION_TYPE_PUBMED, citation[NAMESPACE])
self.assertIn(IDENTIFIER, citation)
self.assertEqual(test_pmid, citation[IDENTIFIER])
evidence = self.graph.get_edge_evidence(test_source, test_target, test_key)
self.assertIsNotNone(evidence)
self.assertIsInstance(evidence, str)
self.assertEqual(test_evidence, evidence)
annotations = self.graph.get_edge_annotations(test_source, test_target, test_key)
self.assertIsNotNone(annotations)
self.assertIsInstance(annotations, dict)
self.assertIn("Species", annotations)
self.assertIn(Entity(namespace="Species", identifier="9606"), annotations["Species"])
self.assertIn("Confidence", annotations)
self.assertIn(
Entity(namespace="Confidence", identifier="Very High"),
annotations["Confidence"],
)
def test_get_unqualified_edge(self):
"""Test adding an unqualified edge."""
test_source = protein(namespace="TEST", name="YFG")
test_target = protein(namespace="TEST", name="YFG2")
key = self.graph.add_part_of(test_source, test_target)
citation = self.graph.get_edge_citation(test_source, test_target, key)
self.assertIsNone(citation)
evidence = self.graph.get_edge_evidence(test_source, test_target, key)
self.assertIsNone(evidence)
annotations = self.graph.get_edge_annotations(test_source, test_target, key)
self.assertIsNone(annotations)
def test_add_node_with_variant(self):
"""Test that the identifier is carried through to the child."""
graph = BELGraph()
namespace, name, identifier, variant_name = n(), n(), n(), n()
node = protein(
namespace=namespace,
name=name,
identifier=identifier,
variants=hgvs(variant_name),
)
node.get_parent()
graph.add_node_from_data(node)
self.assertEqual(2, graph.number_of_nodes())
class TestExtensionIO(unittest.TestCase):
def test_io(self):
with tempfile.TemporaryDirectory() as directory:
path = os.path.join(directory, "ampk.bel.nodelink.json")
pybel.dump(pybel.examples.ampk_graph, path)
self.assertTrue(os.path.exists(path))
new_graph = pybel.load(path)
self.assertIsNotNone(new_graph)
def test_invalid_io(self):
with tempfile.TemporaryDirectory() as directory:
path = os.path.join(directory, "ampk.bel.invalid.json")
with self.assertRaises(InvalidExtensionError):
pybel.dump(pybel.examples.ampk_graph, path)
self.assertFalse(os.path.exists(path))
|
pybel/pybel
|
tests/test_struct/test_struct_graph.py
|
Python
|
mit
| 8,814
|
[
"Pybel"
] |
ccf8bd6da249c2da84331897b0e8fcc407d8cfb2c3060cb4483cab396784fb16
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The full program is explained in the attached ReadMe.md
Copyright (C) 2013 warehouseman.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on 2014-09-15
@author: Martin H. Bramwell
This module:
This is a helper program that lets you send emails through your
GMail account.
'''
import os
import urllib
import logging
import argparse
import loadGoogleJSON
try :
from goauth2_helper import GeneratePermissionUrl
from goauth2_helper import AuthorizeTokens
from goauth2_helper import GenerateOAuth2String
from goauth2_helper import RefreshToken
except ImportError :
# Get Google oauth2 helper file
webFile = urllib.urlopen('http://google-mail-oauth2-tools.googlecode.com/svn/trunk/python/oauth2.py')
localFile = open('goauth2_helper.py', 'w')
localFile.write(webFile.read())
webFile.close()
localFile.close()
from goauth2_helper import GeneratePermissionUrl
from goauth2_helper import AuthorizeTokens
from goauth2_helper import GenerateOAuth2String
from goauth2_helper import RefreshToken
import smtplib
import base64
import re
import traceback
import datetime
parameters_file = 'working_parameters.py'
SMTP_ACCESS = 'google_project_client_smtp_access_token'
SMTP_REFRESH = 'google_project_client_smtp_refresh_token'
SMTP_EXPIRY = 'google_project_client_smtp_expiry'
CLIENT_EMAIL = 'google_project_client_email'
configure_email = False
gpcsat_len = 0
gpcsrt_len = 0
try:
from working_parameters import google_project_client_smtp_access_token
from working_parameters import google_project_client_smtp_refresh_token
gpcsat_len = len(google_project_client_smtp_access_token)
gpcsrt_len = len(google_project_client_smtp_refresh_token)
assert gpcsat_len > 50 and gpcsat_len < 80
assert gpcsrt_len == 45
except :
configure_email = True
print 'No valid token pair found in {}. Will run the wizard.'.format(parameters_file)
# print 'Lengths : Access Token = {}, Refresh Token = {}.'.format(gpcsat_len, gpcsrt_len)
import fileinput
def update_parms_file(acc, ref, exp, cgm):
acc_parm = "%s = '%s'" % (SMTP_ACCESS, acc)
a = True
ref_parm = "%s = '%s'" % (SMTP_REFRESH, ref)
r = True
exp_parm = "%s = '%s'" % (SMTP_EXPIRY, exp)
x = True
cgm_parm = "%s = '%s'" % (CLIENT_EMAIL, cgm)
g = True
for line in fileinput.input(parameters_file, inplace=1):
if line.startswith(SMTP_ACCESS) :
print acc_parm
a = False
elif line.startswith(SMTP_REFRESH) :
print ref_parm
r = False
elif line.startswith(SMTP_EXPIRY) :
print exp_parm
x = False
elif line.startswith(CLIENT_EMAIL) :
print cgm_parm
g = False
else :
print line,
if a or r or x or g:
with open(parameters_file, "a") as myfile:
myfile.write('\n# Appended automatically . . . ')
if a :
myfile.write('\n%s' % acc_parm)
if r :
myfile.write('\n%s' % ref_parm)
if x :
myfile.write('\n%s' % exp_parm)
if g :
myfile.write('\n%s' % cgm_parm)
myfile.write('\n#\n')
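# For reference, a successful run leaves lines like these in
# working_parameters.py (values below are placeholders, not real credentials):
#
#   google_project_client_smtp_access_token = 'ya29.EXAMPLE_ACCESS_TOKEN'
#   google_project_client_smtp_refresh_token = '1/EXAMPLE_REFRESH_TOKEN'
#   google_project_client_smtp_expiry = '2014-09-15 12:00'
#   google_project_client_email = 'your.addr@gmail.com'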
def prep_smtp(creds, client_email, test_mail = False) :
if client_email == None:
try:
from working_parameters import google_project_client_email
except ImportError:
google_project_client_email = raw_input('Enter the GMail address you want to authorize : ')
if not re.match("[^@]+@[^@]+\.[^@]+", google_project_client_email):
print 'Bad email. Try again, or you can edit "{0}.example" and save as "{0}" before running the tests.'.format(parameters_file)
exit(-1)
else :
google_project_client_email = client_email
expiry = ''
if configure_email :
scope = 'https://mail.google.com/'
print "\n To be able to request authorization from your users by email, you need to authorize this program to use Google's email resender in your name."
print " Visit this url and follow the directions:\n"
print ' %s' % GeneratePermissionUrl(
creds.installed.client_id
, scope
)
authorization_code = raw_input('\n\n * * * Enter verification code: ')
response = AuthorizeTokens (
creds.installed.client_id
, creds.installed.client_secret
, authorization_code
)
access_token = ''
refresh_token = ''
try :
access_token = response['access_token']
refresh_token = response['refresh_token']
except :
print '\n\nServer reported %s' % response
print ' - Did you get the *latest* verification code?'
print ' - Did you get all of it?'
print ' - Did you use exactly the right ID and Secret for "Client for Installed Applications" from the Google API Console?\n(https://www.google.ca/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CC4QFjAA&url=http%3A%2F%2Fcode.google.com%2Fapis%2Fconsole&ei=RgdEUvu-GM754AOeh4GgAQ&usg=AFQjCNFikY2jzXn9SOuZu0UcyS-59LlsTw&sig2=hpYvu7CrTb8royXO9f3nyQ&bvm=bv.53217764,d.dmg)'
exit(-1)
expiry = (datetime.datetime.now() + datetime.timedelta(0,response['expires_in'])).strftime('%Y-%m-%d %H:%M')
print '\nSuccess :'
print ' - Access Token: = %s' % access_token
print ' - Refresh Token: %s'% refresh_token
print ' - Access Token expires at : %s' % expiry
print 'Appending latest tokens to the bottom of the file "{}". . . '.format(parameters_file)
update_parms_file(access_token, refresh_token, expiry, google_project_client_email)
print ' . . done.\n'
else :
print 'An SMTP access token pair is already registered in "{}"'.format(parameters_file)
access_token = google_project_client_smtp_access_token
refresh_token = google_project_client_smtp_refresh_token
smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
smtp_conn.set_debuglevel(False)
smtp_conn.ehlo('test')
smtp_conn.starttls()
# Temporary token...
auth_string = GenerateOAuth2String (
google_project_client_email
, access_token
, base64_encode = False
)
### print auth_string
# Preparing test email envelope . .
title = 'Trash this email'
body = 'Congratulations. You have fully enabled mail transfer through Google SMTP.'
envelope = 'From: %s\nTo: %s\nSubject: %s\n\n%s' % (
google_project_client_email
, google_project_client_email
, title
, body)
if test_mail :
print ' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print envelope
print ' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
try :
smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
print 'Sending . . . '
smtp_conn.sendmail(google_project_client_email, google_project_client_email, envelope)
print '. . . sent!\n'
except smtplib.SMTPSenderRefused as sr :
if sr[0] == 530 :
print 'Refresh required: Using %s' % refresh_token
access_token = RefreshToken(creds.installed.client_id, creds.installed.client_secret, refresh_token)
print 'New token : %s' % access_token
# Rebuild the auth string with the refreshed access token before retrying
auth_string = GenerateOAuth2String(google_project_client_email, access_token, base64_encode = False)
smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
try :
smtp_conn.sendmail(google_project_client_email, google_project_client_email, envelope)
except smtplib.SMTPSenderRefused as sr :
print sr
if sr[0] == 535 :
print 'The access token is correct. Maybe the user id is wrong?'
print '¿ Are you sure that <[{0}]> authorized <[{0}]> ?'.format(google_project_client_email)
exit(-1)
else :
print 'No test mail sent.'
return
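# For reference, the (pre-base64) string returned by GenerateOAuth2String above
# is expected to follow Gmail's XOAUTH2 SASL layout, roughly (illustrative,
# based on the downloaded oauth2.py helper, not defined in this file):
#
#   auth_string = 'user=%s\1auth=Bearer %s\1\1' % (user_email, access_token)
#   smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))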
pth = os.path.realpath(__file__)
PROG = pth.split(os.sep)[pth.count(os.sep)]
desc = 'Get the access and refresh tokens for SMTP access to your GMail account.'
desc += ' The values are added to the file {}.'.format(parameters_file)
desc += ' If file {} already has all the necessary parameters, no'.format(parameters_file)
desc += ' action is taken.'
'''
msg_c = "to drop and create new credentials"
msg_k = "The identity key of a Google Spreadsheets workbook."
msg_r = "Row in Tasks sheet at which to start processing."
'''
def get():
usage = "usage: {} [options] arg".format(PROG)
parser = argparse.ArgumentParser(description=desc, prog=PROG)
msg_j = 'A json file of Google OAuth credentials from '
msg_j += 'https://console.developers.google.com/ » [APIs & auth] » '
msg_j += '[Credentials] » [Client ID for native application]'
parser.add_argument(
'-j'
, '--client_id_json'
, help=msg_j
, required=False
)
parser.add_argument(
'-e'
, '--client_email'
, help='Your full GMail address [e.g. your.addr@gmail.com]'
, default=None
, required=False
)
parser.add_argument(
'-m'
, '--test_mail'
, help='Send a test email? (True|False) Default = False'
, default=False
, required=False
)
return parser.parse_args()
def main():
args = get()
creds = loadGoogleJSON.getCreds(args.client_id_json)
if creds != None:
open(parameters_file, 'a').close()
oauth_credentials = prep_smtp(creds, args.client_email, args.test_mail)
return
if __name__ == '__main__':
handler = logging.FileHandler('python.log', mode='a')
logger = logging.getLogger(PROG)
logger.addHandler(handler)
logger.setLevel('DEBUG')
# logger.setLevel('WARNING')
main()
exit(0)
|
martinhbramwell/gspread_HelloOAuthWorld
|
authorize_SMTP.py
|
Python
|
agpl-3.0
| 11,313
|
[
"VisIt"
] |
906f1f58e6ebe089d54b80f3aa07990fd17e07fbc75d863ac685b79c968ec9c9
|
'''Term creation.'''
import antlr
from aterm import exception
from aterm import term
from aterm import lexer
from aterm import parser
class _Singleton(type):
'''Metaclass for the Singleton design pattern. Based on
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/102187
'''
def __init__(mcs, name, bases, dic):
super(_Singleton, mcs).__init__(name, bases, dic)
mcs.__instance = None
def __call__(mcs, *args, **kargs):
if mcs.__instance is None:
mcs.__instance = super(_Singleton, mcs).__call__(*args, **kargs)
return mcs.__instance
class Factory(object):
'''This class is responsible for making new terms, either by parsing
from strings or streams, or via one of the "make" methods.'''
__metaclass__ = _Singleton
# TODO: implement maximal sharing
MAX_PARSE_CACHE_LEN = 512
# TODO: cache match and build patterns too
def __init__(self):
self.parseCache = {}
self.__nil = term.Nil(self)
def makeInt(self, value):
'''Creates a new integer literal term'''
return term.Integer(self, value)
def makeReal(self, value):
'''Creates a new real literal term'''
return term.Real(self, value)
def makeStr(self, value):
'''Creates a new string literal term'''
return term.Str(self, value)
def makeNil(self):
'''Creates a new empty list term'''
return self.__nil
def makeCons(self, head, tail):
'''Creates a new extended list term'''
return term.Cons(self, head, tail)
def makeList(self, seq):
'''Creates a new list from a sequence.'''
accum = self.makeNil()
for elm in reversed(seq):
accum = self.makeCons(elm, accum)
return accum
def makeTuple(self, args = None, annotations = None):
'''Creates a new tuple term'''
return self.makeAppl("", args, annotations)
def makeAppl(self, name, args = None, annotations = None):
'''Creates a new application term'''
if args is None:
args = ()
if annotations is None:
annotations = self.makeNil()
return term.Appl(self, name, args, annotations)
def coerce(self, value, name = None):
'''Coerce an object to a term. Value must be an int, a float, a string,
a sequence of terms, or a term.'''
if isinstance(value, term.Term):
return value
elif isinstance(value, (int, long)):
return self.makeInt(value)
elif isinstance(value, float):
return self.makeReal(value)
elif isinstance(value, basestring):
return self.makeStr(value)
elif isinstance(value, list):
return self.makeList(value)
elif isinstance(value, tuple):
return self.makeList(value)
else:
msg = "argument"
if not name is None:
msg += " " + name
msg += " is neither a term, a literal, or a list: "
msg += repr(value)
raise TypeError(msg)
def _parse(self, lexer):
'''Creates a new term by parsing a string.'''
p = Parser(lexer)
try:
return p.term()
except antlr.ANTLRException, exc:
raise exception.ParseError(str(exc))
def readFromTextFile(self, fp):
'''Creates a new term by parsing from a text stream.'''
return self._parse(lexer.Lexer(fp = fp))
def parse(self, buf):
'''Creates a new term by parsing a string.'''
try:
return self.parseCache[buf]
except KeyError:
pass
result = self._parse(lexer.Lexer(buf))
if len(self.parseCache) > self.MAX_PARSE_CACHE_LEN:
# TODO: use a LRU cache policy
self.parseCache.clear()
self.parseCache[buf] = result
return result
def match(self, pattern, term):
'''Matches the term to a string pattern and a list of arguments.
'''
assert isinstance(pattern, basestring)
from aterm.match import Parser, Match
p = Parser(lexer.Lexer(pattern))
try:
matcher = p.term()
except antlr.ANTLRException, exc:
raise exception.ParseError(str(exc))
mo = Match()
if matcher.visit(term, mo):
return mo
else:
return None
def make(self, pattern, *args, **kargs):
'''Creates a new term from a string pattern and a list of arguments.
First the string pattern is parsed, then the holes in
the pattern are filled with the supplied arguments.
'''
assert isinstance(pattern, basestring)
from aterm.build import Parser
p = Parser(lexer.Lexer(pattern))
try:
builder = p.term()
except antlr.ANTLRException, exc:
raise exception.ParseError(str(exc))
_args = []
for i, arg in enumerate(args):
    _args.append(self.coerce(arg, str(i)))
_kargs = {}
for name, value in kargs.iteritems():
_kargs[name] = self.coerce(value, "'" + name + "'")
return builder.build(*_args, **_kargs)
factory = Factory()
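# A minimal usage sketch for the module-level factory (illustrative only; the
# exact textual syntax accepted by parse()/make()/match() is defined by the
# aterm grammar, which is not shown in this file):
#
#   t = factory.parse('Add(Lit(1),Lit(2))')   # parse a textual application term
#   n = factory.makeInt(3)                    # build literal terms directly
#   l = factory.makeList([t, n])              # cons-list [Add(Lit(1),Lit(2)),3]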
class Parser(parser.Parser):
'''Parse a textual description of the term.'''
def handleInt(self, value):
return factory.makeInt(value)
def handleReal(self, value):
return factory.makeReal(value)
def handleStr(self, value):
return factory.makeStr(value)
def handleNil(self):
return factory.makeNil()
def handleCons(self, head, tail):
return factory.makeCons(head, tail)
def handleAppl(self, name, args, annos = None):
return factory.makeAppl(name, args, annos)
def handleWildcard(self):
raise exception.ParseError('wildcard in term')
def handleVar(self, name):
raise exception.ParseError('variable in term')
def handleApplCons(self, name, args, annos = None):
assert False
|
mewbak/idc
|
aterm/factory.py
|
Python
|
lgpl-2.1
| 5,214
|
[
"VisIt"
] |
61cd3c239cdbcc56771f182fd8e2f05b9ba8c82a6e2ccf175763eedd2f511a7d
|
# runs after the job (and after the default post-filter)
import sets, os
from galaxy import jobs
def exec_before_process(app, inp_data, out_data, param_dict, tool=None):
"""Sets the name of the data"""
dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] )
if len(dbkeys) != 1:
raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>'
def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
"""Verify the output data after each run"""
items = out_data.items()
for name, data in items:
try:
err_msg, err_flag = '', False
if data.info and data.info[0] != 'M':
data.peek = 'no peek'
os.remove( data.file_name )
err_flag = True
if err_flag:
raise Exception(err_msg)
except Exception, exc:
data.blurb = jobs.JOB_ERROR
data.state = jobs.JOB_ERROR
|
jmchilton/galaxy-central
|
tools/operations/operation_filter.py
|
Python
|
mit
| 1,016
|
[
"Galaxy"
] |
a8305ee6f7b1341f8129a21f2d539ab730b34e86437c458a9dd664ff5640aa0e
|
#importing modules
from datetime import datetime, timedelta
import pandas as pd
import quandl
import DBUpdateConfig
from sqlalchemy import create_engine
from tqdm import tqdm
import math
import sys
def getNatureAndColor(row):
open = row.Open
close = row.Close
low = row.Low
high = row.High
body_length = 0
stick_length = 0
color = 'green'
if close >= open:
color = 'green'
body_length = close - open
if open > close:
color = 'red'
body_length = open - close
upper_stick_length = 0
lower_stick_length = 0
if color == 'green':
upper_stick_length = high - close
lower_stick_length = open - low
else:
upper_stick_length = high - open
lower_stick_length = close - low
stick_length = upper_stick_length + lower_stick_length
if stick_length >= body_length:
nature = 'boring'
else:
nature = 'exciting'
return nature, color
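# Worked example for the classification above (hypothetical OHLC row):
# Open=100, Close=110, High=111, Low=99 -> body_length=10, stick_length=1+1=2,
# stick_length < body_length, so the candle is ('exciting', 'green').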
def getIntervalLabel(row, intervalType):
if intervalType == 'weekly':
return str(row.Date.isocalendar()[0]) + '-' + str(row.Date.isocalendar()[1])
if intervalType == 'monthly':
return row.Date.strftime('%y-%m')
if intervalType == 'quarterly':
return row.Date.strftime('%y') + str(int(math.ceil(row.Date.month / float(3))))
if intervalType == 'yearly':
return row.Date.strftime('%y')
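# Illustrative label formats produced above for a row dated 2017-09-15:
#   weekly    -> '2017-37'  (ISO year and week number)
#   monthly   -> '17-09'
#   quarterly -> '173'      (two-digit year + quarter number)
#   yearly    -> '17'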
def shapeData(data, ticker, intervalType):
data.reset_index(inplace=True)
data.drop(['Last', 'Total Trade Quantity', 'Turnover (Lacs)'], axis=1, inplace=True)
data.fillna(value=0, inplace=True)
if intervalType != 'daily':
indexOfIntervals = {}
orderedIntervals = []
for index, row in data.iterrows():
interval = getIntervalLabel(row, intervalType)
if interval not in indexOfIntervals:
indexOfIntervals[interval] = []
indexOfIntervals[interval].append(index)
orderedIntervals.append(interval)
else:
indexOfIntervals[interval].append(index)
listOfDicts = []
for intervalLabel in orderedIntervals:
intervalRow = {'Open': None, 'Close': None, 'High': None, 'Low': 999999999999999999999, 'Date': None}
for index, row in data.ix[indexOfIntervals[intervalLabel]].iterrows():
if index == indexOfIntervals[intervalLabel][0]:
intervalRow['Open'] = row.Open
intervalRow['Date'] = row.Date
if index == indexOfIntervals[intervalLabel][len(indexOfIntervals[intervalLabel]) - 1]:
intervalRow['Close'] = row.Close
if row.High > intervalRow['High']:
intervalRow['High'] = row.High
if row.Low < intervalRow['Low']:
intervalRow['Low'] = row.Low
listOfDicts.append(intervalRow)
data = pd.DataFrame(listOfDicts)
else:
dateYearAgo = datetime.now() - timedelta(days=365)
data = data[data.Date >= dateYearAgo]
for index, row in data.iterrows():
nature, color = getNatureAndColor(row)
data.set_value(index, 'color', color)
data.set_value(index, 'nature', nature)
pyDateTimeObj = row.Date.to_pydatetime()
epoch = (pyDateTimeObj - datetime(1970, 1, 1)).total_seconds()
data.set_value(index, 'Date1', epoch)
del data['Date']
data.rename(columns={'Date1': 'Date'}, inplace=True)
data['ticker'] = ticker
data = data.iloc[::-1]
data.reset_index(inplace=True)
del data['index']
return data
if __name__ == '__main__':
print 'Connect to postgres engine'
quandl.ApiConfig.api_key = 'GX3otZafamJ5s9zfz7nR'
engine = create_engine('postgresql://' + DBUpdateConfig.user + ':' + DBUpdateConfig.password + '@' + DBUpdateConfig.host + ':5432/' + DBUpdateConfig.database)
print 'Connected'
print '###################################'
connection = engine.connect()
# FOR INCREMENTAL POINTER CALCULATION
if len(sys.argv) > 1:
ticker = sys.argv[1]
# deleteSelective = connection.execute('delete from stockapi_tickers where ticker = \'' + ticker + '\'')
pbar = tqdm([ticker])
else:
resoverall = connection.execute('select * from stockapi_tickers a, stockapi_userinterests b where a.id = b.ticker_id and b.interested = true')
tickers = pd.DataFrame(resoverall.fetchall())
tickers.columns = resoverall.keys()
pbar = tqdm(tickers.Code.tolist())
stock_dataframes = []
#fetching pointers for all tickers
for ticker in pbar:
pbar.set_description('Processing ' + ticker)
try:
rawData = quandl.get('NSE/' + ticker)
except Exception as e:
continue
for interval in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly']:
data = shapeData(rawData.copy(), ticker, interval)
multiplier = 2
phase2Pointers = {
'Freshness': 0,
'Trend': 0,
'Gap up': 0,
'Time Spend': 0,
'High': 0,
'Dividend': 0,
'Earning': 0
}
# $$$$$$$$$$$$$$$$$$$$$
# Finding first pointer (entry > low)
# $$$$$$$$$$$$$$$$$$$$$
startPoint = 0
lowAfterEntry = None
entryFound = False
limitReached = False
entryIndex = 0
# print 'fetching Data for ' + ticker
while not entryFound and not limitReached and len(data) != 0:
P1 = False
P2 = False
P3 = False
P1index = None
P2index = None
P3index = None
for index, row in data[startPoint:].iterrows():
# print row
if index == len(data) - 1:
limitReached = True
if not P1:
if row.color == 'green' and row.nature == 'exciting':
P1 = True
P1index = index
continue
if P1 and not P2:
if row.nature == 'boring' and index == (P1index + 1):
P2 = True
P2index = index
continue
elif row.color == 'green' and row.nature == 'exciting':
P1 = True
P1index = index
continue
else:
P1 = False
P1index = None
if P1 and P2 and not P3:
if row.nature == 'exciting':
P3 = True
P3index = index
continue
if P1 and P2 and P3:
break
# pointers found, now to find the data
if P1 and P2 and P3:
entry = 0
entryAtIndex = 0
for index, row in data[P2index:P3index].iterrows():
if row.nature == 'boring':
# entry_at_index = row.High
entryAtIndex = max(row.Open, row.Close)
if entryAtIndex > entry:
entry = float(entryAtIndex)
entryIndex = index
#finding lowest low
for index, row in data[:entryIndex].iterrows():
if lowAfterEntry is None or row.Low < float(lowAfterEntry):
lowAfterEntry = float(row.Low)
if entry > lowAfterEntry:
startPoint = entryIndex
else:
entryFound = True
stopLoss = None
stopLossAtIndex = 0
#stopLossIndex = 0
for index, row in data[P1index:P3index].iterrows():
# if (row.color == 'green' and row.nature == 'exciting') or (row.nature == 'boring'):
stopLossAtIndex = row.Low
if stopLoss is None or stopLossAtIndex < stopLoss:
stopLoss = float(stopLossAtIndex)
# print stopLoss
target = ((entry - stopLoss) * multiplier) + entry
entry = round(entry, 2)
target = round(target, 2)
stopLoss = round(stopLoss, 2)
phase2Pointers['Freshness'] = 1
if not entryFound:
entry = 0
stopLoss = 0
target = 0
if interval == 'daily' and entryFound:
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Finding pointer 2
# if 7th week avg<= current week avg then 1
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
lastWeekFound = None
weekAverage = {}
for index, row in data.iterrows():
# gives week number of the year
week = datetime.fromtimestamp(row.Date).isocalendar()[1]
if week not in weekAverage:
weekAverage[week] = []
weekAverage[week].append(row.Close)
else:
weekAverage[week].append(row.Close)
lastWeekFound = week
# break after finding 8th week
if len(weekAverage.keys()) > 7:
break
# print weekAverage
if lastWeekFound is not None and len(weekAverage.keys()) > 7:
del weekAverage[lastWeekFound]
seventhWeek = weekAverage.keys()[0]
currentWeek = weekAverage.keys()[6]
seventhAverage = sum(weekAverage[seventhWeek]) / float(len(weekAverage[seventhWeek]))
currentAverage = sum(weekAverage[currentWeek]) / float(len(weekAverage[currentWeek]))
if seventhAverage <= currentAverage:
phase2Pointers['Trend'] = 1
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Pointer 3 should be green
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
if data.iloc[P3index].color == 'green':
phase2Pointers['Gap up'] += 1
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# While finding pointers 1 and 2: if the low of the exciting body (open) > immediate boring candle body high (open if it is red, else close)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
excitingBodyLow = data.iloc[P1index].Open
colorOfNextBoringCandle = data.iloc[P1index + 1].color
if colorOfNextBoringCandle == 'green':
if excitingBodyLow > data.iloc[P1index + 1].Close:
phase2Pointers['Gap up'] += 1
else:
if excitingBodyLow > data.iloc[P1index + 1].Open:
phase2Pointers['Gap up'] += 1
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# number of boring candles between the green exciting candle (pointer 1) and the exciting candle (pointer 3)
# <=3 then 2 points
# >3 and <=6 then 1 points
# otherwise 0
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
boringCandleCount = P3index - P1index - 1
if boringCandleCount <= 3:
phase2Pointers['Time Spend'] += 2
if 3 < boringCandleCount <= 6:
phase2Pointers['Time Spend'] += 1
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Find the high before the entry
# Entry + (Entry-stop loss)*6 >=High -> 2 points
# Entry + (Entry-stop loss)*4 >=High -> 1 points
# otherwise 0
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
high = 0
for index, row in data[:entryIndex].iterrows():
if row.High > high:
high = row.High
if (entry + (entry - stopLoss) * 6) <= high:
phase2Pointers['High'] += 2
elif (entry + (entry - stopLoss) * 4) <= high:
phase2Pointers['High'] += 1
totalPoints = sum(phase2Pointers.values())
stock_dataframes.append({'ticker': ticker, 'entry': entry, 'stopLoss': stopLoss, 'target': target, 'gapUp': phase2Pointers['Gap up'], 'trend': phase2Pointers['Trend'], 'timeSpend': phase2Pointers['Time Spend'], 'high': phase2Pointers['High'], 'freshness': phase2Pointers['Freshness'], 'dividend': phase2Pointers['Dividend'], 'earning': phase2Pointers['Earning'], 'totalPoints': totalPoints, 'interval': interval})
print 'Updating Database'
if len(sys.argv) > 1:
engine.execute('delete from stockapi_pointers where ticker = \'' + ticker + '\'')
else:
engine.execute('delete from stockapi_pointers where true')
final_df = pd.DataFrame(stock_dataframes)
final_df.to_sql('stockapi_pointers', engine, if_exists='append', index=False)
print 'Update Complete. Success'
|
ashish-padakannaya/TradingPlatform
|
jobs/DBUpdateScript.py
|
Python
|
mit
| 14,157
|
[
"exciting"
] |
049e397b686fcb3a5fd306948de97eb654b17e084083d6b5a3aa706d754694d7
|
#!/usr/bin/env python
import sys,os,re
import argparse
import subprocess as sub
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", help="fasta formatted file containing the contigs", required=True)
parser.add_argument("-f", "--forward_reads", help="forward reads used to generate the assembly, this will be used to calculate percent assembled", required=False)
parser.add_argument("-r", "--reverse_reads", help="reverse reads used to generate the assembly, this will be used to calculate percent assembled", required=False)
parser.add_argument("-p", "--percent_aligned", help="Calculate the number of reads that map back to the alignment. This uses bowtie2 and can take some time. It gives an approximation of how representative the contigs are of the raw reads. Of course, it requires that bowtie2 is installed and in your path.", required=False, action='store_true')
args = parser.parse_args()
if args.forward_reads:
args.percent_aligned = True
#Remove spaces between fasta sequences (to determine contig sequence lengths)
def convert_to_single_fasta():
command = '''awk '/^>/ {printf("\n%s\n",$0);next; } { printf("%s",$0);} END {printf("\n");}'''
OUTF = open(os.path.abspath("{0}.single".format(args.input_file)), "w")
with open(args.input_file) as f:
curr_seq = ""
for l in f:
if l[0] == ">":
if curr_seq != "":
OUTF.write(curr_seq+"\n")
curr_seq = ""
OUTF.write(l)
else:
curr_seq = curr_seq + l.rstrip()
OUTF.close()
args.input_file = os.path.abspath("{0}.single".format(args.input_file))
#Check if the input file is in multi or single-lined fasta format
"""
This is a VERY simple check and may give errors. All it does is read in the first 3 lines
and make sure the 3rd line starts with a fasta header (e.g. ">"). I can see this easily
going wrong in the future.
"""
def check_fasta_format(fname):
lines = []
with open(fname) as f:
lines.extend(f.readline() for i in xrange(3))
if lines[2][0] != ">":
print ""
print "Detected file is in multi-line fasta format."
print "Converting file to single-line fasta format."
convert_to_single_fasta()
print "Finished conversion, calculating stats."
print ""
#Count the number of sequences in the file (based on the fasta headers)
def num_seqs(fname):
with open(fname) as f:
count=0
for l in f:
if l[0] == ">":
count = count+1
return count
def calc_N50(seq_len_array, total_size):
running_total = 0
n50 = 0
for i, elem in enumerate(seq_len_array):
running_total = running_total + elem
if running_total < (total_size / 2):
pass
elif running_total == (total_size / 2):
n50 = ((seq_len_array[i] + seq_len_array[i+1])/2)
break
else:
n50 = elem
break
return(n50)
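# Worked example for calc_N50 (lengths already sorted ascending, as in calc_stats):
#   seq_len_array = [2, 3, 4, 5, 6], total_size = 20
#   running totals: 2, 5, 9, 14 -> 14 > 10 (half the total), so N50 = 5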
#calculate N50, average length, number >1kb
def calc_stats(fname):
#set up list of contig sizes & sort size
seq_length_array = []
with open(fname) as f:
for l in f:
if l[0] == ">":
pass
else:
seq_len = len(l)-1
seq_length_array.append(seq_len)
seq_length_array.sort(key=int)
num_seqs = len(seq_length_array)
largest_contig = seq_length_array[len(seq_length_array)-1]
total_size = sum(seq_length_array)
average_contig_size = total_size / len(seq_length_array)
n50 = calc_N50(seq_length_array, total_size)
#contigs > 1kb
contig_over_1kb = [n for n in seq_length_array if n > 999]
num_contig_over_1kb = len(contig_over_1kb)
len_1kb = sum(contig_over_1kb)
avg_1kb = len_1kb / num_contig_over_1kb
n50_1kb = calc_N50(contig_over_1kb, len_1kb)
#contigs > 3kb
contig_over_3kb = [n for n in seq_length_array if n > 2999]
num_contig_over_3kb = len(contig_over_3kb)
len_3kb = sum(contig_over_3kb)
avg_3kb = len_3kb / num_contig_over_3kb
n50_3kb = calc_N50(contig_over_3kb, len_3kb)
return(
num_seqs, n50, total_size, average_contig_size,
num_contig_over_1kb, n50_1kb, len_1kb, avg_1kb,
num_contig_over_3kb, n50_3kb, len_3kb, avg_3kb,
largest_contig,
)
def run_bowtie2_scaffold(contig):
print "Building the scaffold file for Bowtie2."
outfile = os.path.abspath(contig)
outfile = "{0}.bowtie".format(outfile)
sub.call(["bowtie2-build", args.input_file, outfile], stdout=sub.PIPE, stderr=sub.PIPE)
def run_bowtie2(contig, forward, reverse):
print "Mapping raw reads using Bowtie2, this may take awhile..."
outfile = os.path.abspath(contig)
index_file = "{0}.bowtie".format(outfile)
outfile = "{0}.sam".format(outfile)
output = sub.check_output(["bowtie2", "-x", index_file, "-1", forward, "-2", reverse, "-S", outfile], stderr=sub.STDOUT)
percent_aligned = 0
for line in output.split(os.linesep):
if re.search("overall alignment", line):
line_fields = line.split()
percent_aligned = line_fields[0]
return(percent_aligned)
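# The value returned above is scraped from bowtie2's summary output, whose
# final line typically looks like (illustrative):
#   95.21% overall alignment rate
# in which case percent_aligned is the string '95.21%'.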
def clean_up():
#remove the bowtie & the sam files
filelist = [f for f in os.listdir(".") if f.endswith(".bt2") ]
for f in filelist:
os.remove(f)
os.remove("{0}.sam".format(args.input_file))
##Capturing all the outputs
check_fasta_format(args.input_file)
num_seqs = num_seqs(args.input_file)
seq_lens = calc_stats(args.input_file)
if args.percent_aligned:
run_bowtie2_scaffold(args.input_file)
percent_assem = run_bowtie2(args.input_file, args.forward_reads, args.reverse_reads)
clean_up()
row_format = "{:<30}" * 2
print
print "All Contigs"
print row_format.format(*["Number of Contigs", seq_lens[0]])
print row_format.format(*["N50", seq_lens[1]])
print row_format.format(*["Total Assembly Size", seq_lens[2]])
print row_format.format(*["Average Contig Size", seq_lens[3]])
print
print "Contigs > 1kb"
print row_format.format(*["Number of Contigs >1kb", seq_lens[4]])
print row_format.format(*["N50 of >1kb", seq_lens[5]])
print row_format.format(*["Total Assembly Size > 1kb", seq_lens[6]])
print row_format.format(*["Average Contig Size > 1kb", seq_lens[7]])
print
print "Contigs > 3kb"
print row_format.format(*["Number of Contigs >3kb", seq_lens[8]])
print row_format.format(*["N50 of >3kb", seq_lens[9]])
print row_format.format(*["Total Assembly Size > 3kb", seq_lens[10]])
print row_format.format(*["Average Contig Size > 3kb", seq_lens[11]])
print
print row_format.format(*["Largest Contig", seq_lens[12]])
if args.percent_aligned:
print row_format.format(*["Percent of Reads used", percent_assem])
|
waoverholt/waoverholt.github.io
|
assets/internal_files/calc_contig_stats.py
|
Python
|
mit
| 6,810
|
[
"Bowtie"
] |
3eb3c353ab6c974d8d166da148ac13ae36cc723bd15f38a601b99a6660aa0a74
|
"""Data flow analysis based on miasm intermediate representation"""
from builtins import range
from collections import namedtuple
from future.utils import viewitems, viewvalues
from miasm.core.utils import encode_hex
from miasm.core.graph import DiGraph
from miasm.ir.ir import AssignBlock, IRBlock
from miasm.expression.expression import ExprLoc, ExprMem, ExprId, ExprInt,\
ExprAssign, ExprOp, ExprWalk, is_function_call
from miasm.expression.simplifications import expr_simp
from miasm.core.interval import interval
from miasm.expression.expression_helper import possible_values
from miasm.analysis.ssa import get_phi_sources_parent_block, \
irblock_has_phi
class ReachingDefinitions(dict):
"""
Computes for each assignblock the set of reaching definitions.
Example:
IR block:
lbl0:
0 A = 1
B = 3
1 B = 2
2 A = A + B + 4
Reach definition of lbl0:
(lbl0, 0) => {}
(lbl0, 1) => {A: {(lbl0, 0)}, B: {(lbl0, 0)}}
(lbl0, 2) => {A: {(lbl0, 0)}, B: {(lbl0, 1)}}
(lbl0, 3) => {A: {(lbl0, 2)}, B: {(lbl0, 1)}}
Source set 'REACHES' in: Kennedy, K. (1979).
A survey of data flow analysis techniques.
IBM Thomas J. Watson Research Division, Algorithm MK
This class is usable as a dictionary whose structure is
{ (block, index): { lvalue: set((block, index)) } }
"""
ircfg = None
def __init__(self, ircfg):
super(ReachingDefinitions, self).__init__()
self.ircfg = ircfg
self.compute()
def get_definitions(self, block_lbl, assignblk_index):
"""Returns the dict { lvalue: set((def_block_lbl, def_index)) }
associated with self.ircfg.@block.assignblks[@assignblk_index]
or {} if it is not yet computed
"""
return self.get((block_lbl, assignblk_index), {})
def compute(self):
"""This is the main fixpoint"""
modified = True
while modified:
modified = False
for block in viewvalues(self.ircfg.blocks):
modified |= self.process_block(block)
def process_block(self, block):
"""
Fetch reach definitions from predecessors and propagate it to
the assignblk in block @block.
"""
predecessor_state = {}
for pred_lbl in self.ircfg.predecessors(block.loc_key):
if pred_lbl not in self.ircfg.blocks:
continue
pred = self.ircfg.blocks[pred_lbl]
for lval, definitions in viewitems(self.get_definitions(pred_lbl, len(pred))):
predecessor_state.setdefault(lval, set()).update(definitions)
modified = self.get((block.loc_key, 0)) != predecessor_state
if not modified:
return False
self[(block.loc_key, 0)] = predecessor_state
for index in range(len(block)):
modified |= self.process_assignblock(block, index)
return modified
def process_assignblock(self, block, assignblk_index):
"""
Updates the reach definitions with values defined at
assignblock @assignblk_index in block @block.
NB: the effect of assignblock @assignblk_index is stored at index
(@block, @assignblk_index + 1).
"""
assignblk = block[assignblk_index]
defs = self.get_definitions(block.loc_key, assignblk_index).copy()
for lval in assignblk:
defs.update({lval: set([(block.loc_key, assignblk_index)])})
modified = self.get((block.loc_key, assignblk_index + 1)) != defs
if modified:
self[(block.loc_key, assignblk_index + 1)] = defs
return modified
ATTR_DEP = {"color" : "black",
"_type" : "data"}
AssignblkNode = namedtuple('AssignblkNode', ['label', 'index', 'var'])
class DiGraphDefUse(DiGraph):
"""Representation of a Use-Definition graph as defined by
Kennedy, K. (1979). A survey of data flow analysis techniques.
IBM Thomas J. Watson Research Division.
Example:
IR block:
lbl0:
0 A = 1
B = 3
1 B = 2
2 A = A + B + 4
Def use analysis:
(lbl0, 0, A) => {(lbl0, 2, A)}
(lbl0, 0, B) => {}
(lbl0, 1, B) => {(lbl0, 2, A)}
(lbl0, 2, A) => {}
"""
def __init__(self, reaching_defs,
deref_mem=False, *args, **kwargs):
"""Instantiate a DiGraph
@blocks: IR blocks
"""
self._edge_attr = {}
# For dot display
self._filter_node = None
self._dot_offset = None
self._blocks = reaching_defs.ircfg.blocks
super(DiGraphDefUse, self).__init__(*args, **kwargs)
self._compute_def_use(reaching_defs,
deref_mem=deref_mem)
def edge_attr(self, src, dst):
"""
Return a dictionary of attributes for the edge between @src and @dst
@src: the source node of the edge
@dst: the destination node of the edge
"""
return self._edge_attr[(src, dst)]
def _compute_def_use(self, reaching_defs,
deref_mem=False):
for block in viewvalues(self._blocks):
self._compute_def_use_block(block,
reaching_defs,
deref_mem=deref_mem)
def _compute_def_use_block(self, block, reaching_defs, deref_mem=False):
for index, assignblk in enumerate(block):
assignblk_reaching_defs = reaching_defs.get_definitions(block.loc_key, index)
for lval, expr in viewitems(assignblk):
self.add_node(AssignblkNode(block.loc_key, index, lval))
read_vars = expr.get_r(mem_read=deref_mem)
if deref_mem and lval.is_mem():
read_vars.update(lval.ptr.get_r(mem_read=deref_mem))
for read_var in read_vars:
for reach in assignblk_reaching_defs.get(read_var, set()):
self.add_data_edge(AssignblkNode(reach[0], reach[1], read_var),
AssignblkNode(block.loc_key, index, lval))
def del_edge(self, src, dst):
super(DiGraphDefUse, self).del_edge(src, dst)
del self._edge_attr[(src, dst)]
def add_uniq_labeled_edge(self, src, dst, edge_label):
"""Adds the edge (@src, @dst) with label @edge_label.
if edge (@src, @dst) already exists, the previous label is overridden
"""
self.add_uniq_edge(src, dst)
self._edge_attr[(src, dst)] = edge_label
def add_data_edge(self, src, dst):
"""Adds an edge representing a data dependency
and sets the label accordingly"""
self.add_uniq_labeled_edge(src, dst, ATTR_DEP)
def node2lines(self, node):
lbl, index, reg = node
yield self.DotCellDescription(text="%s (%s)" % (lbl, index),
attr={'align': 'center',
'colspan': 2,
'bgcolor': 'grey'})
src = self._blocks[lbl][index][reg]
line = "%s = %s" % (reg, src)
yield self.DotCellDescription(text=line, attr={})
yield self.DotCellDescription(text="", attr={})
class DeadRemoval(object):
"""
Do dead removal
"""
def __init__(self, ir_arch, expr_to_original_expr=None):
self.ir_arch = ir_arch
if expr_to_original_expr is None:
expr_to_original_expr = {}
self.expr_to_original_expr = expr_to_original_expr
def add_expr_to_original_expr(self, expr_to_original_expr):
self.expr_to_original_expr.update(expr_to_original_expr)
def is_unkillable_destination(self, lval, rval):
if (
lval.is_mem() or
self.ir_arch.IRDst == lval or
lval.is_id("exception_flags") or
is_function_call(rval)
):
return True
return False
def get_block_useful_destinations(self, block):
"""
Force keeping of specific cases
block: IRBlock instance
"""
useful = set()
for index, assignblk in enumerate(block):
for lval, rval in viewitems(assignblk):
if self.is_unkillable_destination(lval, rval):
useful.add(AssignblkNode(block.loc_key, index, lval))
return useful
def is_tracked_var(self, lval, variable):
new_lval = self.expr_to_original_expr.get(lval, lval)
return new_lval == variable
def find_definitions_from_worklist(self, worklist, ircfg):
"""
Find the definitions of the variables in @worklist by browsing @ircfg
"""
locs_done = set()
defs = set()
while worklist:
found = False
elt = worklist.pop()
if elt in locs_done:
continue
locs_done.add(elt)
variable, loc_key = elt
block = ircfg.get_block(loc_key)
if block is None:
# Consider no sources in incomplete graph
continue
for index, assignblk in reversed(list(enumerate(block))):
for dst, src in viewitems(assignblk):
if self.is_tracked_var(dst, variable):
defs.add(AssignblkNode(loc_key, index, dst))
found = True
break
if found:
break
if not found:
for predecessor in ircfg.predecessors(loc_key):
worklist.add((variable, predecessor))
return defs
def find_out_regs_definitions_from_block(self, block, ircfg):
"""
Find definitions of out regs starting from @block
"""
worklist = set()
for reg in self.ir_arch.get_out_regs(block):
worklist.add((reg, block.loc_key))
ret = self.find_definitions_from_worklist(worklist, ircfg)
return ret
def add_def_for_incomplete_leaf(self, block, ircfg, reaching_defs):
"""
Add valid definitions at end of @block plus out regs
"""
valid_definitions = reaching_defs.get_definitions(
block.loc_key,
len(block)
)
worklist = set()
for lval, definitions in viewitems(valid_definitions):
for definition in definitions:
new_lval = self.expr_to_original_expr.get(lval, lval)
worklist.add((new_lval, block.loc_key))
ret = self.find_definitions_from_worklist(worklist, ircfg)
useful = ret
useful.update(self.find_out_regs_definitions_from_block(block, ircfg))
return useful
def get_useful_assignments(self, ircfg, defuse, reaching_defs):
"""
Mark useful statements using previous reach analysis and defuse
Return a set of triplets (block, assignblk number, lvalue) of
useful definitions
PRE: compute_reach(self)
"""
useful = set()
for block_lbl, block in viewitems(ircfg.blocks):
block = ircfg.get_block(block_lbl)
if block is None:
# skip unknown blocks: won't generate dependencies
continue
block_useful = self.get_block_useful_destinations(block)
useful.update(block_useful)
successors = ircfg.successors(block_lbl)
for successor in successors:
if successor not in ircfg.blocks:
keep_all_definitions = True
break
else:
keep_all_definitions = False
if keep_all_definitions:
useful.update(self.add_def_for_incomplete_leaf(block, ircfg, reaching_defs))
continue
if len(successors) == 0:
useful.update(self.find_out_regs_definitions_from_block(block, ircfg))
else:
continue
# Useful nodes dependencies
for node in useful:
for parent in defuse.reachable_parents(node):
yield parent
def do_dead_removal(self, ircfg):
"""
Remove useless assignments.
This function is used to analyse the relations of a *complete function*.
This means the blocks under study represent a solid full function graph.
Source : Kennedy, K. (1979). A survey of data flow analysis techniques.
IBM Thomas J. Watson Research Division, page 43
@ircfg: IntermediateRepresentation instance
"""
modified = False
reaching_defs = ReachingDefinitions(ircfg)
defuse = DiGraphDefUse(reaching_defs, deref_mem=True)
useful = self.get_useful_assignments(ircfg, defuse, reaching_defs)
useful = set(useful)
for block in list(viewvalues(ircfg.blocks)):
irs = []
for idx, assignblk in enumerate(block):
new_assignblk = dict(assignblk)
for lval in assignblk:
if AssignblkNode(block.loc_key, idx, lval) not in useful:
del new_assignblk[lval]
modified = True
irs.append(AssignBlock(new_assignblk, assignblk.instr))
ircfg.blocks[block.loc_key] = IRBlock(block.loc_key, irs)
return modified
def __call__(self, ircfg):
ret = self.do_dead_removal(ircfg)
return ret
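# A minimal usage sketch for DeadRemoval (assumes an architecture helper
# `ir_arch` exposing get_out_regs(), as used above, and an IRCFG `ircfg`):
#
#   dead_removal = DeadRemoval(ir_arch)
#   modified = dead_removal(ircfg)   # drops assignments proven useless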
def _test_merge_next_block(ircfg, loc_key):
"""
Test if the irblock at @loc_key can be merged with its son
@ircfg: IRCFG instance
@loc_key: LocKey instance of the candidate parent irblock
"""
if loc_key not in ircfg.blocks:
return None
sons = ircfg.successors(loc_key)
if len(sons) != 1:
return None
son = list(sons)[0]
if ircfg.predecessors(son) != [loc_key]:
return None
if son not in ircfg.blocks:
return None
return son
def _do_merge_blocks(ircfg, loc_key, son_loc_key):
"""
Merge two irblocks at @loc_key and @son_loc_key
@ircfg: DiGraphIR
@loc_key: LocKey instance of the parent IRBlock
@son_loc_key: LocKey instance of the son IRBlock
"""
assignblks = []
for assignblk in ircfg.blocks[loc_key]:
if ircfg.IRDst not in assignblk:
assignblks.append(assignblk)
continue
affs = {}
for dst, src in viewitems(assignblk):
if dst != ircfg.IRDst:
affs[dst] = src
if affs:
assignblks.append(AssignBlock(affs, assignblk.instr))
assignblks += ircfg.blocks[son_loc_key].assignblks
new_block = IRBlock(loc_key, assignblks)
ircfg.discard_edge(loc_key, son_loc_key)
for son_successor in ircfg.successors(son_loc_key):
ircfg.add_uniq_edge(loc_key, son_successor)
ircfg.discard_edge(son_loc_key, son_successor)
del ircfg.blocks[son_loc_key]
ircfg.del_node(son_loc_key)
ircfg.blocks[loc_key] = new_block
def _test_jmp_only(ircfg, loc_key, heads):
"""
If irblock at @loc_key sets only IRDst to an ExprLoc, return the
corresponding loc_key target.
Avoid creating predecessors for heads LocKeys.
Return None in other cases.
@ircfg: IRCFG instance
@loc_key: LocKey instance of the candidate irblock
@heads: LocKey heads of the graph
"""
if loc_key not in ircfg.blocks:
return None
irblock = ircfg.blocks[loc_key]
if len(irblock.assignblks) != 1:
return None
items = list(viewitems(dict(irblock.assignblks[0])))
if len(items) != 1:
return None
if len(ircfg.successors(loc_key)) != 1:
return None
# Don't create predecessors on heads
dst, src = items[0]
assert dst.is_id("IRDst")
if not src.is_loc():
return None
dst = src.loc_key
if loc_key in heads:
predecessors = set(ircfg.predecessors(dst))
predecessors.difference_update(set([loc_key]))
if predecessors:
return None
return dst
def _relink_block_node(ircfg, loc_key, son_loc_key, replace_dct):
"""
Link loc_key's parents to parents directly to son_loc_key
"""
for parent in set(ircfg.predecessors(loc_key)):
parent_block = ircfg.blocks.get(parent, None)
if parent_block is None:
continue
new_block = parent_block.modify_exprs(
lambda expr:expr.replace_expr(replace_dct),
lambda expr:expr.replace_expr(replace_dct)
)
# Link parent to new dst
ircfg.add_uniq_edge(parent, son_loc_key)
# Unlink block
ircfg.blocks[new_block.loc_key] = new_block
ircfg.del_node(loc_key)
def _remove_to_son(ircfg, loc_key, son_loc_key):
"""
Merge irblocks; The final block has the @son_loc_key loc_key
Update references
Condition:
- irblock at @loc_key is a pure jump block
- @loc_key is not an entry point (can be removed)
@irblock: IRCFG instance
@loc_key: LocKey instance of the parent irblock
@son_loc_key: LocKey instance of the son irblock
"""
# Ircfg loop => don't mess
if loc_key == son_loc_key:
return False
# Unlink block destinations
ircfg.del_edge(loc_key, son_loc_key)
replace_dct = {
ExprLoc(loc_key, ircfg.IRDst.size):ExprLoc(son_loc_key, ircfg.IRDst.size)
}
_relink_block_node(ircfg, loc_key, son_loc_key, replace_dct)
ircfg.del_node(loc_key)
del ircfg.blocks[loc_key]
return True
def _remove_to_parent(ircfg, loc_key, son_loc_key):
"""
Merge irblocks; The final block has the @loc_key loc_key
Update references
Condition:
- irblock at @loc_key is a pure jump block
- @son_loc_key is not an entry point (can be removed)
@irblock: IRCFG instance
@loc_key: LocKey instance of the parent irblock
@son_loc_key: LocKey instance of the son irblock
"""
# Ircfg loop => don't mess
if loc_key == son_loc_key:
return False
# Unlink block destinations
ircfg.del_edge(loc_key, son_loc_key)
old_irblock = ircfg.blocks[son_loc_key]
new_irblock = IRBlock(loc_key, old_irblock.assignblks)
ircfg.blocks[son_loc_key] = new_irblock
ircfg.add_irblock(new_irblock)
replace_dct = {
ExprLoc(son_loc_key, ircfg.IRDst.size):ExprLoc(loc_key, ircfg.IRDst.size)
}
_relink_block_node(ircfg, son_loc_key, loc_key, replace_dct)
ircfg.del_node(son_loc_key)
del ircfg.blocks[son_loc_key]
return True
def merge_blocks(ircfg, heads):
"""
This function modifies @ircfg to apply the following transformations:
- group an irblock with its son if the irblock has one and only one son and
this son has one and only one parent (spaghetti code).
- if an irblock is only made of an assignment to IRDst with a given label,
this irblock is dropped and its parent destination targets are
updated. The irblock must have a parent (avoid deleting the function head)
- if an irblock is a head of the graph and is only made of an assignment to
IRDst with a given label, this irblock is dropped and its son becomes the
head. References are fixed
This function avoids creating predecessors on heads
Return True if at least an irblock has been modified
@ircfg: IRCFG instance
@heads: loc_key to keep
"""
modified = False
todo = set(ircfg.nodes())
while todo:
loc_key = todo.pop()
# Test merge block
son = _test_merge_next_block(ircfg, loc_key)
if son is not None and son not in heads:
_do_merge_blocks(ircfg, loc_key, son)
todo.add(loc_key)
modified = True
continue
# Test jmp only block
son = _test_jmp_only(ircfg, loc_key, heads)
if son is not None and loc_key not in heads:
ret = _remove_to_son(ircfg, loc_key, son)
modified |= ret
if ret:
todo.add(loc_key)
continue
# Test head jmp only block
if (son is not None and
son not in heads and
son in ircfg.blocks):
# jmp only test done previously
ret = _remove_to_parent(ircfg, loc_key, son)
modified |= ret
if ret:
todo.add(loc_key)
continue
return modified
def remove_empty_assignblks(ircfg):
"""
Remove empty assignblks in irblocks of @ircfg
Return True if at least an irblock has been modified
@ircfg: IRCFG instance
"""
modified = False
for loc_key, block in list(viewitems(ircfg.blocks)):
irs = []
block_modified = False
for assignblk in block:
if len(assignblk):
irs.append(assignblk)
else:
block_modified = True
if block_modified:
new_irblock = IRBlock(loc_key, irs)
ircfg.blocks[loc_key] = new_irblock
modified = True
return modified
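# A typical cleanup loop combining the passes above (illustrative sketch;
# `ircfg` is an IRCFG instance and `heads` the set of loc_keys to preserve):
#
#   modified = True
#   while modified:
#       modified = merge_blocks(ircfg, heads)
#       modified |= remove_empty_assignblks(ircfg)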
class SSADefUse(DiGraph):
"""
Generate DefUse information from SSA transformation
Links are not valid for ExprMem.
"""
def add_var_def(self, node, src):
index2dst = self._links.setdefault(node.label, {})
dst2src = index2dst.setdefault(node.index, {})
dst2src[node.var] = src
def add_def_node(self, def_nodes, node, src):
if node.var.is_id():
def_nodes[node.var] = node
def add_use_node(self, use_nodes, node, src):
sources = set()
if node.var.is_mem():
sources.update(node.var.ptr.get_r(mem_read=True))
sources.update(src.get_r(mem_read=True))
for source in sources:
if not source.is_mem():
use_nodes.setdefault(source, set()).add(node)
def get_node_target(self, node):
return self._links[node.label][node.index][node.var]
def set_node_target(self, node, src):
self._links[node.label][node.index][node.var] = src
@classmethod
def from_ssa(cls, ssa):
"""
Return a DefUse DiGraph from a SSA graph
@ssa: SSADiGraph instance
"""
graph = cls()
# First pass
# Link line to its use and def
def_nodes = {}
use_nodes = {}
graph._links = {}
for lbl in ssa.graph.nodes():
block = ssa.graph.blocks.get(lbl, None)
if block is None:
continue
for index, assignblk in enumerate(block):
for dst, src in viewitems(assignblk):
node = AssignblkNode(lbl, index, dst)
graph.add_var_def(node, src)
graph.add_def_node(def_nodes, node, src)
graph.add_use_node(use_nodes, node, src)
for dst, node in viewitems(def_nodes):
graph.add_node(node)
if dst not in use_nodes:
continue
for use in use_nodes[dst]:
graph.add_uniq_edge(node, use)
return graph
def expr_has_mem(expr):
"""
Return True if expr contains at least one memory access
@expr: Expr instance
"""
def has_mem(self):
return self.is_mem()
visitor = ExprWalk(has_mem)
return visitor.visit(expr)
class PropagateThroughExprId(object):
"""
Propagate expressions through ExprId
"""
def has_propagation_barrier(self, assignblks):
"""
Return True if propagation cannot cross the @assignblks
@assignblks: list of AssignBlock to check
"""
for assignblk in assignblks:
for dst, src in viewitems(assignblk):
if is_function_call(src):
return True
if dst.is_mem():
return True
return False
def is_mem_written(self, ssa, node_a, node_b):
"""
Return True if memory is written at least once between @node_a and
@node_b
@node_a: AssignblkNode representing the start position
@node_b: AssignblkNode representing the end position
"""
block_b = ssa.graph.blocks[node_b.label]
nodes_to_do = self.compute_reachable_nodes_from_a_to_b(ssa.graph, node_a.label, node_b.label)
if node_a.label == node_b.label:
# src is dst
assert nodes_to_do == set([node_a.label])
if self.has_propagation_barrier(block_b.assignblks[node_a.index:node_b.index]):
return True
else:
# Check everyone but node_a.label and node_b.label
for loc in nodes_to_do - set([node_a.label, node_b.label]):
if loc not in ssa.graph.blocks:
continue
block = ssa.graph.blocks[loc]
if self.has_propagation_barrier(block.assignblks):
return True
# Check node_a.label partially
block_a = ssa.graph.blocks[node_a.label]
if self.has_propagation_barrier(block_a.assignblks[node_a.index:]):
return True
if nodes_to_do.intersection(ssa.graph.successors(node_b.label)):
# There is a path from node_b.label to node_b.label => Check node_b.label fully
if self.has_propagation_barrier(block_b.assignblks):
return True
else:
# Check node_b.label partially
if self.has_propagation_barrier(block_b.assignblks[:node_b.index]):
return True
return False
def compute_reachable_nodes_from_a_to_b(self, ssa, loc_a, loc_b):
reachables_a = set(ssa.reachable_sons(loc_a))
reachables_b = set(ssa.reachable_parents_stop_node(loc_b, loc_a))
return reachables_a.intersection(reachables_b)
def propagation_allowed(self, ssa, to_replace, node_a, node_b):
"""
Return True if we can replace @node_a source present in @to_replace into
@node_b
@node_a: AssignblkNode position
@node_b: AssignblkNode position
"""
if not expr_has_mem(to_replace[node_a.var]):
return True
if self.is_mem_written(ssa, node_a, node_b):
return False
return True
def get_var_definitions(self, ssa):
"""
Return a dictionary linking variable to its assignment location
@ssa: SSADiGraph instance
"""
ircfg = ssa.graph
def_dct = {}
for node in ircfg.nodes():
block = ircfg.blocks.get(node, None)
if block is None:
continue
for index, assignblk in enumerate(block):
for dst, src in viewitems(assignblk):
if not dst.is_id():
continue
if dst in ssa.immutable_ids:
continue
assert dst not in def_dct
def_dct[dst] = node, index
return def_dct
def get_candidates(self, ssa, head, max_expr_depth):
def_dct = self.get_var_definitions(ssa)
defuse = SSADefUse.from_ssa(ssa)
to_replace = {}
node_to_reg = {}
for node in defuse.nodes():
if node.var in ssa.immutable_ids:
continue
src = defuse.get_node_target(node)
if max_expr_depth is not None and len(str(src)) > max_expr_depth:
continue
if is_function_call(src):
continue
if node.var.is_mem():
continue
if src.is_op('Phi'):
continue
to_replace[node.var] = src
node_to_reg[node] = node.var
return node_to_reg, to_replace, defuse
def propagate(self, ssa, head, max_expr_depth=None):
"""
Do expression propagation
@ssa: SSADiGraph instance
@head: the head location of the graph
@max_expr_depth: the maximum allowed depth of an expression
"""
node_to_reg, to_replace, defuse = self.get_candidates(ssa, head, max_expr_depth)
modified = False
for node, reg in viewitems(node_to_reg):
for successor in defuse.successors(node):
if not self.propagation_allowed(ssa, to_replace, node, successor):
continue
node_a = node
node_b = successor
block = ssa.graph.blocks[node_b.label]
replace = {node_a.var: to_replace[node_a.var]}
# Replace
assignblks = list(block)
assignblk = block[node_b.index]
out = {}
for dst, src in viewitems(assignblk):
if src.is_op('Phi'):
out[dst] = src
continue
if src.is_mem():
ptr = src.ptr.replace_expr(replace)
new_src = ExprMem(ptr, src.size)
else:
new_src = src.replace_expr(replace)
if dst.is_id():
new_dst = dst
elif dst.is_mem():
ptr = dst.ptr.replace_expr(replace)
new_dst = ExprMem(ptr, dst.size)
else:
new_dst = dst.replace_expr(replace)
if not (new_dst.is_id() or new_dst.is_mem()):
new_dst = dst
if src != new_src or dst != new_dst:
modified = True
out[new_dst] = new_src
out = AssignBlock(out, assignblk.instr)
assignblks[node_b.index] = out
new_block = IRBlock(block.loc_key, assignblks)
ssa.graph.blocks[block.loc_key] = new_block
return modified
class PropagateExprIntThroughExprId(PropagateThroughExprId):
"""
Propagate ExprInt through ExprId: classic constant propagation
This is a sub family of PropagateThroughExprId.
It reduces leaves in expressions of a program.
"""
def get_candidates(self, ssa, head, max_expr_depth):
defuse = SSADefUse.from_ssa(ssa)
to_replace = {}
node_to_reg = {}
for node in defuse.nodes():
src = defuse.get_node_target(node)
if not src.is_int():
continue
if is_function_call(src):
continue
if node.var.is_mem():
continue
to_replace[node.var] = src
node_to_reg[node] = node.var
return node_to_reg, to_replace, defuse
def propagation_allowed(self, ssa, to_replace, node_a, node_b):
"""
Propagating ExprInt is always ok
"""
return True
class PropagateThroughExprMem(object):
"""
Propagate through ExprMem in very simple cases:
- if no memory write between source and target
- if source does not contain any memory reference
"""
def propagate(self, ssa, head, max_expr_depth=None):
ircfg = ssa.graph
todo = set()
modified = False
for block in viewvalues(ircfg.blocks):
for i, assignblk in enumerate(block):
for dst, src in viewitems(assignblk):
if not dst.is_mem():
continue
if expr_has_mem(src):
continue
todo.add((block.loc_key, i + 1, dst, src))
ptr = dst.ptr
for size in range(8, dst.size, 8):
todo.add((block.loc_key, i + 1, ExprMem(ptr, size), src[:size]))
while todo:
loc_key, index, mem_dst, mem_src = todo.pop()
block = ircfg.blocks.get(loc_key, None)
if block is None:
continue
assignblks = list(block)
block_modified = False
for i in range(index, len(block)):
assignblk = block[i]
write_mem = False
assignblk_modified = False
out = dict(assignblk)
out_new = {}
for dst, src in viewitems(out):
if dst.is_mem():
write_mem = True
ptr = dst.ptr.replace_expr({mem_dst:mem_src})
dst = ExprMem(ptr, dst.size)
src = src.replace_expr({mem_dst:mem_src})
out_new[dst] = src
if out != out_new:
assignblk_modified = True
if assignblk_modified:
assignblks[i] = AssignBlock(out_new, assignblk.instr)
block_modified = True
if write_mem:
break
else:
# If no memory written, we may propagate to sons
# if son has only parent
for successor in ircfg.successors(loc_key):
predecessors = ircfg.predecessors(successor)
if len(predecessors) != 1:
continue
todo.add((successor, 0, mem_dst, mem_src))
if block_modified:
modified = True
new_block = IRBlock(block.loc_key, assignblks)
ircfg.blocks[block.loc_key] = new_block
return modified
def stack_to_reg(ir_arch_a, expr):
    """Rewrite a stack slot access @expr into a pseudo register STACK.<n>
    @ir_arch_a: ira instance (provides the stack pointer register)
    @expr: Expression instance
    """
    if expr.is_mem():
        ptr = expr.ptr
SP = ir_arch_a.sp
if ptr == SP:
return ExprId("STACK.0", expr.size)
elif (ptr.is_op('+') and
len(ptr.args) == 2 and
ptr.args[0] == SP and
ptr.args[1].is_int()):
diff = int(ptr.args[1])
assert diff % 4 == 0
diff = (0 - diff) & 0xFFFFFFFF
return ExprId("STACK.%d" % (diff // 4), expr.size)
return False
def is_stack_access(ir_arch_a, expr):
if not expr.is_mem():
return False
ptr = expr.ptr
diff = expr_simp(ptr - ir_arch_a.sp)
if not diff.is_int():
return False
return expr
def visitor_get_stack_accesses(ir_arch_a, expr, stack_vars):
if is_stack_access(ir_arch_a, expr):
stack_vars.add(expr)
return expr
def get_stack_accesses(ir_arch_a, expr):
result = set()
def get_stack(expr_to_test):
visitor_get_stack_accesses(ir_arch_a, expr_to_test, result)
return None
visitor = ExprWalk(get_stack)
visitor.visit(expr)
return result
def get_interval_length(interval_in):
length = 0
for start, stop in interval_in.intervals:
length += stop + 1 - start
return length
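# Small sketch of what get_interval_length computes, reusing the interval
# class already imported by this module (the helper name is hypothetical):
# intervals store inclusive (start, stop) pairs, so [(0, 3)] and [(8, 11)]
# together cover 4 + 4 = 8 bytes.
def _demo_interval_length():
    mem = interval([(0, 3)])
    mem += interval([(8, 11)])
    assert get_interval_length(mem) == 8
    return mem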
def check_expr_below_stack(ir_arch_a, expr):
"""
Return False if expr pointer is below original stack pointer
@ir_arch_a: ira instance
@expr: Expression instance
"""
ptr = expr.ptr
diff = expr_simp(ptr - ir_arch_a.sp)
if not diff.is_int():
return True
if int(diff) == 0 or int(expr_simp(diff.msb())) == 0:
return False
return True
def retrieve_stack_accesses(ir_arch_a, ircfg):
"""
Walk the ssa graph and find stack based variables.
Return a dictionary linking stack base address to its size/name
@ir_arch_a: ira instance
@ircfg: IRCFG instance
"""
stack_vars = set()
for block in viewvalues(ircfg.blocks):
for assignblk in block:
for dst, src in viewitems(assignblk):
stack_vars.update(get_stack_accesses(ir_arch_a, dst))
stack_vars.update(get_stack_accesses(ir_arch_a, src))
stack_vars = [expr for expr in stack_vars if check_expr_below_stack(ir_arch_a, expr)]
base_to_var = {}
for var in stack_vars:
base_to_var.setdefault(var.ptr, set()).add(var)
base_to_interval = {}
for addr, vars in viewitems(base_to_var):
var_interval = interval()
for var in vars:
offset = expr_simp(addr - ir_arch_a.sp)
if not offset.is_int():
# skip non linear stack offset
continue
start = int(offset)
stop = int(expr_simp(offset + ExprInt(var.size // 8, offset.size)))
mem = interval([(start, stop-1)])
var_interval += mem
base_to_interval[addr] = var_interval
if not base_to_interval:
return {}
    # Check that the intervals do not overlap
_, tmp = base_to_interval.popitem()
while base_to_interval:
addr, mem = base_to_interval.popitem()
assert (tmp & mem).empty
tmp += mem
base_to_info = {}
for addr, vars in viewitems(base_to_var):
name = "var_%d" % (len(base_to_info))
size = max([var.size for var in vars])
base_to_info[addr] = size, name
return base_to_info
def fix_stack_vars(expr, base_to_info):
"""
Replace local stack accesses in expr using information in @base_to_info
@expr: Expression instance
@base_to_info: dictionary linking stack base address to its size/name
"""
if not expr.is_mem():
return expr
ptr = expr.ptr
if ptr not in base_to_info:
return expr
size, name = base_to_info[ptr]
var = ExprId(name, size)
if size == expr.size:
return var
assert expr.size < size
return var[:expr.size]
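# Sketch of fix_stack_vars on a single access, using the expression classes
# already imported by this module; the names below are made up for
# illustration. An 8-bit read of a 32-bit stack slot becomes a slice of the
# named variable.
def _demo_fix_stack_vars():
    base = ExprId("STACK_BASE", 32)
    base_to_info = {base: (32, "var_0")}
    mem8 = ExprMem(base, 8)
    var = fix_stack_vars(mem8, base_to_info)
    assert var == ExprId("var_0", 32)[:8]
    return var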
def replace_mem_stack_vars(expr, base_to_info):
return expr.visit(lambda expr:fix_stack_vars(expr, base_to_info))
def replace_stack_vars(ir_arch_a, ircfg):
"""
    Try to replace stack-based memory accesses with variables.
    Hypothesis: every stack access in the input ircfg must be done explicitly
    through the stack pointer register, i.e. every alias on those variables is
    resolved.
WARNING: may fail
@ir_arch_a: ira instance
@ircfg: IRCFG instance
"""
base_to_info = retrieve_stack_accesses(ir_arch_a, ircfg)
modified = False
for block in list(viewvalues(ircfg.blocks)):
assignblks = []
for assignblk in block:
out = {}
for dst, src in viewitems(assignblk):
new_dst = dst.visit(lambda expr:replace_mem_stack_vars(expr, base_to_info))
new_src = src.visit(lambda expr:replace_mem_stack_vars(expr, base_to_info))
if new_dst != dst or new_src != src:
modified |= True
out[new_dst] = new_src
out = AssignBlock(out, assignblk.instr)
assignblks.append(out)
new_block = IRBlock(block.loc_key, assignblks)
ircfg.blocks[block.loc_key] = new_block
return modified
def memlookup_test(expr, bs, is_addr_ro_variable, result):
if expr.is_mem() and expr.ptr.is_int():
ptr = int(expr.ptr)
if is_addr_ro_variable(bs, ptr, expr.size):
result.add(expr)
return False
return True
def memlookup_visit(expr, bs, is_addr_ro_variable):
result = set()
def retrieve_memlookup(expr_to_test):
memlookup_test(expr_to_test, bs, is_addr_ro_variable, result)
return None
visitor = ExprWalk(retrieve_memlookup)
visitor.visit(expr)
return result
def get_memlookup(expr, bs, is_addr_ro_variable):
return memlookup_visit(expr, bs, is_addr_ro_variable)
def read_mem(bs, expr):
ptr = int(expr.ptr)
var_bytes = bs.getbytes(ptr, expr.size // 8)[::-1]
try:
value = int(encode_hex(var_bytes), 16)
except ValueError:
return expr
return ExprInt(value, expr.size)
def load_from_int(ir_arch, bs, is_addr_ro_variable):
"""
    Replace memory reads at constant addresses with their static values
    @ir_arch: ira instance
    @bs: binstream instance
    @is_addr_ro_variable: callback(bs, addr, size) used to test whether a
    memory read candidate lies in read-only data
"""
modified = False
for block in list(viewvalues(ir_arch.blocks)):
assignblks = list()
for assignblk in block:
out = {}
for dst, src in viewitems(assignblk):
# Test src
mems = get_memlookup(src, bs, is_addr_ro_variable)
src_new = src
if mems:
replace = {}
for mem in mems:
value = read_mem(bs, mem)
replace[mem] = value
src_new = src.replace_expr(replace)
if src_new != src:
modified = True
# Test dst pointer if dst is mem
if dst.is_mem():
ptr = dst.ptr
mems = get_memlookup(ptr, bs, is_addr_ro_variable)
if mems:
replace = {}
for mem in mems:
value = read_mem(bs, mem)
replace[mem] = value
ptr_new = ptr.replace_expr(replace)
if ptr_new != ptr:
modified = True
dst = ExprMem(ptr_new, dst.size)
out[dst] = src_new
out = AssignBlock(out, assignblk.instr)
assignblks.append(out)
block = IRBlock(block.loc_key, assignblks)
ir_arch.blocks[block.loc_key] = block
return modified
class AssignBlockLivenessInfos(object):
"""
Description of live in / live out of an AssignBlock
"""
__slots__ = ["gen", "kill", "var_in", "var_out", "live", "assignblk"]
def __init__(self, assignblk, gen, kill):
self.gen = gen
self.kill = kill
self.var_in = set()
self.var_out = set()
self.live = set()
self.assignblk = assignblk
def __str__(self):
out = []
out.append("\tVarIn:" + ", ".join(str(x) for x in self.var_in))
out.append("\tGen:" + ", ".join(str(x) for x in self.gen))
out.append("\tKill:" + ", ".join(str(x) for x in self.kill))
out.append(
'\n'.join(
"\t%s = %s" % (dst, src)
for (dst, src) in viewitems(self.assignblk)
)
)
out.append("\tVarOut:" + ", ".join(str(x) for x in self.var_out))
return '\n'.join(out)
class IRBlockLivenessInfos(object):
"""
    Description of live in / live out of an IRBlock
"""
__slots__ = ["loc_key", "infos", "assignblks"]
def __init__(self, irblock):
self.loc_key = irblock.loc_key
self.infos = []
self.assignblks = []
for assignblk in irblock:
gens, kills = set(), set()
for dst, src in viewitems(assignblk):
expr = ExprAssign(dst, src)
read = expr.get_r(mem_read=True)
write = expr.get_w()
gens.update(read)
kills.update(write)
self.infos.append(AssignBlockLivenessInfos(assignblk, gens, kills))
self.assignblks.append(assignblk)
def __getitem__(self, index):
"""Getitem on assignblks"""
return self.assignblks.__getitem__(index)
def __str__(self):
out = []
out.append("%s:" % self.loc_key)
for info in self.infos:
out.append(str(info))
out.append('')
return "\n".join(out)
class DiGraphLiveness(DiGraph):
"""
DiGraph representing variable liveness
"""
def __init__(self, ircfg, loc_db=None):
super(DiGraphLiveness, self).__init__()
self.ircfg = ircfg
self.loc_db = loc_db
self._blocks = {}
# Add irblocks gen/kill
for node in ircfg.nodes():
irblock = ircfg.blocks.get(node, None)
if irblock is None:
continue
irblockinfos = IRBlockLivenessInfos(irblock)
self.add_node(irblockinfos.loc_key)
self.blocks[irblockinfos.loc_key] = irblockinfos
for succ in ircfg.successors(node):
self.add_uniq_edge(node, succ)
for pred in ircfg.predecessors(node):
self.add_uniq_edge(pred, node)
@property
def blocks(self):
return self._blocks
def init_var_info(self):
"""Add ircfg out regs"""
raise NotImplementedError("Abstract method")
def node2lines(self, node):
"""
Output liveness information in dot format
"""
if self.loc_db is None:
node_name = str(node)
else:
names = self.loc_db.get_location_names(node)
if not names:
node_name = self.loc_db.pretty_str(node)
else:
node_name = "".join("%s:\n" % name for name in names)
yield self.DotCellDescription(
text="%s" % node_name,
attr={
'align': 'center',
'colspan': 2,
'bgcolor': 'grey',
}
)
if node not in self._blocks:
yield [self.DotCellDescription(text="NOT PRESENT", attr={})]
return
for i, info in enumerate(self._blocks[node].infos):
var_in = "VarIn:" + ", ".join(str(x) for x in info.var_in)
var_out = "VarOut:" + ", ".join(str(x) for x in info.var_out)
assignmnts = ["%s = %s" % (dst, src) for (dst, src) in viewitems(info.assignblk)]
if i == 0:
yield self.DotCellDescription(
text=var_in,
attr={
'bgcolor': 'green',
}
)
for assign in assignmnts:
yield self.DotCellDescription(text=assign, attr={})
yield self.DotCellDescription(
text=var_out,
attr={
'bgcolor': 'green',
}
)
yield self.DotCellDescription(text="", attr={})
def back_propagate_compute(self, block):
"""
Compute the liveness information in the @block.
        @block: IRBlockLivenessInfos instance
"""
infos = block.infos
modified = False
for i in reversed(range(len(infos))):
new_vars = set(infos[i].gen.union(infos[i].var_out.difference(infos[i].kill)))
if infos[i].var_in != new_vars:
modified = True
infos[i].var_in = new_vars
if i > 0 and infos[i - 1].var_out != set(infos[i].var_in):
modified = True
infos[i - 1].var_out = set(infos[i].var_in)
return modified
def back_propagate_to_parent(self, todo, node, parent):
"""
Back propagate the liveness information from @node to @parent.
@node: loc_key of the source node
@parent: loc_key of the node to update
"""
parent_block = self.blocks[parent]
cur_block = self.blocks[node]
if cur_block.infos[0].var_in == parent_block.infos[-1].var_out:
return
var_info = cur_block.infos[0].var_in.union(parent_block.infos[-1].var_out)
parent_block.infos[-1].var_out = var_info
todo.add(parent)
def compute_liveness(self):
"""
Compute the liveness information for the digraph.
"""
todo = set(self.leaves())
while todo:
node = todo.pop()
cur_block = self.blocks.get(node, None)
if cur_block is None:
continue
modified = self.back_propagate_compute(cur_block)
if not modified:
continue
            # The block's input liveness changed, propagate it to its parents
for pred in self.predecessors(node):
self.back_propagate_to_parent(todo, node, pred)
return True
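# The per-statement update used by back_propagate_compute is the classic
# backward liveness equation: var_in = gen | (var_out - kill). A minimal,
# self-contained sketch on a two-statement straight-line block (plain sets,
# no IR objects involved; the helper name is hypothetical):
def _demo_liveness_equations():
    # statement 0: a = b + c   -> gen={b, c}, kill={a}
    # statement 1: d = a + 1   -> gen={a},    kill={d}; d is live at the exit
    infos = [
        {"gen": {"b", "c"}, "kill": {"a"}, "var_in": set(), "var_out": set()},
        {"gen": {"a"}, "kill": {"d"}, "var_in": set(), "var_out": {"d"}},
    ]
    for i in reversed(range(len(infos))):
        infos[i]["var_in"] = infos[i]["gen"] | (infos[i]["var_out"] - infos[i]["kill"])
        if i > 0:
            infos[i - 1]["var_out"] = set(infos[i]["var_in"])
    assert infos[0]["var_in"] == {"b", "c"}
    assert infos[0]["var_out"] == {"a"}
    return infos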
class DiGraphLivenessIRA(DiGraphLiveness):
"""
DiGraph representing variable liveness for IRA
"""
def init_var_info(self, ir_arch_a):
"""Add ircfg out regs"""
for node in self.leaves():
irblock = self.ircfg.blocks.get(node, None)
if irblock is None:
continue
var_out = ir_arch_a.get_out_regs(irblock)
irblock_liveness = self.blocks[node]
irblock_liveness.infos[-1].var_out = var_out
def discard_phi_sources(ircfg, deleted_vars):
"""
Remove phi sources in @ircfg belonging to @deleted_vars set
@ircfg: IRCFG instance in ssa form
@deleted_vars: unused phi sources
"""
for block in list(viewvalues(ircfg.blocks)):
if not block.assignblks:
continue
assignblk = block[0]
todo = {}
modified = False
for dst, src in viewitems(assignblk):
if not src.is_op('Phi'):
todo[dst] = src
continue
srcs = set(expr for expr in src.args if expr not in deleted_vars)
assert(srcs)
if len(srcs) > 1:
todo[dst] = ExprOp('Phi', *srcs)
continue
todo[dst] = srcs.pop()
modified = True
if not modified:
continue
assignblks = list(block)
assignblk = dict(assignblk)
assignblk.update(todo)
assignblk = AssignBlock(assignblk, assignblks[0].instr)
assignblks[0] = assignblk
new_irblock = IRBlock(block.loc_key, assignblks)
ircfg.blocks[block.loc_key] = new_irblock
return True
def get_unreachable_nodes(ircfg, edges_to_del, heads):
"""
Return the unreachable nodes starting from heads and the associated edges to
be deleted.
@ircfg: IRCFG instance
@edges_to_del: edges already marked as deleted
    @heads: locations of the graph heads
"""
todo = set(heads)
visited_nodes = set()
new_edges_to_del = set()
while todo:
node = todo.pop()
if node in visited_nodes:
continue
visited_nodes.add(node)
for successor in ircfg.successors(node):
if (node, successor) not in edges_to_del:
todo.add(successor)
all_nodes = set(ircfg.nodes())
nodes_to_del = all_nodes.difference(visited_nodes)
for node in nodes_to_del:
for successor in ircfg.successors(node):
if successor not in nodes_to_del:
# Frontier: link from a deleted node to a living node
new_edges_to_del.add((node, successor))
return nodes_to_del, new_edges_to_del
def update_phi_with_deleted_edges(ircfg, edges_to_del):
"""
    Update phi nodes which have a source coming through an edge of @edges_to_del
    @ircfg: IRCFG instance in ssa form
@edges_to_del: edges to delete
"""
phi_locs_to_srcs = {}
for loc_src, loc_dst in edges_to_del:
phi_locs_to_srcs.setdefault(loc_dst, set()).add(loc_src)
modified = False
blocks = dict(ircfg.blocks)
for loc_dst, loc_srcs in viewitems(phi_locs_to_srcs):
block = ircfg.blocks[loc_dst]
if not irblock_has_phi(block):
continue
assignblks = list(block)
assignblk = assignblks[0]
out = {}
for dst, phi_sources in viewitems(assignblk):
if not phi_sources.is_op('Phi'):
out[dst] = phi_sources
continue
var_to_parents = get_phi_sources_parent_block(
ircfg,
loc_dst,
phi_sources.args
)
to_keep = set(phi_sources.args)
for src in phi_sources.args:
parents = var_to_parents[src]
remaining = parents.difference(loc_srcs)
if not remaining:
to_keep.discard(src)
modified = True
assert to_keep
if len(to_keep) == 1:
out[dst] = to_keep.pop()
else:
out[dst] = ExprOp('Phi', *to_keep)
assignblk = AssignBlock(out, assignblks[0].instr)
assignblks[0] = assignblk
new_irblock = IRBlock(loc_dst, assignblks)
blocks[block.loc_key] = new_irblock
for loc_key, block in viewitems(blocks):
ircfg.blocks[loc_key] = block
return modified
def del_unused_edges(ircfg, heads):
"""
    Delete unreachable edges of the @ircfg graph.
@ircfg: IRCFG instance in ssa form
@heads: location of the heads of the graph
"""
deleted_vars = set()
modified = False
edges_to_del_1 = set()
for node in ircfg.nodes():
successors = set(ircfg.successors(node))
block = ircfg.blocks.get(node, None)
if block is None:
continue
dst = block.dst
possible_dsts = set(solution.value for solution in possible_values(dst))
if not all(dst.is_loc() for dst in possible_dsts):
continue
possible_dsts = set(dst.loc_key for dst in possible_dsts)
if len(possible_dsts) == len(successors):
continue
dsts_to_del = successors.difference(possible_dsts)
for dst in dsts_to_del:
edges_to_del_1.add((node, dst))
# Remove edges and update phi accordingly
# Two cases here:
# - edge is directly linked to a phi node
    # - edge is indirectly linked to a phi node
nodes_to_del, edges_to_del_2 = get_unreachable_nodes(ircfg, edges_to_del_1, heads)
modified |= update_phi_with_deleted_edges(ircfg, edges_to_del_1.union(edges_to_del_2))
for src, dst in edges_to_del_1.union(edges_to_del_2):
ircfg.del_edge(src, dst)
for node in nodes_to_del:
block = ircfg.blocks[node]
ircfg.del_node(node)
del ircfg.blocks[node]
for assignblock in block:
for dst in assignblock:
deleted_vars.add(dst)
if deleted_vars:
modified |= discard_phi_sources(ircfg, deleted_vars)
return modified
class DiGraphLivenessSSA(DiGraphLivenessIRA):
"""
    DiGraph representing variable liveness in an SSA graph
"""
def __init__(self, ircfg):
super(DiGraphLivenessSSA, self).__init__(ircfg)
self.loc_key_to_phi_parents = {}
for irblock in viewvalues(self.blocks):
if not irblock_has_phi(irblock):
continue
out = {}
for sources in viewvalues(irblock[0]):
if not sources.is_op('Phi'):
# Some phi sources may have already been resolved to an
# expression
continue
var_to_parents = get_phi_sources_parent_block(self, irblock.loc_key, sources.args)
for var, var_parents in viewitems(var_to_parents):
out.setdefault(var, set()).update(var_parents)
self.loc_key_to_phi_parents[irblock.loc_key] = out
def back_propagate_to_parent(self, todo, node, parent):
if parent not in self.blocks:
return
parent_block = self.blocks[parent]
cur_block = self.blocks[node]
irblock = self.ircfg.blocks[node]
if cur_block.infos[0].var_in == parent_block.infos[-1].var_out:
return
var_info = cur_block.infos[0].var_in.union(parent_block.infos[-1].var_out)
if irblock_has_phi(irblock):
# Remove phi special case
out = set()
phi_sources = self.loc_key_to_phi_parents[irblock.loc_key]
for var in var_info:
if var not in phi_sources:
out.add(var)
continue
if parent in phi_sources[var]:
out.add(var)
var_info = out
parent_block.infos[-1].var_out = var_info
todo.add(parent)
|
commial/miasm
|
miasm/analysis/data_flow.py
|
Python
|
gpl-2.0
| 55,696
|
[
"VisIt"
] |
c1ab24d88d6d7a9f5229069344a4abd648c845fb75907139fa0286064aafdba6
|
# -*- coding: utf-8 -*-
#
# hl_api_server.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import importlib
import inspect
import io
import sys
import flask
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
from werkzeug.exceptions import abort
from werkzeug.wrappers import Response
import nest
import RestrictedPython
import time
import traceback
from copy import deepcopy
import os
MODULES = os.environ.get('NEST_SERVER_MODULES', 'nest').split(',')
RESTRICTION_OFF = bool(os.environ.get('NEST_SERVER_RESTRICTION_OFF', False))
EXCEPTION_ERROR_STATUS = 400
if RESTRICTION_OFF:
msg = 'NEST Server runs without a RestrictedPython trusted environment.'
print(f'***\n*** WARNING: {msg}\n***')
__all__ = [
'app',
'do_exec',
'set_mpi_comm',
'run_mpi_app',
'nestify',
]
app = Flask(__name__)
CORS(app)
mpi_comm = None
@app.route('/', methods=['GET'])
def index():
return jsonify({
'nest': nest.__version__,
'mpi': mpi_comm is not None,
})
def do_exec(args, kwargs):
try:
source_code = kwargs.get('source', '')
source_cleaned = clean_code(source_code)
locals_ = dict()
response = dict()
if RESTRICTION_OFF:
with Capturing() as stdout:
exec(source_cleaned, get_globals(), locals_)
if len(stdout) > 0:
response['stdout'] = '\n'.join(stdout)
else:
code = RestrictedPython.compile_restricted(source_cleaned, '<inline>', 'exec') # noqa
exec(code, get_restricted_globals(), locals_)
if '_print' in locals_:
response['stdout'] = ''.join(locals_['_print'].txt)
if 'return' in kwargs:
if isinstance(kwargs['return'], list):
data = dict()
for variable in kwargs['return']:
data[variable] = locals_.get(variable, None)
else:
data = locals_.get(kwargs['return'], None)
response['data'] = nest.hl_api.serializable(data)
return response
except Exception as e:
for line in traceback.format_exception(*sys.exc_info()):
print(line, flush=True)
abort(Response(str(e), EXCEPTION_ERROR_STATUS))
def log(call_name, msg):
msg = f'==> MASTER 0/{time.time():.7f} ({call_name}): {msg}'
print(msg, flush=True)
def do_call(call_name, args=[], kwargs={}):
"""Call a PYNEST function or execute a script within the server.
If the server is run serially (i.e., without MPI), this function
will do one of two things: If call_name is "exec", it will execute
the script given in args via do_exec(). If call_name is the name
of a PyNEST API function, it will call that function and pass args
and kwargs to it.
If the server is run with MPI, this function will first communicate
the call type ("exec" or API call) and the args and kwargs to all
worker processes. Only then will it execute the call in the same
way as described above for the serial case. After the call, all
worker responses are collected, combined and returned.
Please note that this function must only be called on the master
process (i.e., the task with rank 0) in a distributed scenario.
"""
if mpi_comm is not None:
assert mpi_comm.Get_rank() == 0
if mpi_comm is not None:
log(call_name, 'sending call bcast')
mpi_comm.bcast(call_name, root=0)
data = (args, kwargs)
log(call_name, f'sending data bcast, data={data}')
mpi_comm.bcast(data, root=0)
if call_name == "exec":
master_response = do_exec(args, kwargs)
else:
call, args, kwargs = nestify(call_name, args, kwargs)
log(call_name, f'local call, args={args}, kwargs={kwargs}')
master_response = call(*args, **kwargs)
response = [nest.hl_api.serializable(master_response)]
if mpi_comm is not None:
log(call_name, 'waiting for response gather')
response = mpi_comm.gather(response[0], root=0)
log(call_name, f'received response gather, data={response}')
return combine(call_name, response)
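# Hedged sketch of the worker-side counterpart of do_call(); it is not part of
# this module and only illustrates the bcast/gather protocol described in the
# docstring above, assuming an mpi4py-style communicator.
def _sketch_worker_loop(comm):
    """Hypothetical worker loop for ranks > 0 (illustration only)."""
    while True:
        call_name = comm.bcast(None, root=0)       # matches the first bcast
        args, kwargs = comm.bcast(None, root=0)    # matches the data bcast
        if call_name == "exec":
            local_response = do_exec(args, kwargs)
        else:
            call, args, kwargs = nestify(call_name, args, kwargs)
            local_response = call(*args, **kwargs)
        # the master gathers one response per rank and combines them
        comm.gather(nest.hl_api.serializable(local_response), root=0)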
@app.route('/exec', methods=['GET', 'POST'])
@cross_origin()
def route_exec():
""" Route to execute script in Python.
"""
args, kwargs = get_arguments(request)
response = do_call('exec', args, kwargs)
return jsonify(response)
# --------------------------
# RESTful API
# --------------------------
nest_calls = dir(nest)
nest_calls = list(filter(lambda x: not x.startswith('_'), nest_calls))
nest_calls.sort()
@app.route('/api', methods=['GET'])
@cross_origin()
def route_api():
""" Route to list call functions in NEST.
"""
return jsonify(nest_calls)
@app.route('/api/<call>', methods=['GET', 'POST'])
@cross_origin()
def route_api_call(call):
""" Route to call function in NEST.
"""
print(f"\n{'='*40}\n", flush=True)
args, kwargs = get_arguments(request)
log("route_api_call", f"call={call}, args={args}, kwargs={kwargs}")
response = api_client(call, args, kwargs)
return jsonify(response)
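# Client-side sketch of the /api/<call> route above, assuming the server runs
# locally on port 5000 and the `requests` package is installed; the model name
# and parameters are just an example payload for a NEST API call.
def _sketch_api_client_request():
    import requests
    reply = requests.post(
        "http://localhost:5000/api/Create",
        json={"model": "iaf_psc_alpha", "n": 2},
    )
    return reply.json()  # JSON-serialized response produced by do_call()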
# ----------------------
# Helpers for the server
# ----------------------
class Capturing(list):
""" Monitor stdout contents i.e. print.
"""
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = io.StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
def clean_code(source):
codes = source.split('\n')
code_cleaned = filter(lambda code: not (code.startswith('import') or code.startswith('from')), codes) # noqa
return '\n'.join(code_cleaned)
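# clean_code() drops whole top-level import lines and keeps everything else,
# for example (hypothetical helper, illustration only):
def _demo_clean_code():
    source = "import nest\nn = 2 + 2\nfrom copy import deepcopy"
    assert clean_code(source) == "n = 2 + 2"
    return clean_code(source)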
def get_arguments(request):
""" Get arguments from the request.
"""
args, kwargs = [], {}
if request.is_json:
json = request.get_json()
if isinstance(json, str) and len(json) > 0:
args = [json]
elif isinstance(json, list):
args = json
elif isinstance(json, dict):
kwargs = json
if 'args' in kwargs:
args = kwargs.pop('args')
elif len(request.form) > 0:
if 'args' in request.form:
args = request.form.getlist('args')
else:
kwargs = request.form.to_dict()
elif len(request.args) > 0:
if 'args' in request.args:
args = request.args.getlist('args')
else:
kwargs = request.args.to_dict()
return list(args), kwargs
def get_globals():
""" Get globals for exec function.
"""
copied_globals = globals().copy()
# Add modules to copied globals
modlist = [(module, importlib.import_module(module)) for module in MODULES]
modules = dict(modlist)
copied_globals.update(modules)
return copied_globals
def get_or_error(func):
""" Wrapper to get data and status.
"""
def func_wrapper(call, args, kwargs):
try:
return func(call, args, kwargs)
except Exception as e:
for line in traceback.format_exception(*sys.exc_info()):
print(line, flush=True)
abort(Response(str(e), EXCEPTION_ERROR_STATUS))
return func_wrapper
def get_restricted_globals():
""" Get restricted globals for exec function.
"""
def getitem(obj, index):
typelist = (list, tuple, dict, nest.NodeCollection)
if obj is not None and type(obj) in typelist:
return obj[index]
msg = f"Error getting restricted globals: unidentified object '{obj}'."
raise TypeError(msg)
restricted_builtins = RestrictedPython.safe_builtins.copy()
restricted_builtins.update(RestrictedPython.limited_builtins)
restricted_builtins.update(RestrictedPython.utility_builtins)
restricted_builtins.update(dict(
max=max,
min=min,
sum=sum,
time=time,
))
restricted_globals = dict(
__builtins__=restricted_builtins,
_print_=RestrictedPython.PrintCollector,
_getattr_=RestrictedPython.Guards.safer_getattr,
_getitem_=getitem,
_getiter_=iter,
_unpack_sequence_=RestrictedPython.Guards.guarded_unpack_sequence,
_write_=RestrictedPython.Guards.full_write_guard,
)
# Add modules to restricted globals
modlist = [(module, importlib.import_module(module)) for module in MODULES]
modules = dict(modlist)
restricted_globals.update(modules)
return restricted_globals
def nestify(call_name, args, kwargs):
"""Get the NEST API call and convert arguments if neccessary.
"""
call = getattr(nest, call_name)
objectnames = ['nodes', 'source', 'target', 'pre', 'post']
paramKeys = list(inspect.signature(call).parameters.keys())
args = [nest.NodeCollection(arg) if paramKeys[idx] in objectnames
else arg for (idx, arg) in enumerate(args)]
for (key, value) in kwargs.items():
if key in objectnames:
kwargs[key] = nest.NodeCollection(value)
return call, args, kwargs
@get_or_error
def api_client(call_name, args, kwargs):
""" API Client to call function in NEST.
"""
call = getattr(nest, call_name)
if callable(call):
if 'inspect' in kwargs:
response = {
'data': getattr(inspect, kwargs['inspect'])(call)
}
else:
response = do_call(call_name, args, kwargs)
else:
response = call
return response
def set_mpi_comm(comm):
global mpi_comm
mpi_comm = comm
def run_mpi_app(host="127.0.0.1", port=5000):
# NEST crashes with a segmentation fault if the number of threads
# is changed from the outside. Calling run() with threaded=False
# prevents Flask from performing such changes.
app.run(host=host, port=port, threaded=False)
def combine(call_name, response):
"""Combine responses from different MPI processes.
In a distributed scenario, each MPI process creates its own share
of the response from the data available locally. To present a
    coherent view on the response data for the caller, this data has to
be combined.
If this function is run serially (i.e., without MPI), it just
returns the response data from the only process immediately.
The type of the returned result can vary depending on the call
that produced it.
    The combination of results relies on a cascade of heuristics
    based on the call that was issued and the individual response data:
* if all responses are None, the combined response will also just
be None
* for some specific calls, the responses are known to be the
same from the master and all workers. In this case, the
combined response is just the master response
* if the response list contains only a single actual response and
None otherwise, the combined response will be that one actual
response
* for calls to GetStatus on recording devices, the combined
response will be a merged dictionary in the sense that all
      fields that contain a single value in the individual responses
      are kept as single values, while lists will be appended in
order of appearance; dictionaries in the response are
recursively treated in the same way
* for calls to GetStatus on neurons, the combined response is just
the single dictionary returned by the process on which the
neuron is actually allocated
* if the response contains one list per process, the combined
response will be those lists concatenated and flattened.
"""
if mpi_comm is None:
return response[0]
if all(v is None for v in response):
return None
# return the master response if all responses are known to be the same
if call_name in ('exec', 'Create', 'GetDefaults', 'GetKernelStatus',
'SetKernelStatus', 'SetStatus'):
return response[0]
# return a single response if there is only one which is not None
filtered_response = list(filter(lambda x: x is not None, response))
if len(filtered_response) == 1:
return filtered_response[0]
# return a single merged dictionary if there are many of them
if all(type(v[0]) is dict for v in response):
return merge_dicts(response)
# return a flattened list if the response only consists of lists
if all(type(v) is list for v in response):
return [item for lst in response for item in lst]
log("combine()", f"ERROR: cannot combine response={response}")
msg = "Cannot combine data because of unknown reason"
raise Exception(msg)
def merge_dicts(response):
"""Merge status dictionaries of recorders
This function runs through a zipped list and performs the
following steps:
* sum up all n_events fields
* if recording to memory: merge the event dictionaries by joining
all contained arrays
* if recording to ascii: join filenames arrays
* take all other values directly from the device on the first
process
"""
result = []
for device_dicts in zip(*response):
        # TODO: either strip fields like thread, vp, thread_local_id,
# and local or make them lists that contain the values from
# all dicts.
element_type = device_dicts[0]['element_type']
if element_type not in ('neuron', 'recorder', 'stimulator'):
msg = f'Cannot combine data of element with type "{element_type}".'
raise Exception(msg)
if element_type == 'neuron':
tmp = list(filter(lambda status: status['local'], device_dicts))
assert len(tmp) == 1
result.append(tmp[0])
if element_type == 'recorder':
tmp = deepcopy(device_dicts[0])
tmp['n_events'] = 0
for device_dict in device_dicts:
tmp['n_events'] += device_dict['n_events']
record_to = tmp['record_to']
if record_to not in ('ascii', 'memory'):
msg = f'Cannot combine data when recording to "{record_to}".'
raise Exception(msg)
if record_to == 'memory':
event_keys = tmp['events'].keys()
for key in event_keys:
tmp['events'][key] = []
for device_dict in device_dicts:
for key in event_keys:
tmp['events'][key].extend(device_dict['events'][key])
if record_to == 'ascii':
tmp['filenames'] = []
for device_dict in device_dicts:
tmp['filenames'].extend(device_dict['filenames'])
result.append(tmp)
if element_type == 'stimulator':
result.append(device_dicts[0])
return result
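# Minimal sketch of merge_dicts() on the per-process GetStatus output of one
# memory recorder; the field values below are made up for illustration.
def _demo_merge_recorder_dicts():
    rank0 = [{'element_type': 'recorder', 'record_to': 'memory',
              'n_events': 2, 'events': {'senders': [1, 2], 'times': [0.1, 0.2]}}]
    rank1 = [{'element_type': 'recorder', 'record_to': 'memory',
              'n_events': 1, 'events': {'senders': [3], 'times': [0.3]}}]
    merged = merge_dicts([rank0, rank1])
    assert merged[0]['n_events'] == 3
    assert merged[0]['events']['senders'] == [1, 2, 3]
    return merged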
if __name__ == "__main__":
app.run()
|
lekshmideepu/nest-simulator
|
pynest/nest/server/hl_api_server.py
|
Python
|
gpl-2.0
| 15,681
|
[
"NEURON"
] |
46de919de9eb92cde317d0b1f2fb037cb92bcd887d513f4160952f5b0306fca9
|
from __future__ import print_function, absolute_import, division
import os
import shutil
import tempfile
from nose.plugins.skip import SkipTest
import numpy as np
import sklearn.datasets
from sklearn.externals.joblib import dump
from numpy.testing.decorators import skipif
from osprey.dataset_loaders import (DSVDatasetLoader, FilenameDatasetLoader,
JoblibDatasetLoader, HDF5DatasetLoader,
MDTrajDatasetLoader,
MSMBuilderDatasetLoader,
NumpyDatasetLoader, SklearnDatasetLoader)
try:
__import__('msmbuilder.example_datasets')
HAVE_MSMBUILDER = True
except:
HAVE_MSMBUILDER = False
def test_FilenameDatasetLoader_1():
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
open('filename-1', 'w').close()
open('filename-2', 'w').close()
assert FilenameDatasetLoader.short_name == 'filename'
loader = FilenameDatasetLoader('filename-*')
X, y = loader.load()
X_ref = list(map(os.path.abspath, ['filename-1', 'filename-2']))
assert sorted(X) == X_ref, X
assert y is None, y
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_JoblibDatasetLoader_1():
assert JoblibDatasetLoader.short_name == 'joblib'
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
# one file
dump(np.zeros((10, 2)), 'f1.pkl')
loader = JoblibDatasetLoader('f1.pkl')
X, y = loader.load()
assert np.all(X == np.zeros((10, 2)))
assert y is None
# two files
dump(np.ones((10, 2)), 'f2.pkl')
loader = JoblibDatasetLoader('f*.pkl')
X, y = loader.load()
assert isinstance(X, list)
assert np.all(X[0] == np.zeros((10, 2)))
assert np.all(X[1] == np.ones((10, 2)))
assert y is None
# one file, with x and y
dump({'foo': 'baz', 'bar': 'qux'}, 'foobar.pkl')
loader = JoblibDatasetLoader('foobar.pkl', x_name='foo', y_name='bar')
X, y = loader.load()
assert X == 'baz', X
assert y == 'qux', y
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_HDF5DatasetLoader_1():
from mdtraj import io
assert HDF5DatasetLoader.short_name == 'hdf5'
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
# one file
io.saveh('f1.h5', **{'test': np.zeros((10, 3))})
loader = HDF5DatasetLoader('f1.h5', concat=False)
X, y = loader.load()
assert np.all(X == np.zeros((10, 3)))
assert y is None
# two files
io.saveh('f2.h5', **{'test': np.ones((10, 3))})
loader = HDF5DatasetLoader('f*.h5', concat=False)
X, y = loader.load()
assert isinstance(X, list)
assert np.all(X[0] == np.zeros((10, 3)))
assert np.all(X[1] == np.ones((10, 3)))
assert y is None
# concat and stride and y_col
loader = HDF5DatasetLoader('f*.h5', y_col=2, stride=2, concat=True)
X, y = loader.load()
assert X.shape[0] == 10 and X.shape[1] == 2
assert y.shape[0] == 10
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_DSVDatasetLoader_1():
assert DSVDatasetLoader.short_name == 'dsv'
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
# one file
np.savetxt('f1.csv', np.zeros((10, 4)), fmt='%f,%f,%f,%f')
loader = DSVDatasetLoader('f1.csv', concat=False)
X, y = loader.load()
assert np.all(X == np.zeros((10, 4)))
assert y is None
# two files
np.savetxt('f2.csv', np.ones((10, 4)), fmt='%f,%f,%f,%f')
loader = DSVDatasetLoader('f*.csv', concat=False)
X, y = loader.load()
assert isinstance(X, list)
assert np.all(X[0] == np.zeros((10, 4)))
assert np.all(X[1] == np.ones((10, 4)))
assert y is None
# y_col and usecols and concat and stride
loader = DSVDatasetLoader('f*.csv',
y_col=3,
usecols=(0, 2),
stride=2,
concat=True)
X, y = loader.load()
assert X.shape[0] == 10 and X.shape[1] == 2
assert y.shape[0] == 10
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
@skipif(not HAVE_MSMBUILDER, 'this test requires MSMBuilder')
def test_MDTrajDatasetLoader_1():
try:
from msmbuilder.example_datasets import FsPeptide
except ImportError as e:
raise SkipTest(e)
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
fs_pept = FsPeptide(dirname)
fs_pept.get()
try:
loader = MDTrajDatasetLoader(os.path.join(fs_pept.data_dir, '*.xtc'),
topology=os.path.join(fs_pept.data_dir, 'fs-peptide.pdb'))
X, y = loader.load()
assert len(X) == 28
assert y is None
finally:
shutil.rmtree(dirname)
def test_MSMBuilderDatasetLoader_1():
# TODO Why does this work when other msmbuilder imports don't?
from msmbuilder.dataset import dataset
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
x = np.random.randn(10, 2)
ds = dataset(path, 'w', 'dir-npy')
ds[0] = x
loader = MSMBuilderDatasetLoader(path, fmt='dir-npy')
X, y = loader.load()
assert np.all(X[0] == x)
assert y is None
finally:
shutil.rmtree(path)
def test_NumpyDatasetLoader_1():
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
x = np.random.randn(10, 2)
np.save('f1.npy', x)
loader = NumpyDatasetLoader('f1.npy')
X, y = loader.load()
assert np.all(X[0] == x)
assert y is None
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_SklearnDatasetLoader_1():
assert SklearnDatasetLoader.short_name == 'sklearn_dataset'
X, y = SklearnDatasetLoader('load_iris').load()
iris = sklearn.datasets.load_iris()
assert np.all(X == iris['data'])
assert np.all(y == iris['target'])
|
pandegroup/osprey
|
osprey/tests/test_dataset_loader.py
|
Python
|
apache-2.0
| 6,459
|
[
"MDTraj"
] |
d834bb12575a51d8dc2da729ee7da9bb0439a67857f0bd28dc6ddf2eef276e2e
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('create_quota')
@click.argument("name", type=str)
@click.argument("description", type=str)
@click.argument("amount", type=str)
@click.argument("operation", type=str)
@click.option(
"--default",
help="Whether or not this is a default quota. Valid values are ``no``, ``unregistered``, ``registered``. None is equivalent to ``no``.",
default="no",
show_default=True,
type=str
)
@click.option(
"--in_users",
help="A list of user IDs or user emails.",
type=str,
multiple=True
)
@click.option(
"--in_groups",
help="A list of group IDs or names.",
type=str,
multiple=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, name, description, amount, operation, default="no", in_users="", in_groups=""):
"""Create a new quota
Output:
A description of quota.
For example::
{'url': '/galaxy/api/quotas/386f14984287a0f7',
'model_class': 'Quota',
'message': "Quota 'Testing' has been created with 1 associated users and 0 associated groups.",
'id': '386f14984287a0f7',
'name': 'Testing'}
"""
return ctx.gi.quotas.create_quota(name, description, amount, operation, default=default, in_users=in_users, in_groups=in_groups)
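# Example invocation of this subcommand from a shell, assuming parsec is
# configured against a Galaxy instance. Galaxy quota operations are one of
# "=", "+" or "-", and the amount string is interpreted by Galaxy (e.g. "100GB"):
#
#   parsec quotas create_quota "Testing" "A quota for testing" "100GB" "=" \
#       --in_users user@example.org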
|
galaxy-iuc/parsec
|
parsec/commands/quotas/create_quota.py
|
Python
|
apache-2.0
| 1,401
|
[
"Galaxy"
] |
c9525d19ecf59ecc171908168503aa6076e6b85e90e6fb881a7b137289a81479
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
#
# NOTE: THIS CODE HAS BEEN ADOPTED AND MODIFIED FROM:
# https://github.com/cfinch/Shocksolution_Examples/blob/master/Visualization/vtktools.py
#
class VTK_XML_Serial_Unstructured:
"""
USAGE:
vtk_writer = VTK_XML_Serial_Unstructured()
vtk_writer.snapshot("filename.vtu", x, y, z, optional arguments...)
vtk_writer.writePVD("filename.pvd")
"""
def __init__(self):
self.fileNames = []
def coords_to_string(self, x,y,z):
string = str()
for i in range(len(x)):
string = string + repr(x[i]) + ' ' + repr(y[i]) \
+ ' ' + repr(z[i]) + ' '
return string
def array_to_string(self, a):
string = str()
for i in range(len(a)):
string = string + repr(a[i]) + ' '
return string
def snapshot(self, fileName, x,y,z, vx=[], vy=[], vz=[], nx=[], \
ny=[], nz=[], radii=[], colors=[], energies=[], nneigh=[], dist=[]):
"""
ARGUMENTS:
fileName file name and/or path/filename
x array of x coordinates of particle centers
y array of y coordinates of particle centers
z array of z coordinates of particle centers
vx optional array of x components of particle velocity
vy optional array of y components of particle velocity
vz optional array of z components of particle velocity
nx optional array of x components of particle director
ny optional array of y components of particle director
nz optional array of z components of particle director
radii optional array of particle radii
colors optional array of scalars to use to set particle colors
The exact colors will depend on the color map you set up in Paraview.
energies optional array of energies assigned to each particle
nneigh optional array of total number of neighbors for each particle
dist optional array of absolute distance to the seed (0th) particle
"""
import xml.dom.minidom
#import xml.dom.ext # python 2.5 and later
# Document and root element
doc = xml.dom.minidom.Document()
root_element = doc.createElementNS("VTK", "VTKFile")
root_element.setAttribute("type", "UnstructuredGrid")
root_element.setAttribute("version", "0.1")
root_element.setAttribute("byte_order", "LittleEndian")
doc.appendChild(root_element)
# Unstructured grid element
unstructuredGrid = doc.createElementNS("VTK", "UnstructuredGrid")
root_element.appendChild(unstructuredGrid)
# Piece 0 (only one)
piece = doc.createElementNS("VTK", "Piece")
piece.setAttribute("NumberOfPoints", str(len(x)))
piece.setAttribute("NumberOfCells", "0")
unstructuredGrid.appendChild(piece)
### Points ####
points = doc.createElementNS("VTK", "Points")
piece.appendChild(points)
# Point location data
point_coords = doc.createElementNS("VTK", "DataArray")
point_coords.setAttribute("type", "Float32")
point_coords.setAttribute("format", "ascii")
point_coords.setAttribute("NumberOfComponents", "3")
points.appendChild(point_coords)
string = self.coords_to_string(x, y, z)
point_coords_data = doc.createTextNode(string)
point_coords.appendChild(point_coords_data)
#### Cells ####
cells = doc.createElementNS("VTK", "Cells")
piece.appendChild(cells)
# Cell locations
cell_connectivity = doc.createElementNS("VTK", "DataArray")
cell_connectivity.setAttribute("type", "Int32")
cell_connectivity.setAttribute("Name", "connectivity")
cell_connectivity.setAttribute("format", "ascii")
cells.appendChild(cell_connectivity)
# Cell location data
connectivity = doc.createTextNode("0")
cell_connectivity.appendChild(connectivity)
cell_offsets = doc.createElementNS("VTK", "DataArray")
cell_offsets.setAttribute("type", "Int32")
cell_offsets.setAttribute("Name", "offsets")
cell_offsets.setAttribute("format", "ascii")
cells.appendChild(cell_offsets)
offsets = doc.createTextNode("0")
cell_offsets.appendChild(offsets)
cell_types = doc.createElementNS("VTK", "DataArray")
cell_types.setAttribute("type", "UInt8")
cell_types.setAttribute("Name", "types")
cell_types.setAttribute("format", "ascii")
cells.appendChild(cell_types)
types = doc.createTextNode("1")
cell_types.appendChild(types)
#### Data at Points ####
point_data = doc.createElementNS("VTK", "PointData")
piece.appendChild(point_data)
# Points
point_coords_2 = doc.createElementNS("VTK", "DataArray")
point_coords_2.setAttribute("Name", "Points")
point_coords_2.setAttribute("NumberOfComponents", "3")
point_coords_2.setAttribute("type", "Float32")
point_coords_2.setAttribute("format", "ascii")
point_data.appendChild(point_coords_2)
string = self.coords_to_string(x, y, z)
point_coords_2_Data = doc.createTextNode(string)
point_coords_2.appendChild(point_coords_2_Data)
# Particle velocity
if len(vx) > 0:
jumps = doc.createElementNS("VTK", "DataArray")
jumps.setAttribute("Name", "velocity")
jumps.setAttribute("NumberOfComponents", "3")
jumps.setAttribute("type", "Float32")
jumps.setAttribute("format", "ascii")
point_data.appendChild(jumps)
string = self.coords_to_string(vx,vy,vz)
jumpData = doc.createTextNode(string)
jumps.appendChild(jumpData)
# Particle director
if len(nx) > 0:
forces = doc.createElementNS("VTK", "DataArray")
forces.setAttribute("Name", "director")
forces.setAttribute("NumberOfComponents", "3")
forces.setAttribute("type", "Float32")
forces.setAttribute("format", "ascii")
point_data.appendChild(forces)
string = self.coords_to_string(nx,ny,nz)
forceData = doc.createTextNode(string)
forces.appendChild(forceData)
# Particle radii
if len(radii) > 0:
radiiNode = doc.createElementNS("VTK", "DataArray")
radiiNode.setAttribute("Name", "radii")
radiiNode.setAttribute("type", "Float32")
radiiNode.setAttribute("format", "ascii")
point_data.appendChild(radiiNode)
string = self.array_to_string(radii)
radiiData = doc.createTextNode(string)
radiiNode.appendChild(radiiData)
if len(colors) > 0:
# Particle colors
colorNode= doc.createElementNS("VTK", "DataArray")
colorNode.setAttribute("Name", "colors")
colorNode.setAttribute("type", "Float32")
colorNode.setAttribute("format", "ascii")
point_data.appendChild(colorNode)
string = self.array_to_string(colors)
color_Data = doc.createTextNode(string)
colorNode.appendChild(color_Data)
if len(energies) > 0:
            # Particle energies
energyNode= doc.createElementNS("VTK", "DataArray")
energyNode.setAttribute("Name", "energies")
energyNode.setAttribute("type", "Float32")
energyNode.setAttribute("format", "ascii")
point_data.appendChild(energyNode)
string = self.array_to_string(energies)
energy_Data = doc.createTextNode(string)
energyNode.appendChild(energy_Data)
if len(nneigh) > 0:
# Particle number of neighbours
neighNode= doc.createElementNS("VTK", "DataArray")
neighNode.setAttribute("Name", "nneigh")
neighNode.setAttribute("type", "Float32")
neighNode.setAttribute("format", "ascii")
point_data.appendChild(neighNode)
string = self.array_to_string(nneigh)
neigh_Data = doc.createTextNode(string)
neighNode.appendChild(neigh_Data)
if len(dist) > 0:
# Particle distance
distNode = doc.createElementNS("VTK", "DataArray")
distNode.setAttribute("Name", "dist")
distNode.setAttribute("type", "Float32")
distNode.setAttribute("format", "ascii")
point_data.appendChild(distNode)
string = self.array_to_string(dist)
dist_Data = doc.createTextNode(string)
distNode.appendChild(dist_Data)
#### Cell data (dummy) ####
cell_data = doc.createElementNS("VTK", "CellData")
piece.appendChild(cell_data)
# Write to file and exit
outFile = open(fileName, 'w')
# xml.dom.ext.PrettyPrint(doc, file)
doc.writexml(outFile, newl='\n')
outFile.close()
self.fileNames.append(fileName)
    def writePVD(self, fileName):
        import xml.dom.minidom
pvd = xml.dom.minidom.Document()
pvd_root = pvd.createElementNS("VTK", "VTKFile")
pvd_root.setAttribute("type", "Collection")
pvd_root.setAttribute("version", "0.1")
pvd_root.setAttribute("byte_order", "LittleEndian")
pvd.appendChild(pvd_root)
collection = pvd.createElementNS("VTK", "Collection")
pvd_root.appendChild(collection)
for i in range(len(self.fileNames)):
dataSet = pvd.createElementNS("VTK", "DataSet")
dataSet.setAttribute("timestep", str(i))
dataSet.setAttribute("group", "")
dataSet.setAttribute("part", "0")
dataSet.setAttribute("file", str(self.fileNames[i]))
collection.appendChild(dataSet)
outFile = open(fileName, 'w')
pvd.writexml(outFile, newl='\n')
outFile.close()
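# Minimal usage sketch for the writer above; the file names are placeholders.
def _demo_write_snapshots():
    vtk_writer = VTK_XML_Serial_Unstructured()
    x, y, z = [0.0, 1.0], [0.0, 0.5], [0.0, 0.0]
    vtk_writer.snapshot("frame_0000.vtu", x, y, z, radii=[1.0, 1.0])
    vtk_writer.writePVD("frames.pvd")  # references every .vtu written so far
    return vtk_writer.fileNames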
|
sknepneklab/SAMoS
|
utils/InitialAnalysis/vtktools.py
|
Python
|
gpl-3.0
| 10,740
|
[
"ParaView",
"VTK"
] |
645b97acf8bea599e48e8a51aec0b64d4111a5f4499dedb1cc838d3c7ad6bf73
|
# -*- coding: utf-8 -*-
# Infrared on Android devices using build in transmitter
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import List
from typing import Optional
from kivy.logger import Logger
from ORCA.vars.Replace import ReplaceVars
from ORCA.utils.TypeConvert import ToInt
from ORCA.utils.FileName import cFileName
from ORCA.Action import cAction
from ORCA.actions.ReturnCode import eReturnCode
import ORCA.Globals as Globals
try:
# noinspection PyUnresolvedReferences
from plyer import irblaster
except Exception as e:
Logger.info("plyer not available")
pass
'''
<root>
<repositorymanager>
<entry>
<name>IR Control on Android devices</name>
<description language='English'>Send IR Commands on Android devices with IR tranmitter WIP</description>
<description language='German'>Sendet IR Befehle auf Android Geräten mit eingebautem IR Sender WIP</description>
<author>Carsten Thielepape</author>
<version>5.0.4</version>
<minorcaversion>5.0.4</minorcaversion>
<skip>0</skip>
<sources>
<source>
<local>$var(APPLICATIONPATH)/interfaces/ir_on_android</local>
<sourcefile>$var(REPOSITORYWWWPATH)/interfaces/ir_on_android.zip</sourcefile>
<targetpath>interfaces</targetpath>
</source>
</sources>
<dependencies>
<dependency>
<type>interfaces</type>
<name>Generic Infrared Interface</name>
</dependency>
</dependencies>
<skipfiles>
</skipfiles>
</entry>
</repositorymanager>
</root>
'''
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from interfaces.generic_infrared.interface import cInterface as oBaseInterFaceInfrared
else:
oBaseInterFaceInfrared = Globals.oInterFaces.LoadInterface('generic_infrared').GetClass("cInterface")
class cInterface(oBaseInterFaceInfrared):
class cInterFaceSettings(oBaseInterFaceInfrared.cInterFaceSettings):
def __init__(self,oInterFace):
super().__init__(oInterFace)
self.bIsConnected = False
self.bOnError = False
def Connect(self) -> bool:
self.bIsConnected = False
if not super().Connect():
Logger.debug("ir_on_android: Connect cancelled by root class")
return False
try:
if irblaster.exists():
self.ShowDebug(uMsg="Connected")
self.bIsConnected = True
return True
else:
self.ShowDebug(uMsg="No Ir-Blaster at device")
self.bIsConnected = False
except Exception as ex:
self.ShowError(uMsg=u'Cannot open IR Device',oException=ex)
self.bOnError=True
return False
        def Disconnect(self) -> bool:
            if not super().Disconnect():
                return False
            return True
def __init__(self):
super().__init__()
cInterFaceSettings=cInterface.cInterFaceSettings
        self.dSettings:Dict[str, cInterFaceSettings] = {}
self.oSetting:Optional[cInterFaceSettings] = None
def Init(self, uObjectName:str, oFnObject:Optional[cFileName]=None) -> None:
super().Init(uObjectName=uObjectName, oFnObject=oFnObject)
self.oObjectConfig.dDefaultSettings['FNCodeset']['active'] = "enabled"
def DeInit(self, **kwargs) -> None:
super().DeInit(**kwargs)
for uSettingName in self.dSettings:
self.dSettings[uSettingName].DeInit()
def SendCommand(self,oAction:cAction,oSetting:cInterFaceSettings,uRetVar:str,bNoLogOut:bool=False) -> eReturnCode:
super().SendCommand(oAction=oAction,oSetting=oSetting,uRetVar=uRetVar,bNoLogOut=bNoLogOut)
eRet:eReturnCode = eReturnCode.Error
if oAction.uCCF_Code != u"":
# noinspection PyUnresolvedReferences
oAction.oIRCode=CCfToAndroidIR(oAction.uCCF_Code,ToInt(oAction.uRepeatCount))
oAction.uCCF_Code = u""
uCmd:str=ReplaceVars(oAction.uCmd)
self.ShowInfo(uMsg=u'Sending Command: '+uCmd + u' to '+oSetting.uConfigName)
oSetting.Connect()
if oSetting.bIsConnected:
try:
Logger.debug("Sending IR Commend to IRBLASTER")
irblaster.transmit(oAction.oIRCode.iFrequency,oAction.oIRCode.aPattern)
eRet = eReturnCode.Success
except Exception as ex:
self.ShowWarning(uMsg=u'Can\'t send message: '+str(ex))
else:
Logger.debug("Not Connected")
return eRet
class cIRCommand:
""" Object to hold an Android IR Command """
def __init__(self,iFrequency:int, aPattern:List):
self.iFrequency:int = iFrequency
self.aPattern:List = aPattern
# noinspection PyUnusedLocal
def CCfToAndroidIR(sCCFString:str,iRepeatCount:int) -> cIRCommand:
iCount:int
aList:List = sCCFString.split(" ")
iFrequency:int = int(aList[1], 16)
    # Drop the dummy word, the frequency word and both sequence length words
    # (cf. the Java reference below)
    aList=aList[4:]
    # Convert the CCF frequency word into Hz (cf. the Java reference below)
    iFrequency = ToInt(1000000 / (iFrequency * 0.241246))
iPulses:int = int(1000000 / iFrequency)
aPattern:List = []
for uElem in aList:
iCount = int(uElem, 16)
aPattern.append(int(iCount*iPulses))
return cIRCommand(iFrequency, aPattern)
'''
// based on code from http://stackoverflow.com/users/1679571/randy (http://stackoverflow.com/a/25518468)
private IRCommand hex2ir(final String irData) {
List<String> list = new ArrayList<String>(Arrays.asList(irData.split(" ")));
list.remove(0); // dummy
int frequency = Integer.parseInt(list.remove(0), 16); // frequency
list.remove(0); // seq1
list.remove(0); // seq2
frequency = (int) (1000000 / (frequency * 0.241246));
int pulses = 1000000 / frequency;
int count;
int[] pattern = new int[list.size()];
for (int i = 0; i < list.size(); i++) {
count = Integer.parseInt(list.get(i), 16);
pattern[i] = count * pulses;
}
return new IRCommand(frequency, pattern);
}
'''
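# Worked example of the conversion above, assuming the formula from the Java
# reference: for the Pronto/CCF fragment
#   "0000 006D 0000 0002 0060 0018 0018 0030"
# the frequency word 0x006D = 109 gives f = 1000000 / (109 * 0.241246), about
# 38 kHz, so one carrier period is about 26 us; the pattern words 0x60, 0x18,
# 0x18 and 0x30 then become on/off durations of roughly 2496, 624, 624 and
# 1248 microseconds.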
|
thica/ORCA-Remote
|
src/interfaces/ir_on_android/interface.py
|
Python
|
gpl-3.0
| 7,271
|
[
"ORCA"
] |
bd43042c4f47c23d511c2185b57f466d02a0244b02edde906e35d6879b090310
|
# -*- coding: utf-8 -*-
import sys, logging
import numpy as np
from math import ceil
from gseapy.stats import multiple_testing_correction
from joblib import delayed, Parallel
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
nperm=1000, seed=None, single=False, scale=False):
"""This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.
:param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
:param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: It is the same as GSEA's weighted score method. Weighting by the correlation
is a very reasonable choice that allows significant gene sets with less than perfect coherence.
options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of
coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
might be appropriate. On the other hand, if one uses sets with large number of genes and only
a small subset of those is expected to be coherent, then one could consider using p > 1.
Our recommendation is to use p = 1 and use other settings only if you are very experienced
with the method and its behavior.
:param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
the gene list. Or rankings, rank_metric.values
:param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
equal to the permutation number.
:param seed: Random state for initializing gene list shuffling. Default: seed=None
:return:
ES: Enrichment score (real number between -1 and +1)
ESNULL: Enrichment score calculated from random permutations.
Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
RES: Numerical vector containing the running enrichment score for all locations in the gene list .
"""
N = len(gene_list)
# Test whether each element of a 1-D array is also present in a second array
# It's more intuitive here than original enrichment_score source code.
    # use .astype to convert bool to integer
tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int) # notice that the sign is 0 (no tag) or 1 (tag)
if weighted_score_type == 0 :
correl_vector = np.repeat(1, N)
else:
correl_vector = np.abs(correl_vector)**weighted_score_type
# get indices of tag_indicator
hit_ind = np.flatnonzero(tag_indicator).tolist()
# if used for compute esnull, set esnull equal to permutation number, e.g. 1000
# else just compute enrichment scores
# set axis to 1, because we have 2D array
axis = 1
tag_indicator = np.tile(tag_indicator, (nperm+1,1))
correl_vector = np.tile(correl_vector,(nperm+1,1))
# gene list permutation
rs = np.random.RandomState(seed)
for i in range(nperm): rs.shuffle(tag_indicator[i])
# np.apply_along_axis(rs.shuffle, 1, tag_indicator)
Nhint = tag_indicator.sum(axis=axis, keepdims=True)
sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
# compute ES score, the code below is identical to gsea enrichment_score method.
no_tag_indicator = 1 - tag_indicator
Nmiss = N - Nhint
norm_tag = 1.0/sum_correl_tag
norm_no_tag = 1.0/Nmiss
RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)
if scale: RES = RES / N
if single:
es_vec = RES.sum(axis=axis)
else:
max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
# extract values
es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]
return es, esnull, hit_ind, RES
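# Small usage sketch for enrichment_score() with a toy ranked list; gene names
# and scores are made up, and nperm is kept tiny so esnull stays small.
def _demo_enrichment_score():
    gene_list = np.array(["G1", "G2", "G3", "G4", "G5", "G6"])
    correl_vector = np.array([3.0, 2.5, 1.0, -0.5, -1.5, -2.0])
    gene_set = ["G1", "G2", "G6"]
    es, esnull, hit_ind, RES = enrichment_score(
        gene_list, correl_vector, gene_set, weighted_score_type=1,
        nperm=10, seed=0)
    assert len(esnull) == 10 and len(RES) == len(gene_list)
    return es, hit_ind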
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
seed=None, single=False, scale=False):
"""Next generation algorithm of GSEA and ssGSEA. Works for 3d array
:param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
:param bool scale: If True, normalize the scores by number of genes_mat.
:param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
    :param seed: Random state for initializing gene list shuffling.
Default: seed=None
:return: a tuple contains::
| ES: Enrichment score (real number between -1 and +1), for ssGSEA, set scale eq to True.
| ESNULL: Enrichment score calculated from random permutation.
| Hits_Indices: Indices of genes if genes are included in gene_set.
| RES: The running enrichment score for all locations in the gene list.
"""
rs = np.random.RandomState(seed)
# gene_mat -> 1d: prerank, ssGSEA or 2d: GSEA
keys = sorted(gene_sets.keys())
if weighted_score_type == 0:
# don't bother doing the calculation, just set to 1
cor_mat = np.ones(cor_mat.shape)
elif weighted_score_type > 0:
pass
else:
logging.error("Using negative values of weighted_score_type, not allowed")
raise ValueError("weighted_score_type should be postive numerics")
cor_mat = np.abs(cor_mat)
if cor_mat.ndim ==1:
# ssGSEA or Prerank
# genesets->M, genes->N, perm-> axis=2
N, M = len(gene_mat), len(keys)
# generate gene hits matrix
# for a 1d ndarray of gene_mat, set assume_unique=True,
# which means both input arrays are assumed to be unique
# and can speed up the calculation.
tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
# index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
# generate permuted hits matrix
perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
# shuffle matrix, last matrix is not shuffled when nperm > 0
if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
# missing hits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
elif cor_mat.ndim == 2:
# GSEA
# 2d ndarray, gene_mat and cor_mat are shuffled already
# reshape matrix
cor_mat = cor_mat.T
# gene_mat is a tuple containing (gene_name, permuted_gene_name_indices)
genes, genes_ind = gene_mat
# genesets->M, genes->N, perm-> axis=2
# don't use assume_unique=True on 2d arrays with np.isin();
# elements in gene_mat are not unique, and assuming so causes wrong results
tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)
#index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
# nohits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
else:
logging.error("Program die because of unsupported input")
raise ValueError("Correlation vector or matrix (cor_mat) is not supported")
# Nhint = tag_indicator.sum(1)
# Nmiss = N - Nhint
axis=1
P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
# ssGSEA: scale es by gene numbers ?
# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
if scale: REStensor = REStensor / len(gene_mat)
if single:
#ssGSEA
esmatrix = REStensor.sum(axis=axis)
else:
#GSEA
esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)
es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]
return es, esnull, hit_ind, RES
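# A shape-oriented sketch of enrichment_score_tensor for its 1d (prerank/ssGSEA)
# branch, using hypothetical gene names and gene sets.
def _enrichment_score_tensor_sketch():
    import numpy as np
    toy_genes = np.asarray(['gene%d' % i for i in range(50)])
    toy_ranks = np.linspace(1.5, -1.5, num=50)
    toy_sets = {'SET_A': ['gene2', 'gene4', 'gene8'],
                'SET_B': ['gene1', 'gene30', 'gene45']}
    es, esnull, hits, res = enrichment_score_tensor(toy_genes, toy_ranks, toy_sets,
                                                    weighted_score_type=1,
                                                    nperm=20, seed=1)
    # es: (2,) one score per gene set; esnull: (2, 20); res: (2, 50) running scores.
    return es.shape, esnull.shape, res.shape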
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, seed=None, skip_last=False):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
Works for 3d array.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n).
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
:param int permutation_num: number of times the class labels are shuffled.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:param seed: random_state seed
:param bool skip_last: (internal use only) whether to skip the permutation of the last rankings.
:return:
returns two 2d ndarrays with shape (nperm, gene_num).
| cor_mat_indices: the indices of the sorted and permuted (excluding the last row) ranking matrix.
| cor_mat: the sorted and permuted (excluding the last row) ranking matrix.
"""
rs = np.random.RandomState(seed)
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num,1,1))
if skip_last:
# random shuffle on the first dim, the last matrix (expr_mat) is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
else:
for arr in perm_cor_tensor: rs.shuffle(arr)
# metrics
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
n_pos = np.sum(pos)
n_neg = np.sum(neg)
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method in ['signal_to_noise', 's2n']:
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method in ['abs_signal_to_noise', 'abs_s2n']:
cor_mat = np.abs((pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std))
elif method == 't_test':
denom = np.sqrt((pos_cor_std**2)/n_pos + (neg_cor_std**2)/n_neg)
cor_mat = (pos_cor_mean - neg_cor_mean)/ denom
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
raise LookupError("Input method: %s is not supported"%method)
# return matrix[nperm+1, perm_cors]
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
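# A small sketch of ranking_metric_tensor on a toy expression table; sample names,
# gene names and phenotype labels are hypothetical.
def _ranking_metric_tensor_sketch():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    exprs = pd.DataFrame(rng.rand(20, 6),
                         index=['gene%d' % i for i in range(20)],
                         columns=['s%d' % i for i in range(6)])
    classes = ['tumor'] * 3 + ['normal'] * 3
    ind, cor = ranking_metric_tensor(exprs, 'signal_to_noise', permutation_num=5,
                                     pos='tumor', neg='normal', classes=classes,
                                     ascending=False, seed=0)
    # ind, cor: (5, 20) arrays of sorted gene indices and rank metrics, one row per permutation.
    return ind.shape, cor.shape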
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table. works for 2d array.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Other methods are:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n)
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param dict classes: column id to group mapping.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of the correlation of each gene to the class; gene names are the index, and the rank metric values are the values.
visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
n_pos = np.sum(classes == pos)
n_neg = np.sum(classes == neg)
if method in ['signal_to_noise', 's2n']:
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method in ['abs_signal_to_noise', 'abs_s2n']:
ser = ((df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])).abs()
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/n_pos+df_std[neg]**2/n_neg)
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
raise LookupError("Input method: %s is not supported"%method)
ser = ser.sort_values(ascending=ascending)
return ser
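# A toy example for ranking_metric; the column-to-phenotype mapping and labels are
# hypothetical.
def _ranking_metric_sketch():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    df = pd.DataFrame(rng.rand(10, 4) + 1.0,
                      index=['gene%d' % i for i in range(10)],
                      columns=['t1', 't2', 'n1', 'n2'])
    classes = {'t1': 'tumor', 't2': 'tumor', 'n1': 'normal', 'n2': 'normal'}
    ser = ranking_metric(df, 'signal_to_noise', pos='tumor', neg='normal',
                         classes=classes, ascending=False)
    # ser: genes ranked by the signal-to-noise metric, largest first.
    return ser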
def gsea_compute_tensor(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
This function splits large arrays into smaller pieces to avoid memory overflow.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
genes_mat, cor_mat = data.index.values, data.values
base = 5 if data.shape[0] >= 5000 else 10
## phenotype permutation
np.random.seed(seed) # control the random numbers
if permutation_type == "phenotype":
# shuffling classes and generate random correlation rankings
logging.debug("Start to permutate classes..............................")
if (n + 1) % base == 0: # n+1: last permute is for orignial ES calculation
num_bases = [ base ] * ((n + 1) // base)
skip_last = [0] * ( n // base) + [1] # last is not permuted
else:
num_bases = [ base ] * ((n + 1) // base) + [ (n +1) % base]
skip_last = [0] * ((n + 1) // base) + [ (n +1) % base]
random_seeds = np.random.randint(np.iinfo(np.int32).max, size=len(num_bases))
genes_ind = []
cor_mat = []
# split permutation array into smaller blocks to save memory
temp_rnk = Parallel(n_jobs=processes, require='sharedmem')(delayed(ranking_metric_tensor)(
data, method, b, pheno_pos, pheno_neg, classes, ascending, se, skip)
for b, skip, se in zip(num_bases, skip_last, random_seeds))
for k, temp in enumerate(temp_rnk):
gi, cor = temp
genes_ind.append(gi)
cor_mat.append(cor)
genes_ind, cor_mat = np.vstack(genes_ind), np.vstack(cor_mat)
# convert to tuple
genes_mat = (data.index.values, genes_ind)
logging.debug("Start to compute es and esnulls........................")
# Prerank, ssGSEA, GSEA
es = []
RES = []
hit_ind = []
esnull = []
temp_esnu = []
# split gmt dataset, too
block = ceil(len(subsets) / base)
random_seeds = np.random.randint(np.iinfo(np.int32).max, size=block)
# split large array into smaller blocks to avoid memory overflow
i, m = 1, 0
gmt_block = []
while i <= block:
# you have to reseed, or all your processes are sharing the same seed value
rs = random_seeds[i-1]
gmtrim = {k: gmt.get(k) for k in subsets[m:base * i]}
gmt_block.append(gmtrim)
m = base * i
i += 1
## if permutation_type == "phenotype": n = 0
## NOTE for GSEA: cor_mat is 2d array, it won't permute again when call enrichment_score_tensor
temp_esnu = Parallel(n_jobs=processes, require='sharedmem')(delayed(enrichment_score_tensor)(
genes_mat, cor_mat, gmtrim, w, n, rs, single, scale)
for gmtrim, rs in zip(gmt_block, random_seeds))
# temp_esnu is a list; no need to use the append method.
for si, temp in enumerate(temp_esnu):
# e, enu, hit, rune = temp.get()
e, enu, hit, rune = temp
esnull.append(enu)
es.append(e)
RES.append(rune)
hit_ind += hit
# concatenate results
es, esnull, RES = np.hstack(es), np.vstack(esnull), np.vstack(RES)
return gsea_significance(es, esnull), hit_ind, RES, subsets
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
es = []
RES=[]
hit_ind=[]
esnull = [ [] for a in range(len(subsets)) ]
np.random.seed(seed) # control the random numbers
logging.debug("Start to compute enrichment scores......................")
if permutation_type == "phenotype":
logging.debug("Start to permutate classes..............................")
# this version won't split large array into smaller ones
genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
permutation_num=n+1,
pos=pheno_pos, neg=pheno_neg,
classes=classes,
ascending=ascending, seed=seed,
skip_last=True)
# compute es, esnulls, hits, RES
logging.debug("Start to compute enrichment nulls.......................")
es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
cor_mat=cor_mat,
gene_sets=gmt,
weighted_score_type=w,
nperm=n, seed=seed,
single=False, scale=False,)
else:
# Prerank, ssGSEA, GSEA with gene_set permutation
gl, cor_vec = data.index.values, data.values
logging.debug("Start to compute es and esnulls........................")
## this version doesn't split large arrays into smaller ones
# es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,
# cor_mat=cor_vec,
# gene_sets=gmt,
# weighted_score_type=w,
# nperm=n, rs=rs
# single=single, scale=scale)
temp_esnu=[]
# you have to reseed, or all your processes are sharing the same seed value
# np.random.seed(seed)
random_seeds= np.random.randint(np.iinfo(np.int32).max, size=len(subsets))
temp_esnu = Parallel(n_jobs=processes, require='sharedmem')(delayed(enrichment_score)(
gl, cor_vec, gmt.get(subset), w, n,
rs, single, scale)
for subset, rs in zip(subsets, random_seeds))
# temp_esnu is a list; no need to use the append method.
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp
esnull[si] = enu
es.append(e)
RES.append(rune)
hit_ind.append(hit)
return gsea_significance(es, esnull), hit_ind, RES, subsets
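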
def normalize(es, esnull):
"""normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
"""
nEnrichmentScores =np.zeros(es.shape)
nEnrichmentNulls=np.zeros(esnull.shape)
# esnullmean = np.zeros(es.shape)
# # calculate nESnulls
# for i in range(esnull.shape[0]):
# # NES
# enrNull = esnull[i]
# if es[i] >= 0:
# mes = enrNull[enrNull >= 0].mean()
# nEnrichmentScores[i] = es[i] / mes
# else:
# mes = enrNull[enrNull < 0 ].mean()
# nEnrichmentScores[i] = - es[i] / mes
# esnullmean[i] = mes
# # NESnull
# for j in range(esnull.shape[1]):
# if esnull[i,j] >= 0:
# nEnrichmentNulls[i,j] = esnull[i,j] / esnullmean[i]
# else:
# nEnrichmentNulls[i,j] = - esnull[i,j] / esnullmean[i]
esnull_pos = np.ma.MaskedArray(esnull, mask=(esnull<0)).mean(axis=1)
esnull_neg = np.ma.MaskedArray(esnull, mask=(esnull>=0)).mean(axis=1)
esnull_pos = np.array(esnull_pos)
esnull_neg = np.array(esnull_neg)
# NES
nEnrichmentScores = np.where(es>=0, es/esnull_pos, -es/esnull_neg)
# NES_NULL
nEnrichmentNulls = np.where(esnull>=0, esnull/esnull_pos[:,np.newaxis],
-esnull/esnull_neg[:,np.newaxis])
return nEnrichmentScores, nEnrichmentNulls
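# A tiny numeric sketch of normalize(): positive and negative scores are rescaled by
# the mean of the same-signed null scores (toy numbers below).
def _normalize_sketch():
    import numpy as np
    es = np.array([0.6, -0.4])
    esnull = np.array([[0.3, 0.2, -0.1, -0.2],
                       [0.1, 0.4, -0.2, -0.3]])
    nes, nesnull = normalize(es, esnull)
    # nes[0] = 0.6 / mean(0.3, 0.2) = 2.4; nes[1] = -(-0.4) / mean(-0.2, -0.3) = -1.6
    return nes, nesnull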
def gsea_pval(es, esnull):
"""Compute nominal p-value.
From article (PNAS):
estimate nominal p-value for S from esnull by using the positive
or negative portion of the distribution corresponding to the sign
of the observed ES(S).
"""
# to speed up, use vectorized numpy operations to compute the pvals.
condlist = [ es < 0, es >=0]
choicelist = [(esnull < es.reshape(len(es),1)).sum(axis=1)/ (esnull < 0).sum(axis=1),
(esnull >= es.reshape(len(es),1)).sum(axis=1)/ (esnull >= 0).sum(axis=1)]
pvals = np.select(condlist, choicelist)
return pvals
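# A toy check of gsea_pval(): the nominal p-value is the fraction of same-signed null
# scores at least as extreme as the observed score.
def _gsea_pval_sketch():
    import numpy as np
    es = np.array([0.5])
    esnull = np.array([[0.6, 0.4, 0.2, -0.1, -0.3]])
    # positive branch: (esnull >= 0.5).sum() / (esnull >= 0).sum() = 1 / 3
    return gsea_pval(es, esnull)   # array([0.333...])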
def gsea_fdr(nEnrichmentScores, nEnrichmentNulls):
"""Create a histogram of all NES(S,pi) over all S and pi.
Use this null distribution to compute an FDR q value.
:param nEnrichmentScores: normalized ES
:param nEnrichmentNulls: normalized ESnulls
:return: FDR
"""
# FDR null distribution histogram
# vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])
# nvals = np.array(sorted(vals))
# or
nvals = np.sort(nEnrichmentNulls.flatten())
nnes = np.sort(nEnrichmentScores)
fdrs = []
# FDR computation
for i in range(len(nEnrichmentScores)):
nes = nEnrichmentScores[i]
# use the same pval method to calculate fdr
if nes >= 0:
allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes, side="left"))
nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes, side="left"))
# allPos = (nvals >= 0).sum()
# allHigherAndPos = (nvals >= nes).sum()
# nesPos = (nnes >=0).sum()
# nesHigherAndPos = (nnes >= nes).sum()
else:
allPos = int(np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
nesPos = int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
# allPos = (nvals < 0).sum()
# allHigherAndPos = (nvals < nes).sum()
# nesPos = (nnes < 0).sum()
# nesHigherAndPos = (nnes < nes).sum()
try:
pi_norm = allHigherAndPos / float(allPos)
pi_obs = nesHigherAndPos / float(nesPos)
fdr = pi_norm / pi_obs
fdrs.append(fdr if fdr < 1 else 1.0)
except:
fdrs.append(1000000000.0)
logging.debug("Statistical testing finished.............................")
return fdrs
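# A small sketch of gsea_fdr(): the q-value compares the tail fraction of the pooled
# null NES distribution with the tail fraction of the observed NES values (toy numbers).
def _gsea_fdr_sketch():
    import numpy as np
    nes = np.array([2.0, -1.5])
    nesnull = np.array([[1.0, 2.5, -0.5, -1.0],
                        [0.5, 1.5, -2.0, -1.0]])
    return gsea_fdr(nes, nesnull)   # [0.25, 0.25] for this toy input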
def gsea_significance(enrichment_scores, enrichment_nulls):
"""Compute nominal pvals, normalized ES, and FDR q value.
For a given NES(S) = NES* >= 0, the FDR is the ratio of the percentage of all (S,pi) with
NES(S,pi) >= 0 whose NES(S,pi) >= NES*, divided by the percentage of
observed S with NES(S) >= 0 whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.
"""
# A zero-by-zero division is undetermined and results in a NaN; suppress the warnings.
np.seterr(divide='ignore', invalid='ignore')
# import warnings
# warnings.simplefilter("ignore")
es = np.array(enrichment_scores)
esnull = np.array(enrichment_nulls)
logging.debug("Start to compute pvals..................................")
# P-values.
pvals = gsea_pval(es, esnull).tolist()
logging.debug("Start to compute nes and nesnull........................")
# NES
nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)
logging.debug("Start to compute fdrs..................................")
# FDR
fdrs = gsea_fdr(nEnrichmentScores, nEnrichmentNulls)
#TODO: use multiple testing correction for ssGSEA? ssGSEA2.0 uses BH correction.
# https://github.com/broadinstitute/ssGSEA2.0/blob/master/src/ssGSEA2.0.R
# line 969
# fdrs, _ = multiple_testing_correction(pvals, alpha=0.05)
return zip(enrichment_scores, nEnrichmentScores, pvals, fdrs)
|
BioNinja/gseapy
|
gseapy/algorithm.py
|
Python
|
mit
| 31,007
|
[
"VisIt"
] |
6a24c93e9266d6eae4c89fc3dfe51624952b722d2618727933e37b6d516fcd5c
|
from bugle.shortcuts import render, redirect, get_object_or_404
from forms import BlastForm
from models import Blast, ImageUpload
from search import query_to_q_object
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404
from django.utils import dateformat
from django.template import Template, Context
from django.db.models import Count
from django.utils import simplejson
from django.db.models import Q
import urllib
NUM_ON_HOMEPAGE = 100
class BlastBundle(object):
is_bundle = True
def __init__(self, blasts):
self.blasts = blasts
def created(self):
return self.blasts[0].created
def summary(self):
return ', '.join([b.short for b in self.blasts])
def prepare_blasts(blasts, user=None, bundle=False):
blasts = list(blasts.select_related('user'))
for blast in blasts:
blast.set_viewing_user(user)
if bundle:
# Now coalesce runs of blasts with 'short' set into bundles
new_blasts = []
current_bundle = []
current_bundle_date = None
for blast in blasts:
if blast.short and (
not current_bundle_date
or blast.created.date() == current_bundle_date
):
current_bundle.append(blast)
current_bundle_date = blast.created.date()
else:
if current_bundle:
new_blasts.append(BlastBundle(current_bundle))
current_bundle = []
current_bundle_date = None
new_blasts.append(blast)
# Any stragglers?
if current_bundle:
new_blasts.append(BlastBundle(current_bundle))
blasts = new_blasts
return blasts
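# Illustration of the bundling behaviour above (hypothetical data): if blasts A and B
# both have `short` set and were created on the same date, followed by a normal blast C,
# the resulting list is [BlastBundle([A, B]), C]; a `short` blast created on a different
# date flushes the current bundle and is appended on its own.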
def homepage(request):
return render(request, 'homepage.html', {
'blasts': prepare_blasts(
Blast.objects.all().order_by('-created')[:NUM_ON_HOMEPAGE],
request.user, bundle=True
),
'more_blasts': Blast.objects.count() > NUM_ON_HOMEPAGE,
'initial_blast': request.GET.get('blast', ''),
})
def all(request):
return render(request, 'homepage.html', {
'blasts': prepare_blasts(
Blast.objects.all().order_by('-created'), request.user,
bundle = True
),
'more_blasts': False,
})
def blast(request, pk):
try:
b = prepare_blasts(
Blast.objects.filter(pk = pk), request.user
)[0]
except IndexError:
raise Http404
return render(request, 'blast.html', {
'blast': b,
'is_single': True
})
def post(request):
if request.user.is_anonymous():
return redirect('/login/')
form = BlastForm(request.POST, request.FILES)
if form.is_valid():
blast = form.save(commit = False)
if blast.message.startswith('?'):
return redirect('/search/?' + urllib.urlencode({
'q': blast.message[1:].strip(),
}))
else:
blast.message = blast.message.strip()
blast.user = request.user
blast.save()
return redirect('/')
def post_api(request):
username = request.POST.get('username', '')
try:
user = User.objects.get(username = username)
except User.DoesNotExist:
return HttpResponse('Invalid username')
if not user.check_password(request.POST.get('password', '')):
return HttpResponse('Invalid password')
message = request.POST.get('message', '').strip()
if not message:
return HttpResponse('Invalid message')
Blast.objects.create(
user = user,
message = message,
extended = request.POST.get('extended', ''),
short = request.POST.get('short', ''),
)
return HttpResponse('Message saved')
def post_image(request):
"""Let iPhone Twitter client users attach images"""
image_upload = ImageUpload.objects.create(
user = request.user,
attachment = request.FILES['media']
)
return HttpResponse('<mediaurl>image_upload:%s</mediaurl>' % image_upload.pk)
def delete(request):
if request.user.is_anonymous():
return redirect('/login/')
blast = get_object_or_404(Blast, pk = request.POST.get('id', ''))
if blast.user == request.user:
blast.delete()
return redirect('/%s/' % request.user)
def profile(request, username):
user = get_object_or_404(User, username = username)
return render(request, 'profile.html', {
'profile': user,
'blasts': prepare_blasts(
user.blasts.all(), request.user, bundle=False
),
'show_delete': request.user == user,
})
def mentions(request, username):
user = get_object_or_404(User, username = username)
blasts = Blast.objects.filter(
Q(mentioned_users = user) | Q(is_broadcast = True)
).distinct()
return render(request, 'mentions.html', {
'section': 'mentions',
'profile': user,
'blasts': prepare_blasts(blasts, request.user),
'initial_blast': '@%s ' % username,
})
def all_mentions(request):
return render(request, 'all_mentions.html', {
'section': 'mentions',
'blasts': prepare_blasts(
Blast.objects.filter(
Q(mentioned_users__isnull = False) | Q(is_broadcast = True)
).distinct(), request.user
)
})
def search(request):
q = request.GET.get('q', '').strip()
blasts = []
if q:
blasts = prepare_blasts(
Blast.objects.filter(query_to_q_object(q, 'message')),
request.user
)
return render(request, 'search.html', {
'q': q,
'blasts': blasts,
})
def pastes(request, username):
user = get_object_or_404(User, username = username)
blasts = user.blasts.exclude(extended = None).exclude(extended = '')
return render(request, 'pastes.html', {
'section': 'pastes',
'profile': user,
'blasts': prepare_blasts(blasts, request.user),
})
def all_pastes(request):
return render(request, 'all_pastes.html', {
'section': 'pastes',
'blasts': prepare_blasts(
Blast.objects.exclude(extended=None).exclude(extended=''),
request.user
)
})
def todos(request, username):
user = get_object_or_404(User, username = username)
blasts = Blast.objects.filter(is_todo = True).filter(
Q(user = user) | Q(mentioned_users = user) | Q(is_broadcast = True)
).distinct()
if request.user.username == username:
initial_blast = 'todo: '
else:
initial_blast = 'todo: @%s ' % username
return render(request, 'todos.html', {
'section': 'todos',
'profile': user,
'blasts': prepare_blasts(blasts, request.user),
'initial_blast': initial_blast,
})
def all_todos(request):
return render(request, 'all_todos.html', {
'section': 'todos',
'blasts': prepare_blasts(
Blast.objects.filter(is_todo = True), request.user
),
'initial_blast': 'todo: @all ',
})
def favourites(request, username):
user = get_object_or_404(User, username = username)
blasts = Blast.objects.filter(
favourited_by = user
)
return render(request, 'favourites.html', {
'section': 'favourites',
'profile': user,
'blasts': prepare_blasts(blasts, request.user),
})
def all_favourites(request):
return render(request, 'all_favourites.html', {
'section': 'favourites',
'blasts': prepare_blasts(
Blast.objects.filter(
favourited_by__isnull = False
).distinct(), request.user
)
})
def files(request, username):
user = get_object_or_404(User, username = username)
blasts = user.blasts.exclude(attachment = '')
return render(request, 'files.html', {
'section': 'files',
'profile': user,
'blasts': prepare_blasts(blasts, request.user),
})
def all_files(request):
return render(request, 'all_files.html', {
'section': 'files',
'blasts': prepare_blasts(
Blast.objects.exclude(attachment = ''), request.user
),
})
def stats(request):
blast_dates = list(Blast.objects.values_list('created', flat=True))
date_counts = {}
for date in blast_dates:
d = date.date()
date_counts[d] = date_counts.get(d, 0) + 1
top_dates = date_counts.items()
top_dates.sort(key = lambda x: x[0])
return render(request, 'stats.html', {
'top_users': User.objects.annotate(
num_blasts = Count('blasts')
).order_by('-num_blasts'),
'top_dates': top_dates,
})
def toggle(request):
if request.user.is_anonymous():
return redirect('/login/')
key = [k for k in request.POST.keys() if 'check' in k][0].split('.')[0]
# key will now be uncheck-45 or check-23
verb, pk = key.split('-')
blast = get_object_or_404(Blast, pk = pk)
# Check the user is allowed to modify this blast
blast.set_viewing_user(request.user)
if not blast.viewing_user_can_mark_done():
return HttpResponse('You are not allowed to check off that task')
if verb == 'check':
blast.done = True
if verb == 'uncheck':
blast.done = False
blast.save()
return redirect(request.POST.get('back_to', '') or '/')
def favourite(request):
if request.user.is_anonymous():
return redirect('/login/')
key = [k for k in request.POST.keys() if 'fave' in k][0].split('.')[0]
# key will now be fave-45 or notfave-23
verb, pk = key.split('-')
blast = get_object_or_404(Blast, pk = pk)
# Check the user is allowed to modify this blast
blast.set_viewing_user(request.user)
if not blast.user_can_favourite():
return HttpResponse('You are not allowed to favourite that')
if verb == 'fave':
blast.favourited_by.add(request.user)
if verb == 'notfave':
blast.favourited_by.remove(request.user)
return redirect(request.POST.get('back_to', '') or '/')
|
devfort/bugle
|
bugle_project/bugle/views.py
|
Python
|
bsd-2-clause
| 10,222
|
[
"BLAST"
] |
ec665aa2d7f58d856c04b0c7a8154a2bee68ab4740ca7b0b3d133e74595577c2
|
'''Setup script for theia.'''
from setuptools import setup, find_packages
setup(
#General
name = "theia",
version = "0.1.3",
author = u"Rapha\u00EBl Duque",
description = "3D Gaussian beam tracing and visualization",
license = "GNU GPLv3+",
url = "http://theia.hopto.org",
#Requires and entries
packages = find_packages(exclude='tests'),
install_requires = ['numpy>=1.7.0'],
scripts = ['bin/theia'],
#Metadata
author_email = "raphael.duque@polytechnique.edu"
)
|
bandang0/theia
|
setup.py
|
Python
|
gpl-3.0
| 505
|
[
"Gaussian"
] |
27fad8e8b5fb0860b9bb8ff5e82bc916ad4eb7edb550737bce78f81a407dd365
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Viewfinder viewpoint.
Viewpoints are collections of episodes. Every user has a 'default'
viewpoint which contains all uploaded episodes. Viewpoints of type
'event' are created when a user shares episodes. Additional viewpoints
of type 'thematic' may be created to encompass arbitrary content. For
example, a shared set of family events, funny things you've seen in
NYC, all concerts at an event space, or a photographer's fashion
photos.
Viewpoint ids are constructed from a variable-length-encoded integer
device id and a variable-length-encoded unique id from the device. The
final value is base64-hex encoded.
Viewpoints can have followers, which are users who have permission to
view and possibly modify the viewpoint's content. Episodes are added
to a viewpoint via the 'Episode' relation.
Viewpoint types include:
'default': every user has a default viewpoint to which all uploaded
episodes are published.
'event': event viewpoints are created every time an episode is shared.
The sharees are added to the viewpoint as followers.
'system': system-generated viewpoints used to welcome new users.
Viewpoint: aggregation of episodes.
"""
__authors__ = ['andy@emailscrubbed.com (Andy Kimball)',
'spencer@emailscrubbed.com (Spencer Kimball)']
import json
from tornado import gen
from viewfinder.backend.db import db_client, vf_schema
from viewfinder.backend.db.activity import Activity
from viewfinder.backend.db.asset_id import IdPrefix, ConstructAssetId, DeconstructAssetId, VerifyAssetId
from viewfinder.backend.db.base import DBObject
from viewfinder.backend.db.comment import Comment
from viewfinder.backend.db.hash_base import DBHashObject
from viewfinder.backend.db.friend import Friend
from viewfinder.backend.db.followed import Followed
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.lock import Lock
from viewfinder.backend.db.lock_resource_type import LockResourceType
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.viewpoint_lock_tracker import ViewpointLockTracker
@DBObject.map_table_attributes
class Viewpoint(DBHashObject):
"""Viewfinder viewpoint data object."""
__slots__ = []
_table = DBObject._schema.GetTable(vf_schema.VIEWPOINT)
DEFAULT = 'default'
EVENT = 'event'
SYSTEM = 'system'
TYPES = [DEFAULT, EVENT, SYSTEM]
"""Kinds of viewpoints."""
# Limit how many followers may be part of a viewpoint.
# Any change to this value should be coordinated with viewfinder client code
# to ensure that our clients catch this condition before sending to the server.
MAX_FOLLOWERS = 150
# Attributes that are projected for removed viewpoints.
_IF_REMOVED_ATTRIBUTES = set(['viewpoint_id',
'type',
'follower_id',
'user_id',
'timestamp',
'labels',
'adding_user_id'])
def __init__(self, viewpoint_id=None):
super(Viewpoint, self).__init__()
self.viewpoint_id = viewpoint_id
@classmethod
def ShouldScrubColumn(cls, name):
return name == 'title'
def IsDefault(self):
"""Returns true if the viewpoint is a default viewpoint."""
return self.type == Viewpoint.DEFAULT
def IsSystem(self):
"""Returns true if the viewpoint is a system viewpoint (ex. welcome conversation)."""
return self.type == Viewpoint.SYSTEM
@classmethod
def ConstructViewpointId(cls, device_id, uniquifier):
"""Returns a viewpoint id constructed from component parts. See
"ConstructAssetId" for details of the encoding.
"""
return ConstructAssetId(IdPrefix.Viewpoint, device_id, uniquifier)
@classmethod
def DeconstructViewpointId(cls, viewpoint_id):
"""Returns the components of a viewpoint id: device_id and
uniquifier.
"""
return DeconstructAssetId(IdPrefix.Viewpoint, viewpoint_id)
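# Hypothetical round-trip (the numbers are illustrative, not real ids):
#   vp_id = Viewpoint.ConstructViewpointId(device_id=12, uniquifier=34)
#   device_id, uniquifier = Viewpoint.DeconstructViewpointId(vp_id)   # -> (12, 34)
# Both helpers delegate to ConstructAssetId / DeconstructAssetId, which apply the
# variable-length integer encoding and base64-hex encoding described in the module
# docstring.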
@classmethod
def ConstructCoverPhoto(cls, episode_id, photo_id):
"""Construct a cover_photo dict."""
assert episode_id is not None, episode_id
assert photo_id is not None, photo_id
return {'episode_id': episode_id, 'photo_id': photo_id}
@classmethod
@gen.coroutine
def VerifyViewpointId(cls, client, user_id, device_id, viewpoint_id):
"""Ensures that a client-provided viewpoint id is valid according
to the rules specified in VerifyAssetId.
"""
yield VerifyAssetId(client, user_id, device_id, IdPrefix.Viewpoint, viewpoint_id, has_timestamp=False)
@classmethod
@gen.engine
def AcquireLock(cls, client, viewpoint_id, callback):
"""Acquires a persistent global lock on the specified viewpoint."""
op = Operation.GetCurrent()
lock = yield gen.Task(Lock.Acquire, client, LockResourceType.Viewpoint, viewpoint_id,
op.operation_id)
ViewpointLockTracker.AddViewpointId(viewpoint_id)
callback(lock)
@classmethod
@gen.engine
def ReleaseLock(cls, client, viewpoint_id, lock, callback):
"""Releases a previously acquired lock on the specified viewpoint."""
yield gen.Task(lock.Release, client)
ViewpointLockTracker.RemoveViewpointId(viewpoint_id)
callback()
@classmethod
def AssertViewpointLockAcquired(cls, viewpoint_id):
"""Asserts that a lock has been acquired on the specified viewpoint."""
assert ViewpointLockTracker.HasViewpointId(viewpoint_id), \
'Lock for viewpoint, %s, should be acquired at this point but isn\'t.' % viewpoint_id
def IsCoverPhotoSet(self):
"""The cover photo is consider set if it is a non empty dict."""
if self.cover_photo is not None:
assert len(self.cover_photo) > 0, self
return True
return False
def MakeMetadataDict(self, follower):
"""Constructs a dictionary containing viewpoint metadata attributes, overridden by follower
attributes where required (as viewed by the follower himself). The format conforms to
VIEWPOINT_METADATA in json_schema.py.
"""
# Combine all attributes from the viewpoint and follower records.
vp_dict = self._asdict()
foll_dict = follower.MakeMetadataDict()
vp_dict.update(foll_dict)
# If the follower is removed from the viewpoint, then only project certain attributes.
if follower.IsRemoved():
for attr_name in vp_dict.keys():
if attr_name not in Viewpoint._IF_REMOVED_ATTRIBUTES:
del vp_dict[attr_name]
return vp_dict
@gen.coroutine
def AddFollowers(self, client, adding_user_id, existing_follower_ids, add_follower_ids, timestamp):
"""Adds the specified followers to this viewpoint, giving each follower CONTRIBUTE
permission on the viewpoint. The caller is responsible for ensuring that the user adding
the followers has permission to do so, and that the users to add are not yet followers.
Returns the newly added followers.
"""
@gen.coroutine
def _UpdateFollower(follower_id):
"""Create a new follower of this viewpoint in the database."""
follower = Follower(user_id=follower_id, viewpoint_id=self.viewpoint_id)
follower.timestamp = timestamp
follower.adding_user_id = adding_user_id
follower.viewed_seq = 0
follower.labels = [Follower.CONTRIBUTE]
# Create the follower and corresponding Followed record.
yield [gen.Task(follower.Update, client),
gen.Task(Followed.UpdateDateUpdated, client, follower_id, self.viewpoint_id,
old_timestamp=None, new_timestamp=timestamp)]
raise gen.Return(follower)
# Adding user should be an existing user.
assert adding_user_id is None or adding_user_id in existing_follower_ids, \
(adding_user_id, existing_follower_ids)
# Caller should never pass overlapping existing/add user id sets.
assert not any(follower_id in existing_follower_ids for follower_id in add_follower_ids), \
(existing_follower_ids, add_follower_ids)
# Ensure that friendships are created between the followers to add.
yield gen.Task(Friend.MakeFriendsWithGroup, client, add_follower_ids)
# Ensure that friendships are created with existing followers.
yield [gen.Task(Friend.MakeFriends, client, existing_id, add_id)
for existing_id in existing_follower_ids
for add_id in add_follower_ids]
# Add new followers to viewpoint with CONTRIBUTE permission.
add_followers = yield [_UpdateFollower(follower_id) for follower_id in add_follower_ids]
raise gen.Return(add_followers)
@gen.engine
def SelectCoverPhoto(self, client, exclude_posts_set, callback, activities_list=None, available_posts_dict=None):
"""Select a cover photo for this viewpoint.
This is used to select a cover photo if the current cover photo gets unshared.
The selection order here assumes that the order of episodes and photos in the
activities reflects the intended order of selection. This won't be true
of activities created before this change goes into production, but we've
decided to accept this small variation in cover photo selection for these
older activities because we don't think it's worth the extra complexity that it
would take to make selection of those more 'correct'.
Newer clients should order episodes and photos within share requests according to
cover photo selection priority.
Although older clients provide un-ordered lists of episodes/photos, a request transform
will order episodes/photos from those clients using the original mobile client
algorithm for cover photo selection. So we will select a new cover photo
assuming these activities are already ordered.
Caller may supply list of activities to use so that querying them for the viewpoint isn't needed.
Caller may supply dict of available (not Removed/Unshared) posts so querying for posts isn't needed.
Search process:
1) oldest to newest activity (share_new or share_existing activities).
2) within activity, first to last episode.
3) within episode, first to last photo.
Only photos which aren't unshared qualify.
Returns: cover_photo dict of the selected photo, or None if no selection is found.
"""
from viewfinder.backend.db.post import Post
batch_limit = 50
# cover_photo is not supported on default viewpoints.
assert not self.IsDefault(), self
@gen.coroutine
def _QueryAvailablePost(episode_id, photo_id):
if available_posts_dict is not None:
post = available_posts_dict.get(Post.ConstructPostId(episode_id, photo_id), None)
else:
post = yield gen.Task(Post.Query, client, episode_id, photo_id, col_names=None)
if post.IsRemoved():
post = None
raise gen.Return(post)
# Loop over activities starting from the oldest.
excl_start_key = None
while True:
if activities_list is None:
activities = yield gen.Task(Activity.RangeQuery,
client,
self.viewpoint_id,
range_desc=None,
limit=batch_limit,
col_names=None,
excl_start_key=excl_start_key,
scan_forward=False)
else:
activities = activities_list
for activity in activities:
if activity.name == 'share_new' or activity.name == 'share_existing':
args_dict = json.loads(activity.json)
# Now, loop through the episodes in the activity.
for ep_dict in args_dict['episodes']:
episode_id = ep_dict['episode_id']
# And loop through the photos in each episode.
for photo_id in ep_dict['photo_ids']:
# Save cost of query on a post that we know is UNSHARED.
if (episode_id, photo_id) not in exclude_posts_set:
post = yield _QueryAvailablePost(episode_id, photo_id)
if post is not None:
# If it hasn't been unshared, we're good to go.
callback(Viewpoint.ConstructCoverPhoto(episode_id, photo_id))
return
if activities_list is not None or len(activities) < batch_limit:
break
else:
excl_start_key = activities[-1].GetKey()
# No unshared photos found in this viewpoint.
callback(None)
@classmethod
def SelectCoverPhotoFromEpDicts(cls, ep_dicts):
"""Select a cover photo from the ep_dicts argument.
Selection assumes episodes and photos are ordered according to selection preference.
Returns: Either None if no photos found, or a cover_photo dict with selected photo.
"""
cover_photo = None
for ep_dict in ep_dicts:
if len(ep_dict['photo_ids']) > 0:
cover_photo = Viewpoint.ConstructCoverPhoto(ep_dict['episode_id'], ep_dict['photo_ids'][0])
break
return cover_photo
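# A toy illustration (episode and photo ids are made up): with
#   ep_dicts = [{'episode_id': 'e1', 'photo_ids': []},
#               {'episode_id': 'e2', 'photo_ids': ['p7', 'p8']}]
# SelectCoverPhotoFromEpDicts(ep_dicts) skips the empty episode and returns
# {'episode_id': 'e2', 'photo_id': 'p7'}, i.e. the first photo of the first
# non-empty episode.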
@classmethod
def IsCoverPhotoContainedInEpDicts(cls, cover_episode_id, cover_photo_id, ep_dicts):
"""Confirm existence of specified cover_photo in ep_dicts.
Return: True if specified cover_photo matches photo in ep_dicts. Otherwise, False."""
for ep_dict in ep_dicts:
if cover_episode_id == ep_dict['episode_id']:
for photo_id in ep_dict['photo_ids']:
if cover_photo_id == photo_id:
return True
# Not found.
return False
@classmethod
@gen.coroutine
def CreateDefault(cls, client, user_id, device_id, timestamp):
"""Creates and returns a new user's default viewpoint."""
from viewfinder.backend.db.user import User
vp_dict = {'viewpoint_id': Viewpoint.ConstructViewpointId(device_id, User.DEFAULT_VP_ASSET_ID),
'user_id': user_id,
'timestamp': timestamp,
'type': Viewpoint.DEFAULT}
viewpoint, _ = yield gen.Task(Viewpoint.CreateNew, client, **vp_dict)
raise gen.Return(viewpoint)
@classmethod
@gen.coroutine
def CreateNew(cls, client, **vp_dict):
"""Creates the viewpoint specified by 'vp_dict' and creates a follower relation between
the requesting user and the viewpoint with the ADMIN label. The caller is responsible for
checking permission to do this, as well as ensuring that the viewpoint does not yet exist
(or is just being identically rewritten).
Returns a tuple containing the newly created objects: (viewpoint, follower).
"""
tasks = []
# Create the viewpoint.
assert 'viewpoint_id' in vp_dict and 'user_id' in vp_dict and 'timestamp' in vp_dict, vp_dict
viewpoint = Viewpoint.CreateFromKeywords(**vp_dict)
viewpoint.last_updated = viewpoint.timestamp
viewpoint.update_seq = 0
tasks.append(gen.Task(viewpoint.Update, client))
# Create the follower and give all permissions, since it's the owner.
foll_dict = {'user_id': viewpoint.user_id,
'viewpoint_id': viewpoint.viewpoint_id,
'timestamp': viewpoint.timestamp,
'labels': list(Follower.PERMISSION_LABELS),
'viewed_seq': 0}
if viewpoint.IsDefault():
foll_dict['labels'].append(Follower.PERSONAL)
follower = Follower.CreateFromKeywords(**foll_dict)
tasks.append(gen.Task(follower.Update, client))
# Create the corresponding Followed record.
tasks.append(gen.Task(Followed.UpdateDateUpdated,
client,
viewpoint.user_id,
viewpoint.viewpoint_id,
old_timestamp=None,
new_timestamp=viewpoint.last_updated))
yield tasks
raise gen.Return((viewpoint, follower))
@classmethod
@gen.coroutine
def CreateNewWithFollowers(cls, client, follower_ids, **vp_dict):
"""Calls the "CreateWithFollower" method to create a viewpoint with a single follower
(the current user). Then, all users identified by "follower_ids" are added to that
viewpoint as followers. Ensure that every pair of followers is friends with each other.
The caller is responsible for checking permission to do this, as well as ensuring that
the viewpoint and followers do not yet exist (or are just being identically rewritten).
Returns a tuple containing the newly created objects: (viewpoint, followers). The
followers list includes the owner.
"""
# Create the viewpoint with the current user as its only follower.
viewpoint, owner_follower = yield Viewpoint.CreateNew(client, **vp_dict)
# Now add the additional followers.
followers = yield viewpoint.AddFollowers(client,
vp_dict['user_id'],
[vp_dict['user_id']],
follower_ids,
viewpoint.timestamp)
followers.append(owner_follower)
raise gen.Return((viewpoint, followers))
@classmethod
@gen.coroutine
def QueryWithFollower(cls, client, user_id, viewpoint_id):
"""Queries the specified viewpoint and follower and returns them as a (viewpoint, follower)
tuple.
"""
viewpoint, follower = yield [gen.Task(Viewpoint.Query, client, viewpoint_id, None, must_exist=False),
gen.Task(Follower.Query, client, user_id, viewpoint_id, None, must_exist=False)]
assert viewpoint is not None or follower is None, (viewpoint, follower)
raise gen.Return((viewpoint, follower))
@classmethod
@gen.engine
def QueryEpisodes(cls, client, viewpoint_id, callback, excl_start_key=None, limit=None):
"""Queries episodes belonging to the viewpoint (up to 'limit' total) for
the specified 'viewpoint_id'. Starts with episodes having a key greater
than 'excl_start_key'. Returns a tuple with the array of episodes and
the last queried key.
"""
from viewfinder.backend.db.episode import Episode
# Query the viewpoint_id secondary index with excl_start_key & limit.
query_expr = ('episode.viewpoint_id={id}', {'id': viewpoint_id})
start_index_key = db_client.DBKey(excl_start_key, None) if excl_start_key is not None else None
episode_keys = yield gen.Task(Episode.IndexQueryKeys, client, query_expr,
start_index_key=start_index_key, limit=limit)
episodes = yield gen.Task(Episode.BatchQuery, client, episode_keys, None)
callback((episodes, episode_keys[-1].hash_key if len(episode_keys) > 0 else None))
@classmethod
@gen.coroutine
def QueryFollowers(cls, client, viewpoint_id, excl_start_key=None, limit=None):
"""Query followers belonging to the viewpoint (up to 'limit' total) for
the specified 'viewpoint_id'. The query is for followers starting with
(but excluding) 'excl_start_key'. The callback is invoked with an array
of follower objects and the last queried key.
"""
# Query the viewpoint_id secondary index with excl_start_key & limit.
query_expr = ('follower.viewpoint_id={id}', {'id': viewpoint_id})
start_index_key = db_client.DBKey(excl_start_key, viewpoint_id) if excl_start_key is not None else None
follower_keys = yield gen.Task(Follower.IndexQueryKeys,
client,
query_expr,
start_index_key=start_index_key,
limit=limit)
last_key = follower_keys[-1].hash_key if len(follower_keys) > 0 else None
followers = yield gen.Task(Follower.BatchQuery, client, follower_keys, None)
raise gen.Return((followers, last_key))
@classmethod
def QueryFollowerIds(cls, client, viewpoint_id, callback, excl_start_key=None, limit=None):
"""Query followers belonging to the viewpoint (up to 'limit' total) for
the specified 'viewpoint_id'. The query is for followers starting with
(but excluding) 'excl_start_key'. The callback is invoked with an array
of follower user ids and the last queried key.
"""
def _OnQueryFollowerKeys(follower_keys):
follower_ids = [key.hash_key for key in follower_keys]
last_key = follower_ids[-1] if len(follower_ids) > 0 else None
callback((follower_ids, last_key))
# Query the viewpoint_id secondary index with excl_start_key & limit.
query_expr = ('follower.viewpoint_id={id}', {'id': viewpoint_id})
start_index_key = db_client.DBKey(excl_start_key, viewpoint_id) if excl_start_key is not None else None
Follower.IndexQueryKeys(client, query_expr, callback=_OnQueryFollowerKeys,
start_index_key=start_index_key, limit=limit)
@classmethod
def VisitFollowerIds(cls, client, viewpoint_id, visitor, callback, consistent_read=False):
"""Visit all followers of the specified viewpoint and invoke the
"visitor" function with each follower id. See VisitIndexKeys for
additional detail.
"""
def _OnVisit(follower_key, visit_callback):
visitor(follower_key.hash_key, visit_callback)
query_expr = ('follower.viewpoint_id={id}', {'id': viewpoint_id})
Follower.VisitIndexKeys(client, query_expr, _OnVisit, callback, consistent_read=consistent_read)
@classmethod
def QueryActivities(cls, client, viewpoint_id, callback, excl_start_key=None, limit=None):
"""Queries activities belonging to the viewpoint (up to 'limit' total) for
the specified 'viewpoint_id'. Starts with activities having a key greater
than 'excl_start_key'. Returns a tuple with the array of activities and
the last queried key.
"""
def _OnQueryActivities(activities):
callback((activities, activities[-1].activity_id if len(activities) > 0 else None))
Activity.RangeQuery(client, viewpoint_id, range_desc=None, limit=limit, col_names=None,
callback=_OnQueryActivities, excl_start_key=excl_start_key)
@classmethod
def QueryComments(cls, client, viewpoint_id, callback, excl_start_key=None, limit=None):
"""Queries comments belonging to the viewpoint (up to 'limit' total) for
the specified 'viewpoint_id'. Starts with comments having a key greater
than 'excl_start_key'. Returns a tuple with the array of comments and
the last queried key.
"""
def _OnQueryComments(comments):
callback((comments, comments[-1].comment_id if len(comments) > 0 else None))
Comment.RangeQuery(client, viewpoint_id, range_desc=None, limit=limit, col_names=None,
callback=_OnQueryComments, excl_start_key=excl_start_key)
@classmethod
@gen.engine
def AddFollowersOperation(cls, client, callback, activity, user_id, viewpoint_id, contacts):
"""Adds contacts as followers to the specified viewpoint. Notifies all viewpoint
followers about the new followers.
"""
# TODO(Andy): Remove this once the AddFollowersOperation is in production.
from viewfinder.backend.op.add_followers_op import AddFollowersOperation
AddFollowersOperation.Execute(client, activity, user_id, viewpoint_id, contacts, callback=callback)
@classmethod
@gen.engine
def UpdateOperation(cls, client, callback, act_dict, vp_dict):
"""Updates viewpoint metadata."""
# TODO(Andy): Remove this once the UpdateViewpointOperation is in production.
from viewfinder.backend.op.update_viewpoint_op import UpdateViewpointOperation
user_id = vp_dict.pop('user_id')
UpdateViewpointOperation.Execute(client, act_dict, user_id, vp_dict, callback=callback)
|
0359xiaodong/viewfinder
|
backend/db/viewpoint.py
|
Python
|
apache-2.0
| 23,638
|
[
"VisIt"
] |
5a7c86bb1cd554b6df20fab7c24f9d97bbc8ce9d5e42459ccec15290eaf821a5
|
import itertools
from collections import defaultdict, namedtuple
from copy import copy
from .block import Block
from .common import Tagged, fail
from .errors import Errors
from .env import env
from .graph import Graph
from .loop import LoopNestTree
from .symbol import Symbol
from .synth import make_synth_params
from .type import Type
from .irvisitor import IRVisitor
from .ir import CONST, JUMP, CJUMP, MCJUMP, PHIBase
from .signal import Signal
from logging import getLogger
logger = getLogger(__name__)
FunctionParam = namedtuple('FunctionParam', ('sym', 'copy', 'defval'))
class Scope(Tagged):
ordered_scopes = []
TAGS = {
'global', 'function', 'class', 'method', 'ctor',
'callable', 'returnable', 'mutable', 'inherited', 'predicate',
'testbench', 'pure',
'module', 'worker', 'instantiated',
'lib', 'namespace', 'builtin', 'decorator',
'port', 'typeclass',
'function_module',
'inlinelib',
'package', 'directory'
}
scope_id = 0
@classmethod
def create(cls, parent, name, tags, lineno=0, origin=None):
if name is None:
name = "unnamed_scope" + str(cls.scope_id)
s = Scope(parent, name, tags, lineno, cls.scope_id)
if s.name in env.scopes:
env.append_scope(s)
fail((env.scope_file_map[s], lineno), Errors.REDEFINED_NAME, {name})
env.append_scope(s)
if origin:
s.origin = origin
env.scope_file_map[s] = env.scope_file_map[origin]
cls.scope_id += 1
return s
@classmethod
def create_namespace(cls, parent, name, tags, path=None):
tags |= {'namespace'}
namespace = Scope.create(parent, name, tags, lineno=1)
namesym = namespace.add_sym('__name__', typ=Type.str_t)
if namespace.is_global():
namespace.constants[namesym] = CONST('__main__')
else:
namespace.constants[namesym] = CONST(namespace.name)
if path:
filesym = namespace.add_sym('__file__', typ=Type.str_t)
namespace.constants[filesym] = CONST(path)
return namespace
@classmethod
def destroy(cls, scope):
assert scope.name in env.scopes
env.remove_scope(scope)
@classmethod
def get_scopes(cls, bottom_up=True, with_global=False, with_class=False, with_lib=False):
def ret_helper():
scopes = cls.ordered_scopes[:]
scopes = [s for s in scopes if not s.is_pure()]
# Exclude scopes that have no code
scopes = [s for s in scopes
if not (s.is_lib() and s.is_function())
and not (s.is_lib() and s.is_method())
and not s.is_builtin()
and not s.is_decorator()
and not s.is_typeclass()
and not s.is_directory()]
if not with_global:
scopes.remove(Scope.global_scope())
if not with_class:
scopes = [s for s in scopes if not s.is_class()]
if not with_lib:
scopes = [s for s in scopes if not s.is_lib()]
if bottom_up:
scopes.reverse()
return scopes
cls.reorder_scopes()
cls.ordered_scopes = sorted(env.scopes.values())
return ret_helper()
@classmethod
def reorder_scopes(cls):
# hierarchical order
def set_h_order(scope, order):
if order > scope.order[0]:
scope.order = (order, -1)
else:
return
order += 1
for s in scope.children:
set_h_order(s, order)
for s in env.scopes.values():
if s.is_namespace():
s.order = (0, 0)
for f in s.children:
set_h_order(f, 1)
if env.depend_graph:
nodes = env.depend_graph.bfs_ordered_nodes()
for s in nodes:
d_order = nodes.index(s)
preds = env.depend_graph.preds(s)
if preds:
preds_max_order = max([nodes.index(p) for p in preds])
else:
preds_max_order = 0
if d_order < preds_max_order:
s.order = (s.order[0], d_order)
else:
s.order = (s.order[0], preds_max_order + 1)
@classmethod
def get_class_scopes(cls, bottom_up=True):
return [s for s in cls.get_scopes(bottom_up=bottom_up, with_class=True) if s.is_class()]
@classmethod
def global_scope(cls):
return env.scopes[env.global_scope_name]
@classmethod
def is_unremovable(cls, s):
return s.is_instantiated() or (s.parent and s.parent.is_instantiated())
def __init__(self, parent, name, tags, lineno, scope_id):
super().__init__(tags)
self.name = name
self.orig_name = name
self.parent = parent
if parent:
self.name = parent.name + "." + name
parent.append_child(self)
self.lineno = lineno
self.scope_id = scope_id
self.symbols = {}
self.params = []
self.return_type = None
self.entry_block = None
self.exit_block = None
self.children = []
self.bases = []
self.origin = None
self.subs = []
self.usedef = None
self.loop_tree = LoopNestTree()
self.callee_instances = defaultdict(set)
#self.stgs = []
self.order = (-1, -1)
self.block_count = 0
self.workers = []
self.worker_owner = None
self.asap_latency = -1
self.type_args = []
self.synth_params = make_synth_params()
self.constants = {}
self.branch_graph = Graph()
def __str__(self):
s = '\n================================\n'
tags = ", ".join([att for att in self.tags])
if self.parent:
s += "Scope: {}, parent={} ({})\n".format(self.orig_name, self.parent.name, tags)
else:
s += "Scope: {} ({})\n".format(self.orig_name, tags)
s += ", ".join([str(sym) for sym in self.symbols])
s += "\n"
s += '================================\n'
s += 'Parameters\n'
for p, _, val in self.params:
if val:
s += '{}:{} = {}\n'.format(p, repr(p.typ), val)
else:
s += '{}:{}\n'.format(p, repr(p.typ))
s += "\n"
s += 'Return\n'
if self.return_type:
s += '{}\n'.format(repr(self.return_type))
else:
s += 'None\n'
s += 'Synthesis\n{}\n'.format(self.synth_params)
s += '================================\n'
for blk in self.traverse_blocks():
s += str(blk)
s += '================================\n'
for r in self.loop_tree.traverse():
s += str(r)
s += '================================\n'
return s
def __repr__(self):
return self.name
def __lt__(self, other):
if self.order < other.order:
return True
elif self.order > other.order:
return False
elif self.order == other.order:
return self.lineno < other.lineno
def clone_symbols(self, scope, postfix=''):
symbol_map = {}
for orig_sym in self.symbols.values():
new_sym = orig_sym.clone(scope, postfix)
assert new_sym.name not in scope.symbols
scope.symbols[new_sym.name] = new_sym
symbol_map[orig_sym] = new_sym
return symbol_map
def clone_blocks(self, scope):
block_map = {}
stm_map = {}
for b in self.traverse_blocks():
block_map[b] = b.clone(scope, stm_map)
for b in self.traverse_blocks():
b_clone = block_map[b]
b_clone.reconnect(block_map)
# jump target
for stm in stm_map.values():
if stm.is_a(JUMP):
stm.target = block_map[stm.target]
elif stm.is_a(CJUMP):
stm.true = block_map[stm.true]
stm.false = block_map[stm.false]
elif stm.is_a(MCJUMP):
stm.targets = [block_map[t] for t in stm.targets]
return block_map, stm_map
def clone(self, prefix, postfix, parent=None):
#if self.is_lib():
# return
name = prefix + '_' if prefix else ''
name += self.orig_name
name = name + '_' + postfix if postfix else name
parent = self.parent if parent is None else parent
s = Scope.create(parent, name, set(self.tags), self.lineno, origin=self)
logger.debug('CLONE {} {}'.format(self.name, s.name))
s.children = list(self.children)
# TODO: should be reconsidered the owned policy
#for child in s.children:
# child.parent = s
s.bases = list(self.bases)
s.subs = list(self.subs)
s.type_args = list(self.type_args)
symbol_map = self.clone_symbols(s)
s.params = []
for p, cp, defval in self.params:
param = FunctionParam(symbol_map[p],
symbol_map[cp],
defval.clone() if defval else None)
s.params.append(param)
s.return_type = self.return_type
block_map, stm_map = self.clone_blocks(s)
s.entry_block = block_map[self.entry_block]
s.exit_block = block_map[self.exit_block]
s.usedef = None
for n in self.branch_graph.nodes:
if n in stm_map:
new_n = stm_map[n]
s.branch_graph.add_node(new_n)
for n0, n1, _ in self.branch_graph.edges:
if n0 in stm_map and n1 in stm_map:
new_n0 = stm_map[n0]
new_n1 = stm_map[n1]
if new_n0 < new_n1:
s.branch_graph.add_edge(new_n0, new_n1)
else:
s.branch_graph.add_edge(new_n1, new_n0)
if self.is_function_module():
new_callee_instances = defaultdict(set)
for func_sym, inst_names in self.callee_instances.items():
new_func_sym = symbol_map[func_sym]
new_callee_instances[new_func_sym] = copy(inst_names)
s.callee_instances = new_callee_instances
s.order = self.order
sym_replacer = SymbolReplacer(symbol_map)
sym_replacer.process(s)
#s.parent.append_child(s)
#env.append_scope(s)
s.cloned_symbols = symbol_map
s.cloned_blocks = block_map
s.cloned_stms = stm_map
s.synth_params = self.synth_params.copy()
# TODO:
#s.loop_tree = None
#s.constants
return s
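# Usage sketch (hypothetical call site; prefix/postfix only build a unique name
# for the copy):
#   new_scope = some_scope.clone('inlined', str(call_id))
#   new_sym = new_scope.cloned_symbols[old_sym]
# The clone reuses the same child/base scope objects (the lists are copied) but
# gets its own symbols, blocks and statements, recorded in cloned_symbols,
# cloned_blocks and cloned_stms.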
def inherit(self, name, overrides):
sub = Scope.create(self.parent, name, set(self.tags), self.lineno, origin=self)
sub.bases.append(self)
sub.symbols = copy(self.symbols)
sub.workers = copy(self.workers)
sub.children = copy(self.children)
sub.exit_block = sub.entry_block = Block(sub)
sub.add_tag('inherited')
#env.append_scope(sub)
self.subs.append(sub)
for method in overrides:
sub.children.remove(method)
sub_method = method.clone('', '', sub)
_in_self_sym, self_sym, _ = sub_method.params[0]
assert self_sym.name == 'self'
assert self_sym.typ.get_scope() is self
self_typ = Type.object(sub)
_in_self_sym.set_type(self_typ)
self_sym.set_type(self_typ)
method_sym = sub.symbols[sub_method.orig_name]
sub_method_sym = method_sym.clone(sub)
sub_method_sym.typ.set_scope(sub_method)
sub.symbols[sub_method.orig_name] = sub_method_sym
return sub
def find_child(self, name):
for child in self.children:
if child.orig_name == name:
return child
return None
def find_parent_scope(self, name):
if self.find_child(name):
return self
elif self.parent:
return self.parent.find_parent_scope(name)
else:
return None
def find_scope(self, name):
if self.orig_name == name:
return self
child = self.find_child(name)
if child:
return child
if self.parent:
return self.parent.find_scope(name)
return None
def add_sym(self, name, tags=None, typ=Type.undef_t):
if name in self.symbols:
raise RuntimeError("symbol '{}' is already registered ".format(name))
sym = Symbol(name, self, tags, typ)
self.symbols[name] = sym
return sym
def add_temp(self, temp_name=None, tags=None, typ=Type.undef_t):
name = Symbol.unique_name(temp_name)
if tags:
tags.add('temp')
else:
tags = {'temp'}
return self.add_sym(name, tags, typ)
def add_condition_sym(self):
return self.add_temp(Symbol.condition_prefix, {'condition'}, typ=Type.bool_t)
def add_param_sym(self, param_name, typ=Type.undef_t):
name = '{}_{}'.format(Symbol.param_prefix, param_name)
return self.add_sym(name, {'param'}, typ)
def find_param_sym(self, param_name):
name = '{}_{}'.format(Symbol.param_prefix, param_name)
return self.find_sym(name)
def add_return_sym(self):
return self.add_sym(Symbol.return_prefix, ['return'])
def del_sym(self, name):
if name in self.symbols:
del self.symbols[name]
def import_sym(self, sym):
if sym.name in self.symbols and sym is not self.symbols[sym.name]:
raise RuntimeError("symbol '{}' is already registered ".format(sym.name))
self.symbols[sym.name] = sym
def find_sym(self, name):
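# Lookup order: dotted names are resolved segment by segment via find_sym_r;
# plain names are looked up in this scope first, then (when the enclosing scope
# is a class) in this scope's bases followed by the outermost/global namespace,
# otherwise by walking up through the parent scopes.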
names = name.split('.')
if len(names) > 1:
return self.find_sym_r(names)
if name in self.symbols:
return self.symbols[name]
elif self.parent:
if self.parent.is_class():
# look-up from bases
for base in self.bases:
found = base.find_sym(name)
if found:
break
else:
# otherwise, look-up from global
found = env.outermost_scope().find_sym(name)
if not found:
found = self.global_scope().find_sym(name)
else:
found = self.parent.find_sym(name)
return found
return None
def find_sym_r(self, names):
name = names[0]
sym = self.find_sym(name)
if sym and len(names) > 1:
if sym.typ.is_containable():
return sym.typ.get_scope().find_sym_r(names[1:])
else:
return None
return sym
def has_sym(self, name):
return name in self.symbols
def gen_sym(self, name):
if self.has_sym(name):
sym = self.symbols[name]
else:
sym = self.add_sym(name)
return sym
def rename_sym(self, old, new):
assert old in self.symbols
sym = self.symbols[old]
del self.symbols[old]
sym.name = new
self.symbols[new] = sym
return sym
def inherit_sym(self, orig_sym, new_name):
#assert orig_sym.scope is self
if self.has_sym(new_name):
new_sym = self.symbols[new_name]
else:
new_sym = self.add_sym(new_name, set(orig_sym.tags), typ=orig_sym.typ.clone())
if orig_sym.ancestor:
new_sym.ancestor = orig_sym.ancestor
else:
new_sym.ancestor = orig_sym
return new_sym
def qualified_name(self):
if self.name.startswith(env.global_scope_name):
name = self.name[len(env.global_scope_name) + 1:]
else:
name = self.name
return name.replace('.', '_')
def set_entry_block(self, blk):
assert self.entry_block is None
self.entry_block = blk
def set_exit_block(self, blk):
self.exit_block = blk
def traverse_blocks(self):
assert len(self.entry_block.preds) == 0
yield from self.entry_block.traverse()
def replace_block(self, old, new):
new.preds = old.preds[:]
new.preds_loop = old.preds_loop[:]
new.succs = old.succs[:]
new.succs_loop = old.succs_loop[:]
for blk in self.traverse_blocks():
if blk is old:
for pred in old.preds:
pred.replace_succ(old, new)
pred.replace_succ_loop(old, new)
for succ in old.succs:
succ.replace_pred(old, new)
succ.replace_pred_loop(old, new)
def append_child(self, child_scope):
if child_scope not in self.children:
self.children.append(child_scope)
def add_param(self, sym, copy, defval):
self.params.append(FunctionParam(sym, copy, defval))
def has_param(self, sym):
name = sym.name.split('#')[0]
for p, _, _ in self.params:
if p.name == name:
return True
return False
def get_param_index(self, sym):
name = sym.name.split('#')[0]
for i, (p, _, _) in enumerate(self.params):
if p.name == name:
return i
return -1
def append_callee_instance(self, callee_scope, inst_name):
self.callee_instances[callee_scope].add(inst_name)
def dfgs(self, bottom_up=False):
def collect_dfg(dfg, ds):
ds.append(dfg)
for c in dfg.children:
collect_dfg(c, ds)
ds = []
collect_dfg(self.top_dfg, ds)
return ds
def find_ctor(self):
assert self.is_class()
for child in self.children:
if child.is_ctor():
return child
return None
def is_global(self):
return self.name == env.global_scope_name
def is_containable(self):
return self.is_namespace() or self.is_class()
def is_subclassof(self, clazz):
if self is clazz:
return True
for base in self.bases:
if base is clazz:
return True
if base.is_subclassof(clazz):
return True
return False
def class_fields(self):
assert self.is_class()
class_fields = {}
if self.bases:
for base in self.bases:
fields = base.class_fields()
class_fields.update(fields)
class_fields.update(self.symbols)
return class_fields
def register_worker(self, worker_scope, worker_args):
for i, (w, _) in enumerate(self.workers[:]):
if w is worker_scope:
self.workers.pop(i)
self.workers.append((worker_scope, worker_args))
assert worker_scope.worker_owner is None or worker_scope.worker_owner is self
worker_scope.worker_owner = self
def reset_loop_tree(self):
self.loop_tree = LoopNestTree()
def top_region(self):
return self.loop_tree.root
def parent_region(self, r):
return self.loop_tree.get_parent_of(r)
def child_regions(self, r):
return self.loop_tree.get_children_of(r)
def set_top_region(self, r):
self.loop_tree.root = r
self.loop_tree.add_node(r)
def append_child_regions(self, parent, children):
for child in children:
self.loop_tree.add_edge(parent, child)
def append_sibling_region(self, r, new_r):
parent = self.loop_tree.get_parent_of(r)
self.loop_tree.add_edge(parent, new_r)
def remove_region(self, r):
parent = self.loop_tree.get_parent_of(r)
self.loop_tree.del_edge(parent, r, auto_del_node=False)
self.loop_tree.del_node(r)
def find_region(self, blk):
for r in self.loop_tree.traverse():
if blk in r.blocks():
return r
return None
def remove_block_from_region(self, blk):
if not self.loop_tree.root:
return
r = self.find_region(blk)
r.remove_body(blk)
def is_leaf_region(self, r):
return self.loop_tree.is_leaf(r)
def traverse_regions(self, reverse=False):
return self.loop_tree.traverse(reverse)
def add_branch_graph_edge(self, k, vs):
assert isinstance(vs, list)
self.branch_graph.add_node(k)
for v in itertools.chain(*vs):
if k < v:
self.branch_graph.add_edge(k, v)
else:
self.branch_graph.add_edge(v, k)
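# Branch-graph edges are always stored with the 'smaller' statement (per the
# statements' ordering) as the source, so has_branch_edge below only needs to
# probe one direction for a given pair.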
def has_branch_edge(self, stm0, stm1):
if stm0 < stm1:
return self.branch_graph.find_edge(stm0, stm1) is not None
else:
return self.branch_graph.find_edge(stm1, stm0) is not None
class SymbolReplacer(IRVisitor):
def __init__(self, sym_map):
super().__init__()
self.sym_map = sym_map
def visit_TEMP(self, ir):
if ir.sym in self.sym_map:
ir.sym = self.sym_map[ir.sym]
else:
logger.debug('WARNING: not found {}'.format(ir.sym))
def visit_ATTR(self, ir):
self.visit(ir.exp)
if ir.attr in self.sym_map:
ir.attr = self.sym_map[ir.attr]
else:
logger.debug('WARNING: not found {}'.format(ir.attr))
def visit_ARRAY(self, ir):
if ir.sym in self.sym_map:
ir.sym = self.sym_map[ir.sym]
for item in ir.items:
self.visit(item)
self.visit(ir.repeat)
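# write_dot renders a scope's control-flow graph with pydot (an optional
# dependency): ordinary successor edges are drawn in the default color, loop
# back-edges in red, and the PNG is written to env.debug_output_dir.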
def write_dot(scope, tag):
try:
import pydot
except ImportError:
raise
# force disable debug mode to simplify the caption
debug_mode = env.dev_debug_mode
env.dev_debug_mode = False
name = scope.orig_name + '_' + str(tag)
g = pydot.Dot(name, graph_type='digraph')
def get_text(blk):
s = blk.name + '\n'
for stm in blk.stms:
s += str(stm).replace('\n', '\\l') + '\\l'  # '\l' is pydot's left-justified line break
s = s.replace(':', '_')
return s
blk_map = {blk: pydot.Node(get_text(blk), shape='box') for blk in scope.traverse_blocks()}
for n in blk_map.values():
g.add_node(n)
for blk in blk_map.keys():
from_node = blk_map[blk]
for succ in blk.succs:
to_node = blk_map[succ]
if succ in blk.succs_loop:
g.add_edge(pydot.Edge(from_node, to_node, color='red'))
else:
g.add_edge(pydot.Edge(from_node, to_node))
#for pred in blk.preds:
# to_node = blk_map[pred]
# if pred in blk.preds_loop:
# g.add_edge(pydot.Edge(from_node, to_node, style='dashed', color='red'))
# else:
# g.add_edge(pydot.Edge(from_node, to_node, style='dashed'))
g.write_png('{}/{}.png'.format(env.debug_output_dir, name))
env.dev_debug_mode = debug_mode
|
ktok07b6/polyphony
|
polyphony/compiler/scope.py
|
Python
|
mit
| 23,258
|
[
"VisIt"
] |
fb565ac97768d119dd9088935b8dbceb971cbc4d4131a0cb6e26ccd35c635f3f
|
#!/usr/bin/env python
import Bio.PDB
import warnings
from Bio import BiopythonWarning
warnings.simplefilter('ignore', BiopythonWarning)
import sys
import os
import re
import tempfile
import numpy as np
from Bio.SVDSuperimposer import SVDSuperimposer
from math import sqrt
from argparse import ArgumentParser
import itertools
import subprocess
def parse_fnat(fnat_out):
fnat=-1;
nat_correct=-1
nat_total=-1
fnonnat=-1
nonnat_count=-1
model_total=-1
inter=[]
for line in fnat_out.split("\n"):
# print line
line=line.rstrip('\n')
match=re.search(r'NATIVE: (\d+)(\w) (\d+)(\w)',line)
if(re.search(r'^Fnat',line)):
list=line.split(' ')
fnat=float(list[3])
nat_correct=int(list[1])
nat_total=int(list[2])
elif(re.search(r'^Fnonnat',line)):
list=line.split(' ')
fnonnat=float(list[3])
nonnat_count=int(list[1])
model_total=int(list[2])
elif(match):
#print line
res1=match.group(1)
chain1=match.group(2)
res2=match.group(3)
chain2=match.group(4)
# print res1 + ' ' + chain1 + ' ' + res2 + ' ' + chain2
inter.append(res1 + chain1)
inter.append(res2 + chain2)
return (fnat,nat_correct,nat_total,fnonnat,nonnat_count,model_total,inter)
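# parse_fnat reads the text output of the bundled 'fnat' helper binary.
# Illustrative lines (format inferred from the regexes above, not from the
# binary itself):
#   Fnat 12 20 0.600     -> nat_correct=12, nat_total=20, fnat=0.600
#   Fnonnat 3 15 0.200   -> nonnat_count=3, model_total=15, fnonnat=0.200
#   NATIVE: 123A 45B     -> interface residues '123A' and '45B'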
def capri_class(fnat,iRMS,LRMS,capri_peptide=False):
if capri_peptide:
if(fnat < 0.2 or (LRMS > 5.0 and iRMS > 2.0)):
return 'Incorrect'
elif((fnat >= 0.2 and fnat < 0.5) and (LRMS <= 5.0 or iRMS <= 2.0) or (fnat >= 0.5 and LRMS > 2.0 and iRMS > 1.0)):
return 'Acceptable'
elif((fnat >= 0.5 and fnat < 0.8) and (LRMS <= 2.0 or iRMS <= 1.0) or (fnat >= 0.8 and LRMS > 1.0 and iRMS > 0.5)):
return 'Medium'
elif(fnat >= 0.8 and (LRMS <= 1.0 or iRMS <= 0.5)):
return 'High'
else:
return 'Undef'
else:
if(fnat < 0.1 or (LRMS > 10.0 and iRMS > 4.0)):
return 'Incorrect'
elif((fnat >= 0.1 and fnat < 0.3) and (LRMS <= 10.0 or iRMS <= 4.0) or (fnat >= 0.3 and LRMS > 5.0 and iRMS > 2.0)):
return 'Acceptable'
elif((fnat >= 0.3 and fnat < 0.5) and (LRMS <= 5.0 or iRMS <= 2.0) or (fnat >= 0.5 and LRMS > 1.0 and iRMS > 1.0)):
return 'Medium'
elif(fnat >= 0.5 and (LRMS <= 1.0 or iRMS <= 1.0)):
return 'High'
else:
return 'Undef'
def capri_class_DockQ(DockQ,capri_peptide=False):
if capri_peptide:
return 'Undef for capri_peptides'
(c1,c2,c3)=(0.23,0.49,0.80)
if(DockQ < c1):
return 'Incorrect'
elif(DockQ >= c1 and DockQ < c2):
return 'Acceptable'
elif(DockQ >= c2 and DockQ < c3):
return 'Medium'
elif(DockQ >= c3):
return 'High'
else:
return 'Undef'
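# Illustrative example: capri_class_DockQ(0.65) returns 'Medium', since
# 0.49 <= 0.65 < 0.80; the thresholds (0.23, 0.49, 0.80) map DockQ onto the
# CAPRI Incorrect/Acceptable/Medium/High classes.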
def calc_DockQ(model,native,use_CA_only=False,capri_peptide=False):
exec_path=os.path.dirname(os.path.abspath(sys.argv[0]))
atom_for_sup=['CA','C','N','O']
if(use_CA_only):
atom_for_sup=['CA']
cmd_fnat=exec_path + '/fnat ' + model + ' ' + native + ' 5 -all'
cmd_interface=exec_path + '/fnat ' + model + ' ' + native + ' 10 -all'
if capri_peptide:
cmd_fnat=exec_path + '/fnat ' + model + ' ' + native + ' 4 -all'
cmd_interface=exec_path + '/fnat ' + model + ' ' + native + ' 8 -cb'
fnat_out = os.popen(cmd_fnat).read()
#fnat_out = subprocess.getoutput(cmd_fnat)
#print(fnat_out)
# sys.exit()
(fnat,nat_correct,nat_total,fnonnat,nonnat_count,model_total,interface5A)=parse_fnat(fnat_out)
assert fnat!=-1, "Error running cmd: %s\n" % (cmd_fnat)
inter_out = os.popen(cmd_interface).read()
# inter_out = subprocess.getoutput(cmd_interface)
(fnat_bb,nat_correct_bb,nat_total_bb,fnonnat_bb,nonnat_count_bb,model_total_bb,interface)=parse_fnat(inter_out)
assert fnat_bb!=-1, "Error running cmd: %s\n" % (cmd_interface)
#print fnat
#Use same interface as for fnat for iRMS
#interface=interface5A
# Start the parser
pdb_parser = Bio.PDB.PDBParser(QUIET = True)
# Get the structures
ref_structure = pdb_parser.get_structure("reference", native)
sample_structure = pdb_parser.get_structure("model", model)
# Use the first model in the pdb-files for alignment
# Change the number 0 if you want to align to another structure
ref_model = ref_structure[0]
sample_model = sample_structure[0]
# Make a list of the atoms (in the structures) you wish to align.
# In this case we use CA atoms whose index is in the specified range
ref_atoms = []
sample_atoms = []
common_interface=[]
chain_res={}
#find atoms common in both sample and native
atoms_def_sample=[]
atoms_def_in_both=[]
#first read in sample
for sample_chain in sample_model:
# print sample_chain
chain=sample_chain.id
# print chain
for sample_res in sample_chain:
# print sample_res
if sample_res.get_id()[0] != ' ': #Skip hetatm.
continue
resname=sample_res.get_id()[1]
key=str(resname) + chain
for a in atom_for_sup:
atom_key=key + '.' + a
if a in sample_res:
if atom_key in atoms_def_sample:
print(atom_key + ' already added (MODEL)!!!')
atoms_def_sample.append(atom_key)
#then read in native also present in sample
for ref_chain in ref_model:
chain=ref_chain.id
for ref_res in ref_chain:
#print ref_res
if ref_res.get_id()[0] != ' ': #Skip hetatm.
# print ref_res.get_id()
continue
resname=ref_res.get_id()[1]
key=str(resname) + chain
for a in atom_for_sup:
atom_key=key + '.' + a
if a in ref_res and atom_key in atoms_def_sample:
if atom_key in atoms_def_in_both:
print(atom_key + ' already added (Native)!!!')
atoms_def_in_both.append(atom_key)
# print atoms_def_in_both
for sample_chain in sample_model:
chain=sample_chain.id
if chain not in list(chain_res.keys()):
chain_res[chain]=[]
for sample_res in sample_chain:
if sample_res.get_id()[0] != ' ': #Skip hetatm.
continue
resname=sample_res.get_id()[1]
key=str(resname) + chain
chain_res[chain].append(key)
if key in interface:
for a in atom_for_sup:
atom_key=key + '.' + a
if a in sample_res and atom_key in atoms_def_in_both:
sample_atoms.append(sample_res[a])
common_interface.append(key)
#print inter_pairs
chain_ref={}
common_residues=[]
# Iterate of all chains in the model in order to find all residues
for ref_chain in ref_model:
# Iterate of all residues in each model in order to find proper atoms
# print dir(ref_chain)
chain=ref_chain.id
if chain not in list(chain_ref.keys()):
chain_ref[chain]=[]
for ref_res in ref_chain:
if ref_res.get_id()[0] != ' ': #Skip hetatm.
continue
resname=ref_res.get_id()[1]
key=str(resname) + chain
#print ref_res
# print key
# print chain_res.values()
if key in chain_res[chain]: # if key is present in sample
#print key
for a in atom_for_sup:
atom_key=key + '.' + a
if a in ref_res and atom_key in atoms_def_in_both:
chain_ref[chain].append(ref_res[a])
common_residues.append(key)
#chain_sample.append((ref_res['CA'])
if key in common_interface:
# Check if residue number ( .get_id() ) is in the list
# Append CA atom to list
#print key
for a in atom_for_sup:
atom_key=key + '.' + a
#print atom_key
if a in ref_res and atom_key in atoms_def_in_both:
ref_atoms.append(ref_res[a])
#get the ones that are present in native
chain_sample={}
for sample_chain in sample_model:
chain=sample_chain.id
if chain not in list(chain_sample.keys()):
chain_sample[chain]=[]
for sample_res in sample_chain:
if sample_res.get_id()[0] != ' ': #Skip hetatm.
continue
resname=sample_res.get_id()[1]
key=str(resname) + chain
if key in common_residues:
for a in atom_for_sup:
atom_key=key + '.' + a
if a in sample_res and atom_key in atoms_def_in_both:
chain_sample[chain].append(sample_res[a])
#if key in common_residues:
# print key
#sample_atoms.append(sample_res['CA'])
#common_interface.append(key)
assert len(ref_atoms)!=0, "length of native is zero"
assert len(sample_atoms)!=0, "length of model is zero"
assert len(ref_atoms)==len(sample_atoms), "Different number of atoms in native and model %d %d\n" % (len(ref_atoms),len(sample_atoms))
super_imposer = Bio.PDB.Superimposer()
super_imposer.set_atoms(ref_atoms, sample_atoms)
super_imposer.apply(sample_model.get_atoms())
# Print RMSD:
irms=super_imposer.rms
(chain1,chain2)=list(chain_sample.keys())
ligand_chain=chain1
receptor_chain=chain2
len1=len(chain_res[chain1])
len2=len(chain_res[chain2])
assert len1!=0, "%s chain has zero length!\n" % chain1
assert len2!=0, "%s chain has zero length!\n" % chain2
class1='ligand'
class2='receptor'
if(len(chain_sample[chain1]) > len(chain_sample[chain2])):
receptor_chain=chain1
ligand_chain=chain2
class1='receptor'
class2='ligand'
#print len1
#print len2
#print chain_sample.keys()
#Set to align on receptor
assert len(chain_ref[receptor_chain])==len(chain_sample[receptor_chain]), "Different number of atoms in native and model receptor (chain %c) %d %d\n" % (receptor_chain,len(chain_ref[receptor_chain]),len(chain_sample[receptor_chain]))
super_imposer.set_atoms(chain_ref[receptor_chain], chain_sample[receptor_chain])
super_imposer.apply(sample_model.get_atoms())
receptor_chain_rms=super_imposer.rms
#print receptor_chain_rms
#print dir(super_imposer)
#print chain1_rms
#Grep out the transformed ligand coords
#print ligand_chain
#print chain_ref[ligand_chain]
#print chain_sample[ligand_chain]
#l1=len(chain_ref[ligand_chain])
#l2=len(chain_sample[ligand_chain])
assert len(chain_ref[ligand_chain])!=0 or len(chain_sample[ligand_chain])!=0, "Zero number of equivalent atoms in native and model ligand (chain %s) %d %d.\nCheck that the residue numbers in model and native is consistent\n" % (ligand_chain,len(chain_ref[ligand_chain]),len(chain_sample[ligand_chain]))
assert len(chain_ref[ligand_chain])==len(chain_sample[ligand_chain]), "Different number of atoms in native and model ligand (chain %c) %d %d\n" % (ligand_chain,len(chain_ref[ligand_chain]),len(chain_sample[ligand_chain]))
coord1=np.array([atom.coord for atom in chain_ref[ligand_chain]])
coord2=np.array([atom.coord for atom in chain_sample[ligand_chain]])
#coord1=np.array([atom.coord for atom in chain_ref[receptor_chain]])
#coord2=np.array([atom.coord for atom in chain_sample[receptor_chain]])
#print len(coord1)
#print len(coord2)
sup=SVDSuperimposer()
Lrms = sup._rms(coord1,coord2) #using the private _rms function which does not superimpose
#super_imposer.set_atoms(chain_ref[ligand_chain], chain_sample[ligand_chain])
#super_imposer.apply(sample_model.get_atoms())
#coord1=np.array([atom.coord for atom in chain_ref[receptor_chain]])
#coord2=np.array([atom.coord for atom in chain_sample[receptor_chain]])
#Rrms= sup._rms(coord1,coord2)
#should give same result as above line
#diff = coord1-coord2
#l = len(diff) #number of atoms
#from math import sqrt
#print sqrt(sum(sum(diff*diff))/l)
#print np.sqrt(np.sum(diff**2)/l)
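# DockQ combines Fnat with the interface RMSD and ligand RMSD: each RMSD is
# mapped into (0, 1] by a scaled term 1/(1 + (rms/d)^2), with d = 1.5 A for
# iRMS and d = 8.5 A for LRMS, and the three components are averaged.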
DockQ=(float(fnat) + 1/(1+(irms/1.5)*(irms/1.5)) + 1/(1+(Lrms/8.5)*(Lrms/8.5)))/3
info={}
info['DockQ']=DockQ
info['irms']=irms
info['Lrms']=Lrms
info['fnat']=fnat
info['nat_correct']=nat_correct
info['nat_total']=nat_total
info['fnonnat']=fnonnat
info['nonnat_count']=nonnat_count
info['model_total']=model_total
info['chain1']=chain1
info['chain2']=chain2
info['len1']=len1
info['len2']=len2
info['class1']=class1
info['class2']=class2
return info
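# Minimal usage sketch (file names are hypothetical; both PDBs must contain
# exactly two chains with consistent residue numbering -- main() below normally
# takes care of merging and renumbering multi-chain inputs first):
#   info = calc_DockQ('model.pdb', 'native.pdb')
#   print(info['DockQ'], info['fnat'], info['irms'], info['Lrms'])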
def get_pdb_chains(pdb):
pdb_parser = Bio.PDB.PDBParser(QUIET = True)
pdb_struct = pdb_parser.get_structure("reference", pdb)[0]
chain=[]
for c in pdb_struct:
chain.append(c.id)
return chain
#ATOM 3312 CA
#ATOM 3315 CB ALA H 126 -21.802 31.674 73.597 1.00 58.05 C
def make_two_chain_pdb(pdb,group1,group2): #renumber from 1
pdb_parser = Bio.PDB.PDBParser(QUIET = True)
pdb_struct = pdb_parser.get_structure("reference", pdb)[0]
for c in pdb_struct:
if c.id in group1:
c.id='A'
if c.id in group2:
c.id='B'
(code,outfile)=tempfile.mkstemp()
io=Bio.PDB.PDBIO()
io.set_structure(pdb_struct)
io.save(outfile)
exec_path=os.path.dirname(os.path.abspath(sys.argv[0]))
cmd=exec_path + '/scripts/renumber_pdb.pl ' + outfile
os.system(cmd)
os.remove(outfile)
return outfile +'.renum'
def change_chain(pdb_string,chain):
new_str=[]
for line in pdb_string:
s=list(line)
s[21]=chain
new_str.append("".join(s))
return "\n".join(new_str)
def make_two_chain_pdb_perm(pdb,group1,group2): #not yet ready
pdb_chains={}
f=open(pdb);
for line in f.readlines():
if line[0:4] == "ATOM":
# s=list(line);
#print line
chain=line[21]
atom=line[13:16]
resnum=int(line[22:26])
# print atom + ':' + str(resnum) +':'
if chain not in pdb_chains:
pdb_chains[chain]=[]
pdb_chains[chain].append(line)
# print line
# s[21]='B'
# print "".join(s)
# print chain
f.close()
#sys.exit()
(code,outfile)=tempfile.mkstemp()
f=open(outfile,'w')
for c in group1:
# print pdb_chains[c]
f.write(change_chain(pdb_chains[c],"A"))
f.write("TER\n");
for c in group2:
f.write(change_chain(pdb_chains[c],"B"))
f.close();
#print outfile
exec_path=os.path.dirname(os.path.abspath(sys.argv[0]))
cmd=exec_path + '/scripts/renumber_pdb.pl ' + outfile
os.system(cmd)
os.remove(outfile)
return outfile +'.renum'
def main():
parser=ArgumentParser(description="DockQ - Quality measure for protein-protein docking models")
parser.add_argument('model',metavar='<model>',type=str,nargs=1,help='path to model file')
parser.add_argument('native',metavar='<native>',type=str,nargs=1,help='path to native file')
parser.add_argument('-capri_peptide',default=False,action='store_true',help='use version for capri_peptide (DockQ cannot be trusted for this setting)')
parser.add_argument('-short',default=False,action='store_true',help='short output')
parser.add_argument('-verbose',default=False,action='store_true',help='talk a lot!')
parser.add_argument('-quiet',default=False,action='store_true',help='keep quiet!')
parser.add_argument('-useCA',default=False,action='store_true',help='use CA instead of backbone')
parser.add_argument('-skip_check',default=False,action='store_true',help='skip initial check to speed up two-chain examples')
parser.add_argument('-no_needle',default=False,action='store_true',help='do not use global alignment to fix residue numbering between native and model during chain permutation (use only in case needle is not installed, and the residues between the chains are identical)')
parser.add_argument('-perm1',default=False,action='store_true',help='use all chain1 permutations to find maximum DockQ (number of comparisons is n!, e.g. 24 for four chains; if combined with -perm2 there will be n!*m! combinations)')
parser.add_argument('-perm2',default=False,action='store_true',help='use all chain2 permutations to find maximum DockQ (number of comparisons is m!, e.g. 24 for four chains; if combined with -perm1 there will be n!*m! combinations)')
# parser.add_argument('-comb',default=False,action='store_true',help='use all cyclicchain permutations to find maximum DockQ (number of comparisons is n!*m! = 24*24 = 576 for two tetramers interacting')
parser.add_argument('-model_chain1',metavar='model_chain1', type=str,nargs='+', help='pdb chain order to group together partner 1')
parser.add_argument('-model_chain2',metavar='model_chain2', type=str,nargs='+', help='pdb chain order to group together partner 2 (complement to partner 1 if undef)')
parser.add_argument('-native_chain1',metavar='native_chain1', type=str,nargs='+', help='pdb chain order to group together from native partner 1')
parser.add_argument('-native_chain2',metavar='native_chain2', type=str,nargs='+', help='pdb chain order to group together from native partner 2 (complement to partner 1 if undef)')
args = parser.parse_args()
#bio_ver=1.64
bio_ver=1.61
if(float(Bio.__version__) < bio_ver):
print("Biopython version (%s) is too old need at least >=%.2f" % (Bio.__version__,bio_ver))
sys.exit()
# if(len(sys.argv)!=3):
# print "Usage: ./Dock.py <model> <native>"
# sys.exit()
# print args
# print args.model[0]
# sys.exit()
# model=sys.argv[1]
# native=sys.argv[2]
exec_path=os.path.dirname(os.path.abspath(sys.argv[0]))
fix_numbering=exec_path + '/scripts/fix_numbering.pl'
model=args.model[0]
model_in=model
native=args.native[0]
native_in=native
use_CA_only=args.useCA
capri_peptide=args.capri_peptide
model_chains=[]
native_chains=[]
best_info=''
if(not args.skip_check):
model_chains=get_pdb_chains(model)
native_chains=get_pdb_chains(native)
files_to_clean=[]
# print native_chains
if((len(model_chains) > 2 or len(native_chains) > 2) and
(args.model_chain1 == None and args.native_chain1 == None)):
print("Multi-chain model need sets of chains to group\nuse -native_chain1 and/or -model_chain1 if you want a different mapping than 1-1")
print("Model chains : " + str(model_chains))
print("Native chains : " + str(native_chains))
sys.exit()
if not args.skip_check and (len(model_chains) < 2 or len(native_chains)< 2):
print("Need at least two chains in the two inputs\n");
sys.exit()
if len(model_chains) > 2 or len(native_chains)> 2:
group1=model_chains[0]
group2=model_chains[1]
nat_group1=native_chains[0]
nat_group2=native_chains[1]
if(args.model_chain1 != None):
group1=args.model_chain1
nat_group1=group1
if(args.model_chain2 != None):
group2=args.model_chain2
else:
#will use the complement from group1
group2=[]
for c in model_chains:
if c not in group1:
group2.append(c)
nat_group1=group1
nat_group2=group2
if(args.native_chain1 != None):
nat_group1=args.native_chain1
if(args.native_chain2 != None):
nat_group2=args.native_chain2
else:
#will use the complement from group1
nat_group2=[]
for c in native_chains:
if c not in nat_group1:
nat_group2.append(c)
if(args.model_chain1 == None):
group1=nat_group1
group2=nat_group2
#print group1
#print group2
#print "native"
#print nat_group1
#print nat_group2
if(args.verbose):
print('Merging ' + ''.join(group1) + ' -> ' + ''.join(nat_group1) + ' to chain A')
print('Merging ' + ''.join(group2) + ' -> ' + ''.join(nat_group2) + ' to chain B')
native=make_two_chain_pdb_perm(native,nat_group1,nat_group2)
files_to_clean.append(native)
pe=0
if args.perm1 or args.perm2:
best_DockQ=-1;
best_g1=[]
best_g2=[]
iter_perm1=itertools.combinations(group1,len(group1))
iter_perm2=itertools.combinations(group2,len(group2))
if args.perm1:
iter_perm1=itertools.permutations(group1)
if args.perm2:
iter_perm2=itertools.permutations(group2)
combos1=[];
combos2=[];
for g1 in iter_perm1:#_temp:
combos1.append(g1)
for g2 in iter_perm2:
combos2.append(g2)
for g1 in combos1:
for g2 in combos2:
pe=pe+1
# print str(g1)+' '+str(g2)
# print pe
# print group1
# print group2
pe_tot=pe
pe=1
#sys.exit()
if args.verbose:
print('Starting chain order permutation search (number of permutations: ' + str(pe_tot) + ')')
for g1 in combos1:
for g2 in combos2:
#g2=group2
model_renum=make_two_chain_pdb_perm(model_in,g1,g2)
model_fixed=model_renum
if not args.no_needle:
fix_numbering_cmd=fix_numbering + ' ' + model_renum + ' ' + native + ' > /dev/null'
model_fixed=model_renum + ".fixed"
# print fix_numbering_cmd
os.system(fix_numbering_cmd)
os.remove(model_renum)
if not os.path.exists(model_fixed):
print('If you are sure the residues are identical you can use the option -no_needle')
sys.exit()
test_dict=calc_DockQ(model_fixed,native,use_CA_only)
os.remove(model_fixed)
if not args.quiet:
print(str(pe)+'/'+str(pe_tot) + ' ' + ''.join(g1) + ' -> ' + ''.join(g2) + ' ' + str(test_dict['DockQ']))
if(test_dict['DockQ'] > best_DockQ):
best_DockQ=test_dict['DockQ'];
info=test_dict
best_g1=g1
best_g2=g2
best_info='Best score ( ' + str(best_DockQ) +' ) found for model -> native, chain1:' + ''.join(best_g1) + ' -> ' + ''.join(nat_group1) + ' chain2:' + ''.join(best_g2) + ' -> ' + ''.join(nat_group2)
if args.verbose:
print(best_info)
if not args.quiet:
print("Current best " + str(best_DockQ))
pe=pe+1
if not args.quiet:
print(best_info)
# print 'Best score ( ' + str(best_DockQ) +' ) found for ' + str(best_g1) + ' ' + str(best_g2)
else:
model_renum=make_two_chain_pdb_perm(model,group1,group2)
model_fixed=model_renum
if not args.no_needle:
fix_numbering_cmd=fix_numbering + ' ' + model_renum + ' ' + native + ' > /dev/null'
model_fixed=model_renum + ".fixed"
# print fix_numbering_cmd
os.system(fix_numbering_cmd)
os.remove(model_renum)
if not os.path.exists(model_fixed):
print('If you are sure the residues are identical you can use the option -no_needle')
sys.exit()
info=calc_DockQ(model_fixed,native,use_CA_only)
#os.system('cp ' + native + ' native_multichain.pdb')
#os.system('cp ' + model_fixed + ' .')
os.remove(model_fixed)
# files_to_clean.append(model)
# files_to_clean.append(model_fixed)
# sys.exit()
# print native
# print model
else:
info=calc_DockQ(model,native,use_CA_only=use_CA_only,capri_peptide=capri_peptide) #False):
# info=calc_DockQ(model,native,use_CA_only=)
irms=info['irms']
Lrms=info['Lrms']
fnat=info['fnat']
DockQ=info['DockQ']
fnonnat=info['fnonnat']
if(args.short):
if capri_peptide:
print(("DockQ-capri_peptide %.3f Fnat %.3f iRMS %.3f LRMS %.3f Fnonnat %.3f %s %s %s" % (DockQ,fnat,irms,Lrms,fnonnat,model_in,native_in,best_info)))
else:
print(("DockQ %.3f Fnat %.3f iRMS %.3f LRMS %.3f Fnonnat %.3f %s %s %s" % (DockQ,fnat,irms,Lrms,fnonnat,model_in,native_in,best_info)))
else:
if capri_peptide:
print('****************************************************************')
print('* DockQ-CAPRI peptide *')
print('* Do not trust any thing you read.... *')
print('* OBS THE DEFINITION OF Fnat and iRMS are different for *')
print('* peptides in CAPRI *')
print('* *')
print('* For the record: *')
print('* Definition of contact <4A all heavy atoms (Fnat) *')
print('* Definition of interface <8A CB (iRMS) *')
print('* For comments, please email: bjorn.wallner@.liu.se *')
print('****************************************************************')
else:
print('****************************************************************')
print('* DockQ *')
print('* Scoring function for protein-protein docking models *')
print('* Statistics on CAPRI data: *')
print('* 0.00 <= DockQ < 0.23 - Incorrect *')
print('* 0.23 <= DockQ < 0.49 - Acceptable quality *')
print('* 0.49 <= DockQ < 0.80 - Medium quality *')
print('* DockQ >= 0.80 - High quality *')
print('* Reference: Sankar Basu and Bjorn Wallner, DockQ: A quality *')
print('* measure for protein-protein docking models, submitted *')
print('* *')
print('* For the record: *')
print('* Definition of contact <5A (Fnat) *')
print('* Definition of interface <10A all heavy atoms (iRMS) *')
print('* For comments, please email: bjorn.wallner@.liu.se *')
print('* *')
print('****************************************************************')
print(("Model : %s" % model_in))
print(("Native : %s" % native_in))
if len(best_info):
print(best_info)
print('Number of equivalent residues in chain ' + info['chain1'] + ' ' + str(info['len1']) + ' (' + info['class1'] + ')')
print('Number of equivalent residues in chain ' + info['chain2'] + ' ' + str(info['len2']) + ' (' + info['class2'] + ')')
print(("Fnat %.3f %d correct of %d native contacts" % (info['fnat'],info['nat_correct'],info['nat_total'])))
print(("Fnonnat %.3f %d non-native of %d model contacts" % (info['fnonnat'],info['nonnat_count'],info['model_total'])))
print(("iRMS %.3f" % irms))
print(("LRMS %.3f" % Lrms))
# print 'CAPRI ' + capri_class(fnat,irms,Lrms,capri_peptide=capri_peptide)
peptide_suffix=''
if capri_peptide:
peptide_suffix='_peptide'
#print('CAPRI use DockQ instead.')
#print(('CAPRI{} {}'.format(peptide_suffix,capri_class(fnat,irms,Lrms,capri_peptide=capri_peptide))))
#print('DockQ_CAPRI ' + capri_class_DockQ(DockQ,capri_peptide=capri_peptide))
peptide_disclaimer=''
if capri_peptide:
peptide_disclaimer='DockQ not reoptimized for CAPRI peptide evaluation'
print(("DockQ {:.3f} {}".format(DockQ,peptide_disclaimer)))
for f in files_to_clean:
os.remove(f)
if __name__ == '__main__':
main()
|
bjornwallner/DockQ
|
DockQ.py
|
Python
|
gpl-3.0
| 29,613
|
[
"Biopython"
] |
a6bb88a691bf6fa7083983c1b8a5bf145447b94ddb4d6675041601ead7ef4c6c
|