text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
### Code written for STDP tutorial @ CAMP 2016
### Adapted from Song et al 2000 (Competitive Hebbian Learning through spike-timing-dependent synaptic plasticity)
### With independent Poisson inputs (relevant figures : Fig 2)
### Author: Harsha Gurnani
### Date: June 15, 2016
#######################################
from brian2 import *
from time import time
set_device('cpp_standalone')  # build and run the model as standalone C++ code for speed
#### Simulation parameters ####
simtime = 150 *second  # Total simulated time
delta = 0.1*ms         # Integration time step
defaultclock.dt = delta
########################
#### Model Parameters
########################
### Neuron and synapses:
taum = 20*ms # Membrane time constant
Vrest = -70*mV # Resting membrane potential
Erev = 0*mV # Excitatory synapse - reversal potential
Irev = -70*mV # Inhibitory synapse - reversal potential
taue = 5*ms # Excitatory time constants
taui = 5*ms # Inhibitory time constants
gmax = 0.015 # Max excitatory conductance // in units of leak conductance
ginh = 0.05 # Inhibitory conductance (fixed, non-plastic)
Vt = -54*mV # Spike threshold
Vr = -60*mV # Reset potential
#### How does no. of synapses / ratio influence firing rates?
Ne = 1000 # No. of excitatory synapses
Ni = 200 # No. of inhibitory synapses
# How does final distribution of weights depend on presynaptic firing rates? Why???
FRe = 15*Hz # Firing rate (FR) for excitatory input
FRi = 10*Hz # FR for inhibitory input
### Neuron model: conductance-based leaky integrate-and-fire
eqs = Equations('''
dV/dt = ( (Vrest - V) + ge*(Erev - V) + gi*(Irev - V) )/taum :volt
dge/dt = -ge/taue :1 #Conductance of exc synapse
dgi/dt = -gi/taui :1 #Conductance of inh synapse
''')
# A single postsynaptic neuron receiving all inputs
NRN = NeuronGroup(1, model=eqs, threshold = 'V>Vt', reset = 'V=Vr', method='euler')
########################
#### Synapses and STDP
########################
## STDP parameters for excitatory synapses
taupre = 20*ms # Time constant of potentiation for pre-post pairing
taupost = 20*ms # Time constant of depression for post-pre pairing
Weakratio = 1.05 # Apost*taupost/(Apre*taupre)
# Depression:Potentiation ratio (slightly greater than 1 for stabilising network)
Apre = 0.005 # %Strengthening with pre-post pair
Apost = Apre*(taupre/taupost)*Weakratio  # depression amplitude derived from the area ratio above
### Excitatory Synapse Model:
# x is the presynaptic trace (drives potentiation), y the postsynaptic trace
# (drives depression); both decay exponentially and are only needed at spike
# times, hence (event-driven).
syneqs = '''
gsyn :1
dx/dt = -x/taupre :1 (event-driven)
dy/dt = -y/taupost :1 (event-driven)
'''
# On a presynaptic spike: deliver conductance, bump pre-trace, depress by the post-trace
preeqs = '''
ge_post += gsyn
x += Apre
gsyn += -y*gmax
gsyn = clip(gsyn, 0, gmax)
'''
# On a postsynaptic spike: bump post-trace, potentiate by the pre-trace
posteqs = '''
y += Apost
gsyn += x*gmax
gsyn = clip(gsyn, 0, gmax)
'''
### Poisson Input at Excitatory synapses, at firing rate FRe
InpE = PoissonGroup(Ne, rates = FRe)
S_exc = Synapses(InpE, NRN, model=syneqs, on_pre=preeqs, on_post=posteqs )
S_exc.connect() # Connect all
S_exc.gsyn[:] = gmax*rand(Ne) # Initialise uniformly between 0 and gmax
### Inhibitory synapses: fixed (non-plastic) conductance
InpI = PoissonGroup(Ni, rates = FRi)
S_inh = Synapses(InpI, NRN, model = ' ', on_pre = '''gi_post += ginh''') # Constant inhibitory conductance = ginh
S_inh.connect()
################
### Monitors
################
VMon = StateMonitor( NRN, 'V', record = 0 ) ## Membrane potential
FR = PopulationRateMonitor(NRN) ## Firing rate of NRN
Weights = StateMonitor( S_exc, 'gsyn', record = [0,Ne-1]) ## Two example synaptic weights
run( simtime, report='text')
figure()
### Histogram of final synaptic weights
subplot(211)
hist(S_exc.gsyn[:] /gmax, 20)
ylabel('No. of synapses')
xlabel('Normalised synaptic weight')
### Initial membrane potential trace (first 3000 samples = 300 ms at dt = 0.1 ms)
subplot(223)
plot(VMon.t[0:3000] /ms, VMon.V[0,0:3000] /mV)
ylim([-80,-40])
ylabel('Membrane potential (mV)')
# NOTE(review): legend() with a bare string treats it as a sequence of labels;
# legend(['Initial V']) is likely what was intended — confirm.
legend('Initial V')
### Final membrane potential trace
subplot(224)
plot(VMon.t[-3000:-1] /ms, VMon.V[0,-3000:-1] /mV)
ylim([-80,-40])
legend('Final V')
figure()
### Evolution of Firing rate in time
subplot(211)
plot(FR.t /second, FR.smooth_rate(window='gaussian', width=100*ms)/Hz)
ylabel('Firing rate (Hz)')
### Evolution of two example synaptic weights
subplot(212)
plot(Weights.t /second, Weights.gsyn[0,:], 'b', label='Synapse 1')
plot(Weights.t /second, Weights.gsyn[1,:], 'g', label='Synapse N')
xlabel('Time (second)')
ylabel('Synaptic weight')
tight_layout()
show()
| h-mayorquin/camp_india_2016 | tutorials/LTP-I/Demo1/Full_demo1_abbott.py | Python | mit | 4,275 | [
"Gaussian",
"NEURON"
] | d615a01232e38e82ef40a96120c33092fef42e0f3b2fab55615c813c7166940f |
#!/usr/bin/env python
import os, pickle, time
# Remove any stale compiled copy of the local GP module so that edits to
# my_gp_module.py are always picked up on import (Python 2 .pyc caching).
try:
    os.remove('my_gp_module.pyc')
except OSError:
    pass
import scipy as sp
from scipy.linalg import eigh
from my_gp_module import GaussianProcess
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def randint_norepeat(low, exclude=None, high=None, size=None):
    """Draw `size` distinct random integers in [low, high), avoiding `exclude`.

    :param low: lower bound (inclusive)
    :param exclude: iterable of values that must not appear in the result
    :param high: upper bound (exclusive)
    :param size: number of distinct values to return
    :returns: sorted list of `size` unique ints, disjoint from `exclude`

    Fixes a defect in the original implementation: duplicates *within* the
    initial random draw were never removed, so the result could contain
    repeated indices despite the function's name. Also uses numpy.random
    directly (the `scipy.random` alias was removed in modern scipy).
    """
    import numpy as np
    excluded = set(exclude) if exclude is not None else set()
    chosen = []
    seen = set()
    # Initial bulk draw; keep only values that are new and not excluded.
    for value in np.random.randint(low, high=high, size=size):
        value = int(value)
        if value not in excluded and value not in seen:
            seen.add(value)
            chosen.append(value)
    # Top up one at a time until we have exactly `size` distinct values.
    while len(chosen) < size:
        new = int(np.random.randint(low, high=high))
        if new not in excluded and new not in seen:
            seen.add(new)
            chosen.append(new)
    chosen.sort()
    return chosen
def teach_database_plusone(GP, X, y, X_t, y_t):
    """For each test point, refit GP on (test point + fixed database) and
    record the regression coefficient (alpha) obtained for the test point.

    :param GP: GaussianProcess instance; flushed and refitted at every step
    :param X: training inputs of the fixed database
    :param y: 1D training targets of the fixed database
    :param X_t: test configurations, processed one at a time
    :param y_t: test targets (each must be scalar)
    :returns: flat array with the alpha of each test configuration,
              or None if a test target is not 1D
    """
    GP.flush_data()
    # Force all data to be numpy arrays
    X, y = sp.asarray(X), sp.asarray(y)
    X_t, y_t = sp.asarray(X_t), sp.asarray(y_t)
    # From a fixed database (X,y), get alpha of some new configurations if added one at a time
    alphas = []
    for i, (X_test, y_test) in enumerate(zip(X_t, y_t)):
        if y_test.size != 1:
            print "ERROR: output space must be 1D. Exiting..."
            return
        # Test configuration is placed at position 0, so GP.alpha[0] is its coefficient
        X_plus = sp.row_stack((X_test, X))
        y_plus = sp.append(y_test, y)
        ttt = time.clock()
        GP.fit(X_plus, y_plus)
        print "TIMER teach", time.clock() - ttt
        alphas.append((GP.alpha[0]).flatten().copy())
        # reset the GP so the next iteration starts from a clean state
        GP.flush_data()
    return sp.array(alphas).flatten()
# --------------------------------------------
# WHAT IT DOES:
# Latest idea by Anatole
# Ntest test configurations, Ndatabases databases. Teach db+1 and check if inverse*inverse*k works
# --------------------------------------------
# --------------------------------------------
# Parameters for the run
# --------------------------------------------
theta0 = 2.0e1          # kernel hyperparameter of the level-1 GP (predicts the property)
theta0_level2 = 1.0e1   # kernel hyperparameter of the level-2 GP (predicts the alphas)
nugget = 1.0e-15        # regularisation added to the kernel diagonal
normalise = 1
metric = 'cityblock'    # L1 distance between feature vectors
Ntest = 50              # number of test configurations per database
Nteach = 1200           # size of each teaching database
Ndatabases = 1          # number of independent random databases
target_property = 'U_0'
dataset_loc = 'dsC7O2H10nsd_db.pkl'
# --------------------------------------------
# Load all database
# --------------------------------------------
ttt = time.clock()
dataset = pickle.load(open(dataset_loc, 'r'))
print "TIMER load_data", time.clock() - ttt
# accumulators, one entry appended per database iteration
test_indices_rec, teach_indices_rec = [], []
alpha_predicted, alpha_target = [], []
energy_target, energy_error = [], []
# --------------------------------------------
# Setup a Gaussian Process
# --------------------------------------------
gp = GaussianProcess(corr='absolute_exponential', theta0=sp.asarray([theta0]),
                     nugget=nugget, verbose=True, normalise=normalise, do_features_projection=False, low_memory=False, metric=metric)
gp_level2 = GaussianProcess(corr='absolute_exponential', theta0=sp.asarray([theta0_level2]),
                            nugget=nugget, verbose=True, normalise=normalise, do_features_projection=False, low_memory=False, metric=metric)
# --------------------------------------------
# Loop over different training sets of the same size
# --------------------------------------------
for iteration in range(Ndatabases):
    # --------------------------------------------
    # Pick Ntest configurations randomly; the teaching set is then drawn
    # disjoint from the test set via randint_norepeat
    # --------------------------------------------
    test_indices = list(sp.random.randint(0, high=dataset[target_property].size, size=Ntest))
    db_indices = randint_norepeat(0, exclude=test_indices, high=dataset[target_property].size, size=Nteach)
    sp.save('db_indices_%d-%s' % (iteration, time.ctime()), db_indices)
    teach_indices_rec.append(db_indices)
    # test configurations first, then the teaching database
    X = dataset['X'][test_indices + db_indices]
    T = dataset[target_property][test_indices + db_indices]
    print "\n", "-"*60, "\n"
    print "db size = %d, iteration %03d" % (Nteach, iteration)
    # --------------------------------------------
    # Extract feature(s) from training data and test set:
    # only sorted eigenvalues of Coulomb matrix in this case
    # --------------------------------------------
    ttt = time.clock()
    eigX = [(eigh(M, eigvals_only=True))[::-1] for M in X]
    print "TIMER eval_features", time.clock() - ttt
    eigX_t = eigX[:Ntest]
    eigX_db = eigX[Ntest:]
    y = T.ravel()
    y_t = y[:Ntest]
    y_db = y[Ntest:]
    # --------------------------------------------
    # Do len(y_t) teachings by including db + 1 configurations
    # --------------------------------------------
    alphas = teach_database_plusone(gp, eigX_db, y_db, eigX_t, y_t)
    alpha_target.append(alphas)
    gp.flush_data()
    # --------------------------------------------
    # --------------------------------------------
    # Second time don't include the test set and predict
    # --------------------------------------------
    ttt = time.clock()
    # Fit to data
    gp.fit(eigX_db, y_db)
    # level-2 GP learns the mapping features -> regression coefficients
    gp_level2.fit(eigX_db, gp.alpha.flatten())
    print "TIMER teach", time.clock() - ttt
    y_pred = gp.predict(eigX_t)
    # --------------------------------------------
    # predict the alphas
    # --------------------------------------------
    alpha_pred = gp_level2.predict(eigX_t)
    alpha_predicted.append(alpha_pred.flatten())
    energy_target.append(y_t)
    energy_error.append(y_pred - y_t)
    # check whether the level 1 ML itself is predicting the property correctly
    print "ERROR = ", energy_error[-1]
    print "ALPHA TRUE vs. PREDICTED:", alphas, alpha_predicted[-1]
    # Save round of predictions (append mode; header only on the first round)
    with open('alpha_predictions.txt', 'a') as f:
        if iteration == 0: f.write("n_test_molecules=%d n_databases=%d db_size=%d\n" % (Ntest, Ndatabases, Nteach))
        output_data = sp.vstack((sp.array(alpha_target).flatten(), sp.array(alpha_predicted).flatten(), sp.array(energy_target).flatten(), sp.array(energy_error).flatten()))
        sp.savetxt(f, output_data.T)
        f.close()  # redundant: the with-statement already closes the file
# Scatter plot: predicted vs actual regression coefficients
for at, ap in zip(alpha_target, alpha_predicted):
    plt.plot(at,ap,'x')
plt.xlabel("actual regression coefficient")
plt.ylabel("predicted regression coefficient")
plt.savefig('regr_actual_vs_pred-%s.pdf' % time.ctime())
plt.clf()
# Scatter plot: property error vs actual regression coefficient
for at, err in zip(alpha_target, energy_error):
    plt.plot(at,err,'x')
plt.xlabel("actual regression coefficient")
plt.ylabel("error on property %s" % target_property)
plt.title("MAE = %.3f" % sp.absolute(sp.array(energy_error)).mean())
plt.savefig('regr_actual_vs_error-%s.pdf' % time.ctime())
plt.clf()
| marcocaccin/MarcoGP | predict_alpha2.py | Python | apache-2.0 | 6,522 | [
"Gaussian"
] | 9790abd32fbaeb5afaacbdd3e2a27a40736190615bac3aaaf46b2970c6b2e3ef |
#!/usr/bin/python
import math
import os,sys
# Make the NCS python bindings importable from this test's location
ncs_lib_path = ('../../../../python/')
sys.path.append(ncs_lib_path)
import ncs
def run(argv):
    """Build and run a minimal one-population NCS simulation.

    Defines an 'ncs' neuron model with one voltage-gated and one
    calcium-dependent channel, creates 100 such neurons, drives them with a
    rectangular current stimulus and reports their membrane voltage to stdout.

    :param argv: command-line arguments forwarded to Simulation.init()
    """
    # Voltage-gated ion channel parameters (units per NCS conventions)
    voltage_channel = {
        "type": "voltage_gated_ion",
        "m_initial": 0.0,
        "reversal_potential": -80,
        "v_half": -44,
        "deactivation_slope": 40,
        "activation_slope": 20,
        "equilibrium_slope": 8.8,
        "r": 1.0 / 0.303,
        "conductance": 5 * 0.00015
    }
    # Calcium-dependent channel parameters
    calcium_channel = {
        "type": "calcium_dependent",
        "m_initial": 0.0,
        "reversal_potential": -80,
        "m_power": 2,
        "conductance": 6.0 * 0.0009,
        "forward_scale": 0.000125,
        "forward_exponent": 2,
        "backwards_rate": 2.5,
        "tau_scale": 0.01,
    }
    # Cell model combining both channels; spike_shape is the stereotyped
    # voltage waveform played back when the threshold is crossed.
    ncs_cell = {
        "threshold": -50.0,
        "resting_potential": -60.0,
        "calcium": 5.0,
        "calcium_spike_increment": 100.0,
        "tau_calcium": 0.07,
        "leak_reversal_potential": 0.0,
        "leak_conductance":0.0 ,
        "tau_membrane": 0.02,
        "r_membrane": 200.0,
        "spike_shape": [
            -38, 30, -43, -60, -60
        ],
        "capacitance": 1.0,
        "channels": [
            voltage_channel,
            calcium_channel,
        ]
    }
    sim = ncs.Simulation()
    neuron_parameters = sim.addNeuron("ncs_neuron",
                                      "ncs",
                                      ncs_cell
                                      )
    group_1 = sim.addNeuronGroup("group_1", 100, "ncs_neuron", None) # last param is geometry
    # Aliases allow addressing the same cells under several names
    all_cells = sim.addNeuronAlias("all_cells", [group_1])
    sim.addNeuronAlias("all", all_cells)
    sim.addNeuronAlias("all_2", "all_cells")
    if not sim.init(argv):
        print "Failed to initialize simulation."
        return
    # Rectangular current pulse, amplitude 0.1 (units per NCS — confirm),
    # applied to every cell in group_1 for the whole reporting window.
    sim.addStimulus("rectangular_current", { "amplitude": 0.1 }, group_1, 1.0, 0.0, 1.0)
    voltage_report = sim.addReport("group_1", "neuron", "neuron_voltage",1.0, 0.0,1.0).toStdOut()
    #voltage_report.toAsciiFile("voltages.txt")
    sim.run(duration=0.01)
    # Explicitly drop references so the underlying simulator objects are released
    del sim
    del voltage_report
    return
# Script entry point: forward command-line arguments to the simulation
if __name__ == "__main__":
    run(sys.argv)
| BrainComputationLab/ncs | python/samples/models/test/ncs_neuron_test.py | Python | bsd-2-clause | 1,999 | [
"NEURON"
] | 0cb1de69f2f7fad4b6f6a0de9bcb7b5ae9636c106caa5575c2080a755cfdabf7 |
# Usage: DipoleCalc.py file.mol2 #
# #README: Basically, each atom has its XYZ coordinates multiplied by their #
# electrostatic charges (in dic2, on the grep_line function). That creates on #
# vectorial component on each axis, which are summed. #
# which are summed, resulting in their individual dipole results. #
# After that, it is possible to find the Dipole Moment through the "module" #
# equation. #
###############################################################################
# Loading libraries
#import numpy
#import sys
#import re
import Tkinter
import tkFileDialog, tkMessageBox
import numpy
from pymol import cmd
from pymol.cgo import *
# Hidden Tk root window, used only as parent for the file-open dialog
dialog = Tkinter.Tk()
dialog.withdraw()
#stored.coords = []
#arq = sys.argv[1]
# Setting parameters
# TODO: check whether this really adds the Dipole Manager to the PyMOL menu
def __init__(self):
    """PyMOL plugin hook: register 'Dipole Manager' in the Plugin menu."""
    self.menuBar.addmenuitem('Plugin', 'command','Dipole Manager',label = 'Dipole Manager',command = lambda s=self : open_mol2(s))
# TODO: figure out how to open the MOL2 file and return it
def open_mol2(self):
    """Ask the user for a Tripos MOL2 file, load it into PyMOL, collect
    per-atom coordinates/charges into the global list s2, then compute and
    draw the molecule's dipole vector."""
    #--------------
    global s2
    #--------------
    myFormatsMOL2 = [('chemical/x-mol2Tripos MOL2 molecule model files.','*.mol2')]
    try:
        self.MOL2File = tkFileDialog.askopenfile(parent=dialog,mode='rb',filetypes=myFormatsMOL2, title='Choose MOL2 file')
    except:
        # NOTE(review): quitProgram is not defined in this file — this
        # path would raise NameError; confirm where it should come from.
        quitProgram(self, "No MOL2 File!")
    if self.MOL2File != None:
        cmd.load(self.MOL2File.name, "Shingonga" )
        print "Opening MOL2 file...", self.MOL2File.name
        #get_atom_data(self)
        s2 = []
        #cmd.iterate_state(0,'(all)','s1.append({resi,name,x,y,z,partial_charge})')
        #print s1
        # collect flat per-atom records [resi, name, x, y, z, charge] into s2
        myspace = {'coord_taker': coord_taker}
        cmd.iterate_state(0,'(all)', 'coord_taker(resi,name,x,y,z,partial_charge)', space=myspace)
        print len(s2)
        cog_calculator(self)
        cog_drawer(self)
        #axis_setter(self)
        print self.dipx, self.dipy, self.dipz, self.mdip
        #s2[].clear[]
def coord_taker(resi,name,x,y,z,partial_charge):
    """Callback for cmd.iterate_state: append one atom's record
    (6 fields: resi, name, x, y, z, partial_charge) to the flat global s2."""
    global s2
    #print '%s`%s/%s' % (resn ,resi, name)
    s2.extend([resi,name,x,y,z,partial_charge])
def cog_calculator(self):
    """Compute the centre of geometry (cogx/cogy/cogz) and the dipole
    vector (dipx/dipy/dipz) plus its modulus (mdip) from the flat atom
    record list s2 ([resi, name, x, y, z, charge] per atom).

    The factor 4.80320440079 is presumably the e*Angstrom -> Debye
    conversion — TODO confirm.
    """
    global s2
    natoms = len(s2) / 6  # six fields per atom record
    self.cogx, self.cogy, self.cogz = 0, 0, 0
    self.dipx, self.dipy, self.dipz = 0, 0, 0
    self.mdip = 0
    for base in range(0, len(s2), 6):
        x, y, z, charge = s2[base + 2], s2[base + 3], s2[base + 4], s2[base + 5]
        # running average of coordinates -> centre of geometry
        self.cogx += x / natoms
        self.cogy += y / natoms
        self.cogz += z / natoms
        # charge-weighted coordinates -> dipole components
        self.dipx += x * charge * 4.80320440079
        self.dipy += y * charge * 4.80320440079
        self.dipz += z * charge * 4.80320440079
    # modulus of the dipole vector
    self.mdip = ((self.dipx ** 2) + (self.dipy ** 2) + (self.dipz ** 2)) ** 0.5
def cog_drawer(self):
    """Draw the dipole vector and its x/y/z components as CGO arrows
    (cylinder shaft + cone head), anchored at the centre of geometry."""
    #com_object = 'cogmol2'
    #cmd.pseudoatom(object=com_object,pos=[self.cogx, self.cogy, self.cogz])
    #cmd.pseudoatom(object=com_object,pos=[self.cogx+self.dipx, self.cogy, self.cogz])
    #cmd.pseudoatom(object=com_object,pos=[self.cogx, self.cogy+self.dipy, self.cogz])
    #cmd.pseudoatom(object=com_object,pos=[self.cogx, self.cogy, self.cogz+self.dipz])
    # cone length: 10% of each component's magnitude, signed to point along it
    xcone = IsNegative(self.dipx)*0.1*abs(self.dipx)
    ycone = IsNegative(self.dipy)*0.1*abs(self.dipy)
    zcone = IsNegative(self.dipz)*0.1*abs(self.dipz)
    self.obj = []
    # Main dipole arrow (25.0/9.0 appear to be the ALPHA/CYLINDER CGO opcodes — confirm)
    self.obj.extend([ 25.0, 1, 9.0, self.cogx, self.cogy, self.cogz, self.cogx+self.dipx, self.cogy+self.dipy, self.cogz+self.dipz, 0.03, 1, 0, 0, 1, 0, 0 ])
    self.obj.extend([CONE, self.cogx+self.dipx, self.cogy+self.dipy, self.cogz+self.dipz, self.cogx+self.dipx+xcone, self.cogy+self.dipy+ycone, self.cogz+self.dipz+zcone, 0.10, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0])
    # Auxiliary thinner arrows, one per axis component
    self.obj.extend([ 25.0, 0.4, 9.0, self.cogx, self.cogy, self.cogz, self.cogx+self.dipx, self.cogy, self.cogz, 0.01, 1, 0, 0, 1, 0, 0 ])
    self.obj.extend([ 25.0, 0.4, 9.0, self.cogx, self.cogy, self.cogz, self.cogx, self.cogy+self.dipy, self.cogz, 0.01, 1, 0, 0, 1, 0, 0 ])
    self.obj.extend([ 25.0, 0.4, 9.0, self.cogx, self.cogy, self.cogz, self.cogx, self.cogy, self.cogz+self.dipz, 0.01, 1, 0, 0, 1, 0, 0 ])
    self.obj.extend([CONE, self.cogx+self.dipx, self.cogy, self.cogz, self.cogx+self.dipx+xcone, self.cogy, self.cogz, 0.05, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0])
    self.obj.extend([CONE, self.cogx, self.cogy+self.dipy, self.cogz, self.cogx, self.cogy+self.dipy+ycone, self.cogz, 0.05, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0])
    self.obj.extend([CONE, self.cogx, self.cogy, self.cogz+self.dipz, self.cogx, self.cogy, self.cogz+self.dipz+zcone, 0.05, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0])
    cmd.load_cgo(self.obj,'DipoleVectors')
def IsNegative(v):
    """Return the integer sign of v: 1 if positive, 0 if zero, -1 if negative.

    Used by cog_drawer() to orient the arrow-head cones along each axis.
    The original if/elif chain fell through and implicitly returned None
    for non-comparable input (e.g. NaN), which would crash the arithmetic
    in the caller; this version always returns an int.
    """
    if v > 0:
        return 1
    if v < 0:
        return -1
    return 0
# self.xyz1 = get_coord('xdip')
# self.xyz2 = get_coord('ydip')
# cmd.show_as('spheres', com_object)
# cmd.color('yellow', com_object)
# cmd.set ('sphere_scale', '0.1', 'cogmol2')
# def get_coord(v):
# if not isinstance(v, str):
# return v
# if v.startswith('['):
# return cmd.safe_list_eval(v)
# return cmd.get_atom_coords(v)
#def get_atom_data(self):
# myspace = {'bfactors': []}
# cmd.iterate('(all)', 'bfactors.append(b)', space=myspace)
# print bfactors
# self.s1 = ()
# cmd.iterate_state(0,'(all)','self.s1.append([x,y,z])')
# print self.s1
| NutsII/PyMol-DipoleManager | DipoleManager-v0.13.py | Python | mit | 5,765 | [
"PyMOL"
] | 4ab0f0869a65ac6a4cfce82472df728b21908d7a38610b29d523cf770459f81f |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from __future__ import print_function
from abc import abstractproperty
import MDAnalysis
import importlib
import numpy as np
from .atoms_maps import atoms_maps
from difflib import get_close_matches
class Layers(MDAnalysis.core.topologyattrs.AtomAttr):
    """Per-atom layer index (pytim extension attribute); -1 means unassigned."""
    attrname = 'layers'   # plural accessor on atom groups
    singular = 'layer'    # singular accessor on a single atom
    per_object = 'atom'   # one value stored per atom
class Clusters(MDAnalysis.core.topologyattrs.AtomAttr):
    """Per-atom cluster index (pytim extension attribute); -1 means unassigned."""
    attrname = 'clusters'  # plural accessor on atom groups
    singular = 'cluster'   # singular accessor on a single atom
    per_object = 'atom'    # one value stored per atom
class Sides(MDAnalysis.core.topologyattrs.AtomAttr):
    """Per-atom side index (pytim extension attribute); -1 means unassigned."""
    attrname = 'sides'    # plural accessor on atom groups
    singular = 'side'     # singular accessor on a single atom
    per_object = 'atom'   # one value stored per atom
def _create_property(property_name,
docstring=None,
readonly=False,
required=False):
def getter(self):
return self.__getattribute__('_' + property_name)
def setter(self, value):
self.__setattr__('_' + property_name, value)
if readonly is True:
setter = None
if required is False:
absprop = None
else:
absprop = abstractproperty(None)
return property(fget=getter, fset=setter, doc=docstring), absprop
def _missing_attributes(interface, universe):
    """Ensure all topology attributes needed by pytim exist on `universe`.

    Missing standard MDAnalysis attributes are added with defaults; radii
    are deliberately initialised to np.nan so guess_radii() can later detect
    and fill them. Finally the pytim-specific attributes are added.
    """
    interface._topologyattrs = importlib.import_module(
        'MDAnalysis.core.topologyattrs')
    # fall back to stringified atom ids when names are absent
    _check_missing_attribute(interface, 'names', 'Atomnames', universe.atoms,
                             universe.atoms.ids.astype(str))
    # NOTE _check_missing_attribute() relies on radii being set to np.nan
    # if the attribute radii is not present
    _check_missing_attribute(interface, 'radii', 'Radii', universe.atoms,
                             np.nan)
    _check_missing_attribute(interface, 'tempfactors', 'Tempfactors',
                             universe.atoms, 0.0)
    _check_missing_attribute(interface, 'bfactors', 'Bfactors', universe.atoms,
                             0.0)
    _check_missing_attribute(interface, 'altLocs', 'AltLocs', universe.atoms,
                             ' ')
    # icodes are stored per residue, not per atom
    _check_missing_attribute(interface, 'icodes', 'ICodes', universe.residues,
                             ' ')
    _check_missing_attribute(interface, 'occupancies', 'Occupancies',
                             universe.atoms, 1)
    _check_missing_attribute(interface, 'elements', 'Elements', universe.atoms,
                             ' ')
    _check_missing_attribute(interface, 'chainIDs', 'ChainIDs', universe.atoms,
                             'X')
    _extra_attributes(interface, universe)
def _extra_attributes(interface, universe):
    """Attach the pytim-specific per-atom attributes (layers, clusters, sides).

    These are not part of MDAnalysis.core.topologyattrs; each is added only
    if absent and initialised to -1 for every atom (meaning: unassigned).
    """
    for name, attr_class in [('layers', Layers),
                             ('clusters', Clusters),
                             ('sides', Sides)]:
        if name not in dir(universe.atoms):
            initial = np.zeros(len(universe.atoms), dtype=int) - 1
            universe.add_TopologyAttr(attr_class(initial))
def _check_missing_attribute(interface, name, classname, group, value):
    """ Add an attribute, which is necessary for pytim but
        missing from the present topology.

        An example of how the code below would expand is:

        if 'radii' not in dir(universe.atoms):
            from MDAnalysis.core.topologyattrs import Radii
            radii = np.zeros(len(universe.atoms)) * np.nan
            universe.add_TopologyAttr(Radii(radii))

        * MDAnalysis.core.topologyattrs -> self.topologyattrs
        * Radii -> missing_class
        * radii -> values

        :param name: attribute name as seen on the atoms (e.g. 'radii')
        :param classname: TopologyAttr class name (e.g. 'Radii')
        :param group: group the default values are sized against
        :param value: per-member array/list or a scalar default
    """
    universe = interface.universe
    if name not in dir(universe.atoms):
        missing_class = getattr(interface._topologyattrs, classname)
        # accept either one value per group member or a single scalar default
        if isinstance(value, np.ndarray) or isinstance(value, list):
            if len(value) == len(group):
                values = np.array(value)
            else:
                raise RuntimeError("improper array/list length")
        else:
            values = np.array([value] * len(group))
        universe.add_TopologyAttr(missing_class(values))
        if name == 'elements':
            # elements were missing: guess them from the atom names
            types = MDAnalysis.topology.guessers.guess_types(group.names)
            # is there an inconsistency in the way 'element' is defined
            # in different modules in MDA?
            # Note: the second arg in .get() is the default.
            group.elements = np.array([t.ljust(2) for t in types])
        if name == 'radii':
            # radii were just initialised to np.nan: try to guess real values
            guess_radii(interface)
def weighted_close_match(string, dictionary):
    """Return the dictionary key that best fuzzy-matches `string`.

    The first character is tripled on both the query and every key before
    matching, so it weighs more heavily in the similarity score; this fixes
    problems with atom names like CH12. Raises IndexError when nothing
    clears the cutoff (callers rely on that).
    """
    weighted_keys = {key[0] * 2 + key: val for key, val in dictionary.items()}
    target = string[0] * 2 + string
    best = get_close_matches(target, weighted_keys.keys(), n=1, cutoff=0.1)[0]
    # strip the two prepended characters to recover the original key
    return best[2:]
def _guess_radii_from_masses(interface, group, guessed):
    """Assign radii to atoms in `group` by matching their masses against the
    tabulated element masses in atoms_maps, then fuzzy-matching the element
    against the interface radii dictionary. Successful guesses are recorded
    in the `guessed` dict (type -> radius)."""
    radii = np.copy(group.radii)
    masses = group.masses
    types = group.types
    unique_masses = np.unique(masses)
    # Let's not consider atoms with zero mass.
    unique_masses = unique_masses[unique_masses > 0]
    d = atoms_maps
    for target_mass in unique_masses:
        # element whose tabulated mass is closest to the target mass
        atype, _ = min(
            d.items(),
            key=lambda __entry: abs(__entry[1]['mass'] - target_mass))
        try:
            match_type = get_close_matches(
                atype, interface.radii_dict.keys(), n=1, cutoff=0.1)
            rd = interface.radii_dict
            radii[masses == target_mass] = rd[match_type[0]]
            for t in types[masses == target_mass]:
                guessed.update({t: rd[match_type[0]]})
        except BaseException:
            # no usable match in the radii dictionary: leave these radii as-is
            pass
    group.radii = radii
def _guess_radii_from_types(interface, group, guessed):
    """Assign radii by fuzzy-matching atom names (with a fallback to atom
    types) against the keys of the interface radii dictionary. Successful
    guesses are recorded in the `guessed` dict."""
    radii = np.copy(group.radii)
    _dict = interface.radii_dict
    for aname in np.unique(group.names):
        try:
            matching_type = weighted_close_match(aname, _dict)
            radii[group.names == aname] = _dict[matching_type]
            guessed.update({aname: _dict[matching_type]})
        except (KeyError, IndexError):
            # no match on the name: fall back to the atom type
            try:
                atype = group.types[group.names == aname][0]
                matching_type = weighted_close_match(atype, _dict)
                radii[group.types == atype] = _dict[matching_type]
                guessed.update({atype: _dict[matching_type]})
            except (KeyError, IndexError):
                pass
    group.radii = np.copy(radii)
def guess_radii(interface, group=None):
    """Guess and assign radii that are still unset (None or np.nan) on `group`
    (defaults to all atoms of the interface universe).

    Name/type based guessing takes precedence; mass-based guessing fills in
    the remainder. All guesses are accumulated in interface.guessed_radii.
    """
    # NOTE: this code depends on the assumption that not-set radii,
    # have the value np.nan (see _missing_attributes() ), so don't change it
    # let's test first which information is available
    guessed = {}
    try:
        interface.guessed_radii.update({})
    except AttributeError:
        interface.guessed_radii = {}
    if group is None:
        group = interface.universe.atoms
    nans = np.isnan(group.radii)
    # if no radius is nan, no need to guess anything
    if not (np.any(np.equal(group.radii, None)) or np.any(nans)):
        return
    # normalise None entries to np.nan, then keep only the unset atoms
    nones = np.equal(group.radii, None)
    group.radii[nones] = np.array([np.nan] * len(group.radii[nones]))
    group = group[np.isnan(group.radii)]
    # We give precedence to atom names, then to types
    try:
        # this test fails either if no 'type' property
        # is available, or if it is, but the values are
        # integers (like in lammps) and thus cannot be
        # used to guess the type (in this code)
        group.types.astype(int)
    except AttributeError:  # no types at all
        pass  # will try with masses
    except ValueError:  # types are there, and are not integers
        _guess_radii_from_types(interface, group, guessed)
    # We fill in the remaining ones using masses information
    group = group[np.isnan(group.radii)]
    if ('masses' in dir(group)):
        _guess_radii_from_masses(interface, group, guessed)
    interface.guessed_radii.update(guessed)
| Marcello-Sega/pytim | pytim/properties.py | Python | gpl-3.0 | 8,159 | [
"LAMMPS",
"MDAnalysis"
] | d2e2368495d7b24b27f098dcb081e900a24b220ba920451d23a2919f99013002 |
"""
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # RSCID
__RCSID__ = "$Id$"
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getRegistrationProtocols, getThirdPartyProtocols
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
class DataManager( object ):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
  def __init__( self, catalogs = [], masterCatalogOnly = False, vo = False ):
    """ c'tor

    :param self: self reference
    :param catalogs: the list of catalog in which to perform the operations. This
                     list will be ignored if masterCatalogOnly is set to True
    :param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
                              The catalogs parameter will be ignored.
    :param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
    """
    self.log = gLogger.getSubLogger( self.__class__.__name__, True )
    self.vo = vo
    # master-catalog mode overrides any explicit catalog list
    catalogsToUse = FileCatalog( vo = self.vo ).getMasterCatalogNames()['Value'] if masterCatalogOnly else catalogs
    self.fc = FileCatalog( catalogs = catalogsToUse, vo = self.vo )
    self.accountingClient = None
    # protocol lists used for replica registration and third-party transfers
    self.registrationProtocol = getRegistrationProtocols()
    self.thirdPartyProtocols = getThirdPartyProtocols()
    self.resourceStatus = ResourceStatus()
    # operational switches, configurable per-VO in the Operations section
    self.ignoreMissingInFC = Operations( self.vo ).getValue( 'DataManagement/IgnoreMissingInFC', False )
    self.useCatalogPFN = Operations( self.vo ).getValue( 'DataManagement/UseCatalogPFN', True )
  def setAccountingClient( self, client ):
    """ Set Accounting Client instance

    :param client: client object used to send accounting records
                   (replaces the default of None set in the constructor)
    """
    self.accountingClient = client
def __verifyWritePermission( self, path ):
""" Check if we have write permission to the given file (if exists) or its directory
"""
if type( path ) in StringTypes:
paths = [ path ]
else:
paths = path
res = self.fc.getPathPermissions( paths )
if not res['OK']:
return res
result = {'Successful':[], 'Failed':[]}
for path in paths:
if res['Value']['Successful'].get( path, {} ).get( 'Write', False ):
result['Successful'].append( path )
else:
result['Failed'].append( path )
return S_OK( result )
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory( self, lfnDir ):
""" Clean the logical directory from the catalog and storage
"""
if type( lfnDir ) in StringTypes:
lfnDir = [ lfnDir ]
retDict = { "Successful" : {}, "Failed" : {} }
for folder in lfnDir:
res = self.__cleanDirectory( folder )
if not res['OK']:
self.log.debug( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
retDict["Failed"][folder] = res['Message']
else:
self.log.debug( "Successfully removed directory.", folder )
retDict["Successful"][folder] = res['Value']
return S_OK( retDict )
  def __cleanDirectory( self, folder ):
    """ delete all files from directory :folder: in FileCatalog and StorageElement

    :param self: self reference
    :param str folder: directory name
    """
    # refuse early if the current credential cannot write to the folder
    res = self.__verifyWritePermission( folder )
    if folder not in res['Value']['Successful']:
      errStr = "__cleanDirectory: Write access not permitted for this credential."
      self.log.debug( errStr, folder )
      return S_ERROR( errStr )
    res = self.__getCatalogDirectoryContents( [ folder ] )
    if not res['OK']:
      return res
    # remove all catalogued files plus the legacy 'dirac_directory' marker file
    res = self.removeFile( res['Value'].keys() + [ '%s/dirac_directory' % folder ] )
    if not res['OK']:
      return res
    for lfn, reason in res['Value']['Failed'].items():
      gLogger.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
    # sweep the physical directory on every SE of the configured cleaning list
    storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
    failed = False
    for storageElement in sorted( storageElements ):
      res = self.__removeStorageDirectory( folder, storageElement )
      if not res['OK']:
        failed = True
    if failed:
      return S_ERROR( "Failed to clean storage directory at all SEs" )
    # finally drop the (now empty) directory from the catalog itself
    res = returnSingleResult( self.fc.removeDirectory( folder, recursive = True ) )
    if not res['OK']:
      return res
    return S_OK()
  def __removeStorageDirectory( self, directory, storageElement ):
    """ delete SE directory

    :param self: self reference
    :param str directory: folder to be removed
    :param str storageElement: DIRAC SE name
    """
    se = StorageElement( storageElement, vo = self.vo )
    res = returnSingleResult( se.exists( directory ) )
    if not res['OK']:
      self.log.debug( "Failed to obtain existance of directory", res['Message'] )
      return res
    exists = res['Value']
    # nothing to do if the directory was never created at this SE
    if not exists:
      self.log.debug( "The directory %s does not exist at %s " % ( directory, storageElement ) )
      return S_OK()
    res = returnSingleResult( se.removeDirectory( directory, recursive = True ) )
    if not res['OK']:
      self.log.debug( "Failed to remove storage directory", res['Message'] )
      return res
    self.log.debug( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
                                                                      directory,
                                                                      storageElement ) )
    return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
self.log.debug( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
activeDirs = directories
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = returnSingleResult( self.fc.listDirectory( currentDir ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Problem getting the %s directory content" % currentDir, res['Message'] )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.debug( "Found %d files" % len( allFiles ) )
return S_OK( allFiles )
def getReplicasFromDirectory( self, directory ):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents( directories )
if not res['OK']:
return res
allReplicas = {}
for lfn, metadata in res['Value'].items():
allReplicas[lfn] = metadata['Replicas']
return S_OK( allReplicas )
  def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
    """ get all files from :directory: older than :days: days matching to :wildcard:

    :param self: self reference
    :param mixed directory: list of directories or directory name
    :param int days: ctime days (0 disables the age filter)
    :param str wildcard: pattern to match
    :return: S_OK( list of file names )
    """
    # Accept a single directory name as well as a list of them
    if type( directory ) in StringTypes:
      directories = [directory]
    else:
      directories = directory
    self.log.debug( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
    for folder in directories:
      self.log.debug( folder )
    activeDirs = directories
    allFiles = []
    # Breadth-first walk over the directory tree
    while len( activeDirs ) > 0:
      currentDir = activeDirs[0]
      # We only need the metadata (verbose) if a limit date is given
      res = returnSingleResult( self.fc.listDirectory( currentDir, verbose = ( days != 0 ) ) )
      activeDirs.remove( currentDir )
      if not res['OK']:
        self.log.debug( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
      else:
        dirContents = res['Value']
        subdirs = dirContents['SubDirs']
        files = dirContents['Files']
        self.log.debug( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
        # Only descend into sub-directories that themselves pass the age filter
        for subdir in subdirs:
          if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
            # Make the path absolute if the catalog returned a relative name
            if subdir[0] != '/':
              subdir = currentDir + '/' + subdir
            activeDirs.append( subdir )
        for fileName in files:
          fileInfo = files[fileName]
          # File information may be nested under a 'Metadata' key; fall back to the dict itself
          fileInfo = fileInfo.get( 'Metadata', fileInfo )
          # Keep the file if it passes the age filter (or none applies) ...
          if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
            # ... and the wildcard pattern; prefer the catalog LFN when available
            if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
              fileName = fileInfo.get( 'LFN', fileName )
              allFiles.append( fileName )
    return S_OK( allFiles )
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
##########################################################################
#
# These are the data transfer methods
#
def getFile( self, lfn, destinationDir = '' ):
""" Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "getFile: Supplied lfn must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "getFile: Attempting to get %s files." % len( lfns ) )
res = self.getActiveReplicas( lfns )
if not res['OK']:
return res
failed = res['Value']['Failed']
lfnReplicas = res['Value']['Successful']
res = self.fc.getFileMetadata( lfnReplicas.keys() )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
fileMetadata = res['Value']['Successful']
successful = {}
for lfn in fileMetadata:
res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = res['Value']
gDataStoreClient.commit()
return S_OK( { 'Successful': successful, 'Failed' : failed } )
  def __getFile( self, lfn, replicas, metadata, destinationDir ):
    """ Try to download one LFN, attempting each hosting SE in proximity order.

    :param str lfn: logical file name
    :param dict replicas: SE name -> replica information for this LFN
    :param dict metadata: catalog metadata, at least 'Size' and 'Checksum' are read
    :param str destinationDir: local folder to download into
    :return: S_OK( localPath ) for the first successful, validated download, else S_ERROR
    """
    if not replicas:
      self.log.debug( "No accessible replicas found" )
      return S_ERROR( "No accessible replicas found" )
    # Determine the best replicas
    res = self._getSEProximity( replicas.keys() )
    if not res['OK']:
      return res
    for storageElementName in res['Value']:
      se = StorageElement( storageElementName, vo = self.vo )
      # One accounting record per download attempt
      oDataOperation = self.__initialiseAccountingObject( 'getFile', storageElementName, 1 )
      oDataOperation.setStartTime()
      startTime = time.time()
      res = returnSingleResult( se.getFile( lfn, localPath = os.path.realpath( destinationDir ) ) )
      getTime = time.time() - startTime
      oDataOperation.setValueByKey( 'TransferTime', getTime )
      if not res['OK']:
        self.log.debug( "Failed to get %s from %s" % ( lfn, storageElementName ), res['Message'] )
        oDataOperation.setValueByKey( 'TransferOK', 0 )
        oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
        oDataOperation.setEndTime()
      else:
        oDataOperation.setValueByKey( 'TransferSize', res['Value'] )
        localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
        localAdler = fileAdler( localFile )
        # Validate the downloaded copy against the catalog size and adler32 checksum
        if ( metadata['Size'] != res['Value'] ):
          oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
          self.log.debug( "Size of downloaded file (%d) does not match catalog (%d)" % ( res['Value'],
                                                                                         metadata['Size'] ) )
        elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
          oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
          self.log.debug( "Checksum of downloaded file (%s) does not match catalog (%s)" % ( localAdler,
                                                                                             metadata['Checksum'] ) )
        else:
          # Valid copy obtained: register the accounting record and stop trying other SEs
          oDataOperation.setEndTime()
          gDataStoreClient.addRegister( oDataOperation )
          return S_OK( localFile )
      # NOTE(review): on the 'FinishedDirty' paths no end time is set before the record
      # is registered here — confirm whether that is intended.
      gDataStoreClient.addRegister( oDataOperation )
    self.log.debug( "getFile: Failed to get local copy from any replicas.", lfn )
    return S_ERROR( "DataManager.getFile: Failed to get local copy from any replicas." )
def _getSEProximity( self, ses ):
""" get SE proximity """
siteName = DIRAC.siteName()
localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
countrySEs = []
countryCode = str( siteName ).split( '.' )[-1]
res = getSEsForCountry( countryCode )
if res['OK']:
countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
sortedSEs = randomize( localSEs ) + randomize( countrySEs )
sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
return S_OK( sortedSEs )
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None ):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
'file' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# ancestors = ancestors if ancestors else list(
folder = os.path.dirname( lfn )
res = self.__verifyWritePermission( folder )
if folder not in res['Value']['Successful']:
errStr = "putAndRegister: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "putAndRegister: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "putAndRegister: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid( fileName )
if not checksum:
self.log.debug( "putAndRegister: Checksum information not provided. Calculating adler32." )
checksum = fileAdler( fileName )
self.log.debug( "putAndRegister: Checksum calculated to be %s." % checksum )
res = self.fc.exists( {lfn:guid} )
if not res['OK']:
errStr = "putAndRegister: Completey failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return res
if lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
self.log.debug( errStr, lfn )
else:
errStr = "putAndRegister: This file GUID already exists for another file. " \
"Please remove it and try again."
self.log.debug( errStr, res['Value']['Successful'][lfn] )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE, vo = self.vo )
res = storageElement.isValid()
if not res['OK']:
errStr = "putAndRegister: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
destinationSE = storageElement.getStorageElementName()['Value']
res = returnSingleResult( storageElement.getURL( lfn ) )
if not res['OK']:
errStr = "putAndRegister: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destUrl = res['Value']
fileDict = {lfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
oDataOperation.setStartTime()
oDataOperation.setValueByKey( 'TransferSize', size )
startTime = time.time()
res = returnSingleResult( storageElement.putFile( fileDict ) )
putTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', putTime )
if not res['OK']:
errStr = "putAndRegister: Failed to put file to Storage Element."
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
fileTuple = ( lfn, destUrl, size, destinationSE, guid, checksum )
registerDict = {'LFN':lfn, 'PFN':destUrl, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
startTime = time.time()
res = self.registerFile( fileTuple )
registerTime = time.time() - startTime
oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
if not res['OK']:
errStr = "putAndRegister: Completely failed to register file."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'register' : registerDict }
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
elif lfn in res['Value']['Failed']:
errStr = "putAndRegister: Failed to register file."
self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
failed[lfn] = { 'register' : registerDict }
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey( 'RegistrationOK', 1 )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
return S_OK( {'Successful': successful, 'Failed': failed } )
  def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
    """ Replicate a LFN to a destination SE and register the replica.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is the local file system location to be used as a temporary cache
        'catalog' is the catalog in which the new replica is registered ('' for the default)
    """
    successful = {}
    failed = {}
    self.log.debug( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
    startReplication = time.time()
    res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
    replicationTime = time.time() - startReplication
    if not res['OK']:
      errStr = "DataManager.replicateAndRegister: Completely failed to replicate file."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    if not res['Value']:
      # The file was already present at the destination SE
      self.log.debug( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
      successful[lfn] = { 'replicate' : 0, 'register' : 0 }
      resDict = { 'Successful' : successful, 'Failed' : failed }
      return S_OK( resDict )
    successful[lfn] = { 'replicate' : replicationTime }
    # __replicate returns the registration URL and the resolved destination SE name
    destPfn = res['Value']['DestPfn']
    destSE = res['Value']['DestSE']
    self.log.debug( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
    replicaTuple = ( lfn, destPfn, destSE )
    startRegistration = time.time()
    res = self.registerReplica( replicaTuple, catalog = catalog )
    registrationTime = time.time() - startRegistration
    if not res['OK']:
      # Need to return to the client that the file was replicated but not registered
      errStr = "replicateAndRegister: Completely failed to register replica."
      self.log.debug( errStr, res['Message'] )
      failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
    else:
      if lfn in res['Value']['Successful']:
        self.log.debug( "replicateAndRegister: Successfully registered replica." )
        successful[lfn]['register'] = registrationTime
      else:
        errStr = "replicateAndRegister: Failed to register replica."
        self.log.debug( errStr, res['Value']['Failed'][lfn] )
        failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
    return S_OK( {'Successful': successful, 'Failed': failed} )
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.debug( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.debug( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
def __replicate( self, lfn, destSEName, sourceSEName = None, destPath = None, localCache = None ):
""" Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' if cannot do third party transfer, we do get and put through this local directory
"""
log = self.log.getSubLogger( '__replicate', True )
###########################################################
# Check that we have write permissions to this directory.
res = self.__verifyWritePermission( lfn )
if lfn not in res['Value']['Successful']:
errStr = "__replicate: Write access not permitted for this credential."
log.debug( errStr, lfn )
return S_ERROR( errStr )
# Check that the destination storage element is sane and resolve its name
log.debug( "Verifying destination StorageElement validity (%s)." % ( destSEName ) )
destStorageElement = StorageElement( destSEName, vo = self.vo )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.debug( errStr, "%s %s" % ( destSEName, res['Message'] ) )
return S_ERROR( errStr )
# Get the real name of the SE
destSEName = destStorageElement.getStorageElementName()['Value']
###########################################################
# Check whether the destination storage element is banned
log.verbose( "Determining whether %s ( destination ) is Write-banned." % destSEName )
if not self.__SEActive( destSEName ).get( 'Value', {} ).get( 'Write' ):
infoStr = "Supplied destination Storage Element is not currently allowed for Write."
log.debug( infoStr, destSEName )
return S_ERROR( infoStr )
# Get the LFN replicas from the file catalog
log.debug( "Attempting to obtain replicas for %s." % ( lfn ) )
res = returnSingleResult( self.getReplicas( lfn ) )
if not res[ 'OK' ]:
errStr = "%Failed to get replicas for LFN."
log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
log.debug( "Successfully obtained replicas for LFN." )
lfnReplicas = res['Value']
###########################################################
# If the file catalog size is zero fail the transfer
log.debug( "Attempting to obtain size for %s." % lfn )
res = returnSingleResult( self.fc.getFileSize( lfn ) )
if not res['OK']:
errStr = "Failed to get size for LFN."
log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
catalogSize = res['Value']
if catalogSize == 0:
errStr = "Registered file size is 0."
log.debug( errStr, lfn )
return S_ERROR( errStr )
log.debug( "File size determined to be %s." % catalogSize )
###########################################################
# If the LFN already exists at the destination we have nothing to do
if destSEName in lfnReplicas:
log.debug( "__replicate: LFN is already registered at %s." % destSEName )
return S_OK()
###########################################################
# If the source is specified, check that it is in the replicas
if sourceSEName:
log.debug( "Determining whether source Storage Element specified is sane." )
if sourceSEName not in lfnReplicas:
errStr = "LFN does not exist at supplied source SE."
log.error( errStr, "%s %s" % ( lfn, sourceSEName ) )
return S_ERROR( errStr )
# If sourceSE is specified, then we consider this one only, otherwise
# we consider them all
possibleSourceSEs = [sourceSEName] if sourceSEName else lfnReplicas.keys()
# We sort the possibileSourceSEs with the SEs that are on the same site than the destination first
# reverse = True because True > False
possibleSourceSEs = sorted( possibleSourceSEs,
key = lambda x : isSameSiteSE( x, destSEName ).get( 'Value', False ),
reverse = True )
# In case we manage to find SEs that would work as a source, but we can't negotiate a protocol
# we will do a get and put using one of this sane SE
possibleSEsForIntermediateTransfer = []
# Take into account the destination path
if destPath:
destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
else:
destPath = lfn
for candidateSEName in possibleSourceSEs:
log.debug( "Consider %s as a source" % candidateSEName )
# Check that the candidate is active
if not self.__SEActive( candidateSEName ).get( 'Value', {} ).get( 'Read' ):
log.debug( "%s is currently not allowed as a source." % candidateSEName )
continue
else:
log.debug( "%s is available for use." % candidateSEName )
candidateSE = StorageElement( candidateSEName, vo = self.vo )
# Check that the SE is valid
res = candidateSE.isValid()
if not res['OK']:
log.debug( "The storage element is not currently valid.", "%s %s" % ( candidateSEName, res['Message'] ) )
continue
else:
log.debug( "The storage is currently valid", candidateSEName )
# Check that the file size corresponds to the one in the FC
res = returnSingleResult( candidateSE.getFileSize( lfn ) )
if not res['OK']:
log.debug( "could not get fileSize on %s" % candidateSEName, res['Message'] )
continue
seFileSize = res['Value']
if seFileSize != catalogSize:
log.debug( "Catalog size and physical file size mismatch.", "%s %s" % ( catalogSize, seFileSize ) )
continue
else:
log.debug( "Catalog size and physical size match" )
res = destStorageElement.negociateProtocolWithOtherSE( candidateSE, protocols = self.thirdPartyProtocols )
if not res['OK']:
log.debug( "Error negotiating replication protocol", res['Message'] )
continue
replicationProtocol = res['Value']
if not replicationProtocol:
possibleSEsForIntermediateTransfer.append( candidateSE )
log.debug( "No protocol suitable for replication found" )
continue
log.debug( 'Found common protocols', replicationProtocol )
# THIS WOULD NOT WORK IF PROTO == file !!
# Compare the urls to make sure we are not overwriting
res = returnSingleResult( candidateSE.getURL( lfn, protocol = replicationProtocol ) )
if not res['OK']:
log.debug( "Cannot get sourceURL", res['Message'] )
continue
sourceURL = res['Value']
res = returnSingleResult( destStorageElement.getURL( destPath, protocol = replicationProtocol ) )
if not res['OK']:
log.debug( "Cannot get destURL", res['Message'] )
continue
destURL = res['Value']
if sourceURL == destURL:
log.debug( "Same source and destination, give up" )
continue
# Attempt the transfer
res = returnSingleResult( destStorageElement.replicateFile( {destPath:sourceURL}, sourceSize = catalogSize ) )
if not res['OK']:
log.debug( "Replication failed", "%s from %s to %s." % ( lfn, candidateSEName, destSEName ) )
continue
log.debug( "Replication successful.", res['value'] )
res = returnSingleResult( destStorageElement.getURL(destPath, protocol = self.registrationProtocol))
if not res['OK']:
log.debug( 'Error getting the registration URL', res['Message'] )
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res['Value']
return S_OK( {'DestSE':destSEName, 'DestPfn':registrationURL} )
# If we are here, that means that we could not make a third party transfer.
# Check if we have some sane SEs from which we could do a get/put
localDir = os.path.realpath( localCache if localCache else '.' )
localFile = os.path.join( localDir, os.path.basename( lfn ) )
log.debug( "Will try intermediate transfer from %s sources" % len( possibleSEsForIntermediateTransfer ) )
for candidateSE in possibleSEsForIntermediateTransfer:
res = returnSingleResult( candidateSE.getFile( lfn, localPath = localDir ) )
if not res['OK']:
log.debug( 'Error getting the file from %s' % candidateSE.name, res['Message'] )
continue
res = returnSingleResult( destStorageElement.putFile( {destPath:localFile}, sourceSize = catalogSize ) )
if not res['OK']:
log.debug( 'Error putting file coming from %s' % candidateSE.name, res['Message'] )
# if the put is the problem, it's maybe pointless to try the other candidateSEs...
continue
# get URL with default protocol to return it
res = returnSingleResult( destStorageElement.getURL( destPath, protocol = self.registrationProtocol ) )
if not res['OK']:
log.debug( 'Error getting the registration URL', res['Message'] )
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res['Value']
return S_OK( {'DestSE':destSEName, 'DestPfn':registrationURL} )
# If here, we are really doomed
errStr = "Failed to replicate with all sources."
log.debug( errStr, lfn )
return S_ERROR( errStr )
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
errStr = "registerFile: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn in success:
failed.pop( lfn, None )
return res
def __registerFile( self, fileTuples, catalog ):
""" register file to cataloge """
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {'PFN':physicalFile, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
if catalog:
fileCatalog = FileCatalog( catalog, vo = self.vo )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
else:
fileCatalog = self.fc
res = fileCatalog.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
errStr = "registerReplica: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn in success:
failed.pop( lfn, None )
return res
  def __registerReplica( self, replicaTuples, catalog ):
    """ register replica to catalogue

    :param list replicaTuples: list of ( lfn, url, storageElementName ) tuples
    :param str catalog: catalog name ('' to use the default catalog)
    :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    # Group the replicas per storage element so each SE is resolved only once
    seDict = {}
    for lfn, url, storageElementName in replicaTuples:
      seDict.setdefault( storageElementName, [] ).append( ( lfn, url ) )
    failed = {}
    # NOTE(review): 'replicaTuples' and 'replicaTuple' are re-bound below to hold the
    # resolved tuples; the original input is not accessible past this point.
    replicaTuples = []
    for storageElementName, replicaTuple in seDict.items():
      destStorageElement = StorageElement( storageElementName, vo = self.vo )
      res = destStorageElement.isValid()
      if not res['OK']:
        # The whole SE is unusable: fail every replica destined for it
        errStr = "__registerReplica: The storage element is not currently valid."
        self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
        for lfn, url in replicaTuple:
          failed[lfn] = errStr
      else:
        storageElementName = destStorageElement.getStorageElementName()['Value']
        for lfn, url in replicaTuple:
          # Resolve the URL to the registration-protocol form
          res = returnSingleResult( destStorageElement.getURL( lfn, protocol = self.registrationProtocol ) )
          if not res['OK']:
            failed[lfn] = res['Message']
          else:
            replicaTuple = ( lfn, res['Value'], storageElementName, False )
            replicaTuples.append( replicaTuple )
    self.log.debug( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
    # HACK!
    replicaDict = {}
    for lfn, url, se, _master in replicaTuples:
      replicaDict[lfn] = {'SE':se, 'PFN':url}
    if catalog:
      fileCatalog = FileCatalog( catalog, vo = self.vo )
      res = fileCatalog.addReplica( replicaDict )
    else:
      res = self.fc.addReplica( replicaDict )
    if not res['OK']:
      errStr = "__registerReplica: Completely failed to register replicas."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    failed.update( res['Value']['Failed'] )
    successful = res['Value']['Successful']
    resDict = {'Successful':successful, 'Failed':failed}
    return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile( self, lfn, force = None ):
""" Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
if force == None:
force = self.ignoreMissingInFC
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeFile: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# First check if the file exists in the FC
res = self.fc.exists( lfns )
if not res['OK']:
return res
success = res['Value']['Successful']
lfns = [lfn for lfn in success if success[lfn] ]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
failed = {}
else:
successful = {}
failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
# Check that we have write permissions to this directory and to the file.
if lfns:
dir4lfns = {}
for lfn in lfns:
dir4lfns.setdefault( os.path.dirname( lfn ), [] ).append( lfn )
res = self.__verifyWritePermission( dir4lfns.keys() )
if res['Value']['Failed']:
errStr = "removeFile: Write access not permitted for this credential."
self.log.debug( errStr, 'for %d files' % len( res['Value']['Failed'] ) )
failed.update( dict.fromkeys( [lfn for dirName in res['Value']['Failed'] for lfn in dir4lfns[dirName]], errStr ) )
lfns = list( set( [lfn for dirName in res['Value']['Successful'] for lfn in dir4lfns[dirName] ] ) )
if lfns:
self.log.debug( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
res = self.fc.getReplicas( lfns, True )
if not res['OK']:
errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
lfnDict = res['Value']['Successful']
for lfn, reason in res['Value'].get( 'Failed', {} ).items():
# Ignore files missing in FC if force is set
if reason == 'No such file or directory' and force:
successful[lfn] = True
elif reason == 'File has zero replicas':
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile( lfnDict )
if not res['OK']:
errStr = "removeFile: Completely failed to remove files."
self.log.debug( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
gDataStoreClient.commit()
return S_OK( resDict )
def __removeFile( self, lfnDict ):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
for se, pfn in repDict.items():
storageElementDict.setdefault( se, [] ).append( lfn )
failed = {}
successful = {}
for storageElementName in sorted( storageElementDict ):
lfns = storageElementDict[storageElementName]
res = self.__removeReplica( storageElementName, lfns, replicaDict = lfnDict )
if not res['OK']:
errStr = res['Message']
for lfn in lfns:
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].items():
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
completelyRemovedFiles = []
for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
completelyRemovedFiles.append( lfn )
if completelyRemovedFiles:
res = self.fc.removeFile( completelyRemovedFiles )
if not res['OK']:
for lfn in completelyRemovedFiles:
failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
  def removeReplica( self, storageElementName, lfn ):
    """ Remove replica at the supplied Storage Element from Storage Element then file catalogue

    'storageElementName' is the storage where the file is to be removed
    'lfn' is the file to be removed

    Note: the sole remaining replica of a file is never removed; such
    requests end up under 'Failed' with "Failed to remove sole replica".
    """
    # Accept a single LFN string or a list of LFNs (Python 2 ``types`` check).
    if type( lfn ) == ListType:
      lfns = lfn
    elif type( lfn ) == StringType:
      lfns = [lfn]
    else:
      errStr = "removeReplica: Supplied lfns must be string or list of strings."
      self.log.debug( errStr )
      return S_ERROR( errStr )
    successful = {}
    failed = {}
    # Check that we have write permissions to this file.
    res = self.__verifyWritePermission( lfns )
    if res['Value']['Failed']:
      errStr = "removeReplica: Write access not permitted for this credential."
      self.log.debug( errStr, 'for %d files' % len( res['Value']['Failed'] ) )
      failed.update( dict.fromkeys( res['Value']['Failed'], errStr ) )
      # Drop the LFNs we may not write to and carry on with the rest.
      lfns = [lfn for lfn in lfns if lfn not in res['Value']['Failed']]
    self.log.debug( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
                                                                                        storageElementName ) )
    # Second argument True = allStatus: consider replicas in any status.
    res = self.fc.getReplicas( lfns, True )
    if not res['OK']:
      errStr = "removeReplica: Completely failed to get replicas for lfns."
      self.log.debug( errStr, res['Message'] )
      return res
    failed.update( res['Value']['Failed'] )
    replicaDict = res['Value']['Successful']
    lfnsToRemove = []
    for lfn, repDict in res['Value']['Successful'].items():
      if storageElementName not in repDict:
        # The file doesn't exist at the storage element so don't have to remove it
        successful[lfn] = True
      elif len( repDict ) == 1:
        # The file has only a single replica so don't remove
        self.log.debug( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
                                                                                               storageElementName ) )
        failed[lfn] = "Failed to remove sole replica"
      else:
        lfnsToRemove.append( lfn )
    if not lfnsToRemove:
      return S_OK( { 'Successful' : successful, 'Failed' : failed } )
    res = self.__removeReplica( storageElementName, lfnsToRemove, replicaDict = replicaDict )
    if not res['OK']:
      return res
    failed.update( res['Value']['Failed'] )
    successful.update( res['Value']['Successful'] )
    # Flush the accounting records generated by the removal.
    gDataStoreClient.commit()
    return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def __removeReplica( self, storageElementName, lfns, replicaDict = None ):
""" remove replica
Remove the replica from the storageElement, and then from the catalog
:param storageElementName : The name of the storage Element
:param lfns list of lfn we want to remove
:param replicaDict : cache of fc.getReplicas(lfns) : { lfn { se : catalog url } }
"""
failed = {}
successful = {}
replicaDict = replicaDict if replicaDict else {}
lfnsToRemove = []
for lfn in lfns:
res = self.__verifyWritePermission( lfn )
if lfn not in res['Value']['Successful']:
errStr = "__removeReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
failed[lfn] = errStr
else:
lfnsToRemove.append( lfn )
res = self.__removePhysicalReplica( storageElementName, lfnsToRemove, replicaDict = replicaDict )
if not res['OK']:
errStr = "__removeReplica: Failed to remove physical replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( res['Message'] )
failed.update( dict( [( lfn, error ) for lfn, error in res['Value']['Failed'].items()] ) )
# Here we use the FC PFN...
replicaTuples = [( lfn, replicaDict[lfn][storageElementName], storageElementName ) for lfn in res['Value']['Successful']]
if not replicaTuples:
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
res = self.__removeCatalogReplica( replicaTuples )
if not res['OK']:
errStr = "__removeReplica: Completely failed to remove physical files."
self.log.debug( errStr, res['Message'] )
failed.update( dict.fromkeys( [lfn for lfn, _pfn, _se in replicaTuples if lfn not in failed], res['Message'] ) )
successful = {}
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to be removed
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.fc.getReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.debug( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
def __removeCatalogReplica( self, replicaTuple ):
""" remove replica form catalogue
:param replicaTuple : list of (lfn, catalogPFN, se)
"""
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuple ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuple:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fc.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
for lfn in res['Value']['Successful']:
infoStr = "__removeCatalogReplica: Successfully removed replica."
self.log.debug( infoStr, lfn )
if res['Value']['Successful']:
self.log.debug( "__removeCatalogReplica: Removed %d replicas" % len( res['Value']['Successful'] ) )
success = res['Value']['Successful']
if success:
self.log.info( "__removeCatalogReplica: Removed %d replicas" % len( success ) )
for lfn in success:
self.log.debug( "__removeCatalogReplica: Successfully removed replica.", lfn )
for lfn, error in res['Value']['Failed'].items():
self.log.error( "__removeCatalogReplica: Failed to remove replica.", "%s %s" % ( lfn, error ) )
oDataOperation.setValueByKey( 'RegistrationOK', len( success ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplicaLegacy( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
'lfn' are the files to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
successful = {}
failed = {}
# Check that we have write permissions to this directory.
res = self.__verifyWritePermission( lfns )
if res['Value']['Failed']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.debug( errStr, 'for %d files' % len( res['Value']['Failed'] ) )
failed.update( dict.fromkeys( res['Value']['Failed'], errStr ) )
lfns = [lfn for lfn in lfns if lfn not in res['Value']['Failed']]
self.log.debug( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.debug( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
replicaDict = res['Value']['Successful']
successful = {}
lfnsToRemove = []
for lfn, repDict in replicaDict.items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
lfnsToRemove.append( lfn )
self.log.debug( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( lfnsToRemove ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, lfnsToRemove, replicaDict = replicaDict )
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
for lfn in res['Value']['Successful']:
successful[lfn] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
def __removePhysicalReplica( self, storageElementName, lfnsToRemove, replicaDict = None ):
""" remove replica from storage element
:param storageElementName : name of the storage Element
:param lfnsToRemove : list of lfn to removes
:param replicaDict : cache of fc.getReplicas, to be passed to the SE
"""
self.log.debug( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( lfnsToRemove ),
storageElementName ) )
storageElement = StorageElement( storageElementName, vo = self.vo )
res = storageElement.isValid()
if not res['OK']:
errStr = "__removePhysicalReplica: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
return S_ERROR( errStr )
oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
storageElementName,
len( lfnsToRemove ) )
oDataOperation.setStartTime()
start = time.time()
ret = storageElement.getFileSize( lfnsToRemove, replicaDict = replicaDict )
deletedSizes = ret.get( 'Value', {} ).get( 'Successful', {} )
res = storageElement.removeFile( lfnsToRemove, replicaDict = replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removePhysicalReplica: Failed to remove replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
else:
for lfn, value in res['Value']['Failed'].items():
if 'No such file or directory' in value:
res['Value']['Successful'][lfn] = lfn
res['Value']['Failed'].pop( lfn )
for lfn in res['Value']['Successful']:
res['Value']['Successful'][lfn] = True
deletedSize = sum( [size for lfn, size in deletedSizes.items() if lfn in res['Value']['Successful']] )
oDataOperation.setValueByKey( 'TransferSize', deletedSize )
oDataOperation.setValueByKey( 'TransferOK', len( res['Value']['Successful'] ) )
gDataStoreClient.addRegister( oDataOperation )
infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
self.log.debug( infoStr )
return res
#########################################################################
#
# File transfer methods
#
def put( self, lfn, fileName, diracSE, path = None ):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "put: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "put: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE, vo = self.vo )
res = storageElement.isValid()
if not res['OK']:
errStr = "put: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
fileDict = {lfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = returnSingleResult( storageElement.putFile( fileDict ) )
putTime = time.time() - startTime
if not res['OK']:
errStr = "put: Failed to put file to Storage Element."
failed[lfn] = res['Message']
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
else:
self.log.debug( "put: Put file to storage in %s seconds." % putTime )
successful[lfn] = res['Value']
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
def checkActiveReplicas( self, replicaDict ):
""" Check a replica dictionary for active replicas
"""
if type( replicaDict ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
for key in [ 'Successful', 'Failed' ]:
if not key in replicaDict:
return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
if type( replicaDict[key] ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
seReadStatus = {}
for lfn, replicas in replicaDict['Successful'].items():
if type( replicas ) != DictType:
del replicaDict['Successful'][ lfn ]
replicaDict['Failed'][lfn] = 'Wrong replica info'
continue
for se in replicas.keys():
# Fix the caching
readStatus = seReadStatus[se] if se in seReadStatus else seReadStatus.setdefault( se, self.__SEActive( se ).get( 'Value', {} ).get( 'Read', False ) )
if not readStatus:
replicas.pop( se )
return S_OK( replicaDict )
def __SEActive( self, se ):
""" check is SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
if res['Value'][resolvedName].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][resolvedName].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
def __initialiseAccountingObject( self, operation, se, files ):
""" create accouting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'DataManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
##########################################
#
# Defunct methods kept only for backward compatibility
#
  def getReplicas( self, lfns, allStatus = True ):
    """ get replicas from catalogue

    :param lfns: LFN or list of LFNs to resolve
    :param bool allStatus: if True, also return replicas whose status is not Active
    :return: catalogue result structure; unless ``self.useCatalogPFN`` is set,
             the catalogue PFNs are replaced in place with URLs built by the
             StorageElement objects
    """
    res = self.fc.getReplicas( lfns, allStatus = allStatus )
    if not self.useCatalogPFN:
      if res['OK']:
        se_lfn = {}
        # ``catalogReplicas`` aliases res['Value']['Successful']; all updates
        # below therefore mutate the result that is returned.
        catalogReplicas = res['Value']['Successful']
        # We group the query to getURL by storage element to gain in speed
        for lfn in catalogReplicas:
          for se in catalogReplicas[lfn]:
            se_lfn.setdefault( se, [] ).append( lfn )
        for se in se_lfn:
          seObj = StorageElement( se, vo = self.vo )
          # Best effort: LFNs for which getURL fails keep their catalogue PFN.
          succPfn = seObj.getURL( se_lfn[se], protocol = self.registrationProtocol ).get( 'Value', {} ).get( 'Successful', {} )
          for lfn in succPfn:
            # catalogReplicas still points res["value"]["Successful"] so res will be updated
            catalogReplicas[lfn][se] = succPfn[lfn]
    return res
##################################################################################################
# Methods from the catalogToStorage. It would all work with the direct call to the SE, but this checks
# first if the replica is known to the catalog
def __executeIfReplicaExists( self, storageElementName, lfn, method, **kwargs ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
kwargs = kwargs if kwargs else {}
# # get replicas for lfn
res = FileCatalog( vo = self.vo ).getReplicas( lfn )
if not res["OK"]:
errStr = "__executeIfReplicaExists: Completely failed to get replicas for LFNs."
self.log.debug( errStr, res["Message"] )
return res
# # returned dict, get failed replicase
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
lfnList = []
se = None # Placeholder for the StorageElement object
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
lfnList.append( lfn )
else:
errStr = "__executeIfReplicaExists: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
if 'replicaDict' not in kwargs:
kwargs['replicaDict'] = lfnReplicas
# # call StorageElement function at least
se = se = se if se else StorageElement( storageElementName, vo = self.vo )
fcn = getattr( se, method )
res = fcn( lfnList, **kwargs )
# # check result
if not res["OK"]:
errStr = "__executeIfReplicaExists: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
# # filter out failed and successful
for lfn, lfnRes in res["Value"]["Successful"].items():
retDict["Successful"][lfn] = lfnRes
for lfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][lfn] = errorMessage
return S_OK( retDict )
  def getReplicaIsFile( self, lfn, storageElementName ):
    """ determine whether the supplied lfns are files at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "isFile" )
  def getReplicaSize( self, lfn, storageElementName ):
    """ get the size of files for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getFileSize" )
  def getReplicaAccessUrl( self, lfn, storageElementName, protocol = False ):
    """ get the access url for lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param protocol: optional protocol (or list of protocols) to request the URL for
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getURL", protocol = protocol )
  def getReplicaMetadata( self, lfn, storageElementName ):
    """ get the file metadata for lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getFileMetadata" )
  def prestageReplica( self, lfn, storageElementName, lifetime = 86400 ):
    """ issue a prestage requests for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param int lifetime: prestage pin lifetime in seconds (default 24h)
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "prestageFile", lifetime = lifetime )
  def pinReplica( self, lfn, storageElementName, lifetime = 86400 ):
    """ pin the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param int lifetime: pin lifetime in seconds (default 24h)
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "pinFile", lifetime = lifetime )
  def releaseReplica( self, lfn, storageElementName ):
    """ release pins for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "releaseFile" )
  def getReplica( self, lfn, storageElementName, localPath = False ):
    """ copy replicas from DIRAC SE to local directory

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param mixed localPath: path in the local file system, if False, os.getcwd() will be used
    :return: S_OK with per-LFN 'Successful'/'Failed' dicts, or an error result
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "getFile", localPath = localPath )
| fibbo/DIRAC | DataManagementSystem/Client/DataManager.py | Python | gpl-3.0 | 70,724 | [
"DIRAC"
] | 167387332b2e7037ed782436e64874777b7d80fba8cfc3503ece1a25ee4978c6 |
"""interp_time_series.py is a basic interpolation and extraction script which interpolates wind speed and direction to locations and heights.
Only nearest neighbour interpolation is used, i.e. the value from the centre of the grid cell containing the location
Usage:
interp_time_series.py <file>... --loc=<file> --height=<h1,h2,...,hn>... --atts=<attributes> [--dims=<dimspec>]... [--out=<dir>] [-h | --help]
Options:
--loc=<file> locations file containing latitude and longitude
--height=<h1,h2,...,hn> comma seperated list of heights to interpolate to
--atts=<attributes> list of attributes to write to time-series, in form key1:value1,key2:value2. If value is left
blank, will attempt to use value of global attribute from netcdf file
    --dims=<dimspec>         dimension slicing spec of the form name,start,end (e.g. time,0,24)
    --out=<dir>              output directory to write time-series to
-h |--help show this message
Notes:
This requires the extract_time_series.ncl script. Currently the variables extracted,the heights,
and the file-naming conventions are hard-coded in that script.
Examples:
        python interp_time_series.py wrfout_d01_2010-01-* --loc=locations.csv --height=80 --atts=grid_id --out=./tseries"""
import sys
import time
import datetime
import numpy as np
import wrftools
import tools
import docopt
from customexceptions import DomainError
from netCDF4 import Dataset
from collections import OrderedDict
# This should correspond to the header of the locations file:
# id/name/country are fixed-width byte strings, hub height and the
# coordinates are little-endian 64-bit floats (numpy structured dtype).
DTYPE = [('location_id', 'S6'), ('name', 'S10'), ('country', 'S10'), ('hubheight', '<f8'),('latitude', '<f8'),('longitude', '<f8')]
def main():
    """Extract wind speed and direction time-series from WRF output files.

    For every input netCDF file, winds are destaggered to mass points,
    heights above ground are derived from the geopotential, and speed and
    direction are linearly interpolated in height at the grid cell nearest
    each requested location.  One CSV file per location, variable and
    height is written to the output directory.
    """
    args = docopt.docopt(__doc__, sys.argv[1:])

    # Read locations file
    locfile = args['--loc']
    locations = read_loc_file(locfile, DTYPE)

    # Default to current directory if no output directory specified
    if args['--out']==None:
        out_dir = '.'
    else:
        out_dir = args['--out']

    # Get wrfout files to operate on
    nc_files = args['<file>']

    # Create height arrays to interpolate to
    hgts = args['--height'][0].split(',')
    interp_height = np.array(map(float,hgts))

    # Fetch dimension slicing specs
    dims = args['--dims']
    ts = 0    # time start
    te = None # time end, None uses all times
    for dim in dims:
        d,s,e = dim.split(',')
        if d=='time':
            ts = int(s)
            te = int(e)

    # For each file
    for f in nc_files:
        dataset = Dataset(f, 'r')

        # Get attributes to write to time-series; a bare key copies the value
        # of the global attribute of the same name from the netCDF file.
        atts = OrderedDict()
        for a in args['--atts'].split(','):
            keyval = a.split(':')
            if len(keyval)==1:
                key = keyval[0]
                atts[key.lower()] = dataset.getncattr(key)
            else:
                atts[keyval[0]] = keyval[1]

        # Get some dimensions
        dims = dataset.dimensions
        west_east = dims['west_east']
        west_east_stag = dims['west_east_stag']
        south_north = dims['south_north']
        south_north_stag = dims['south_north_stag']
        bottom_top = dims['bottom_top']
        bottom_top_stag = dims['bottom_top_stag']

        # WRF times are a character array, convert them into datetimes
        times = dataset.variables['Times']
        tstrings = [''.join(t) for t in times]
        dtimes = [datetime.datetime(*time.strptime(t,'%Y-%m-%d_%H:%M:%S')[0:5]) for t in tstrings]
        init_time = dtimes[0]

        # allow subsetting of time
        use_times = dtimes[ts:te]
        ntimes = len(use_times)

        lat2d = dataset.variables['XLAT'][0,:,:]
        lon2d = dataset.variables['XLONG'][0,:,:]
        ph = dataset.variables['PH'][:]
        phb = dataset.variables['PHB'][:]
        hgt = dataset.variables['HGT'][:]
        u = dataset.variables['U'][:]
        v = dataset.variables['V'][:]

        #
        # Destagger wind speeds to mass points
        #
        um = 0.5*(u[:,:,:,0:len(west_east)] + u[:,:,:,1:len(west_east_stag)])
        vm = 0.5*(v[:,:,0:len(south_north),:] + v[:,:,1:len(south_north_stag),:])

        #
        # calculate height of staggered, then unstaggered levels
        # (geopotential divided by g gives geopotential height)
        #
        stag_heights = (ph + phb) / 9.81
        mass_heights = 0.5*(stag_heights[:,0:len(bottom_top),:,:] + stag_heights[:,1:len(bottom_top_stag),:,:])

        #
        # Work out height above ground level by subtracting terrain height
        #
        agl_heights = mass_heights - hgt[:,np.newaxis,:,:]

        #
        # Now loop through each location in the file and extract time-series for that point
        #
        for l in locations :
            loc_id = l['location_id']
            lat = l['latitude']
            lon = l['longitude']

            # find nearest grid point
            (x,y, dist) = get_index(lat, lon, lat2d, lon2d)

            # get heights at this point
            level_heights = agl_heights[:,:,y,x]

            # loop over heights.
            # there may be a more pythonic way to do this without looping
            for i_hgt in interp_height:
                speed_fname = '%s/%s_SPEED_d%02d_%03d_%s.txt' %(out_dir, loc_id, atts['grid_id'], int(i_hgt), init_time.strftime('%Y-%m-%d_%H%M'))
                dir_fname = '%s/%s_DIRECTION_d%02d_%03d_%s.txt' %(out_dir, loc_id,atts['grid_id'], int(i_hgt), init_time.strftime('%Y-%m-%d_%H%M'))
                speed_file = open(speed_fname, 'w')
                dir_file = open(dir_fname, 'w')

                # find capping level
                levels_above = np.argmax(level_heights>=i_hgt, axis=1)
                levels_below = levels_above - 1

                #
                # If levels_above contains a zero, it means a height has been requested which
                # is below the lowest model level, and interpolation will fail
                #
                if np.min(levels_above)==0:
                    # Fixed: the original raised InterpolationError, which is
                    # neither defined nor imported anywhere in this module and
                    # therefore produced a NameError instead of this message.
                    raise ValueError('height requested is below lowest model level, interpolation will fail')

                #
                # Make the simplifying assumption that the capping level
                # doesn't change during the course of a file, typically one day
                # This is almost always justified, as the heights of model levels varies
                # only very slightly (.1m), and it simplifies the code significnatly
                #
                level_above = levels_above[0]
                level_below = levels_below[0]

                hgt_above = level_heights[:,level_above]
                hgt_below = level_heights[:,level_below]
                hgt_diff = hgt_above - hgt_below

                # What proportion of the level thickness are we below the upper capping level?
                height_frac = (hgt_above - i_hgt) / hgt_diff

                u_above = um[:,level_above,y,x]
                u_below = um[:,level_below,y,x]
                u_diff = u_above - u_below

                v_above = vm[:,level_above,y,x]
                v_below = vm[:,level_below,y,x]
                v_diff = v_above - v_below

                #
                # Do the linear interpolation
                #
                interp_u = u_above - (height_frac * u_diff)
                interp_v = v_above - (height_frac * v_diff)

                speed = np.sqrt(interp_u**2+interp_v**2)
                direction = tools.bearing(interp_u, interp_v)

                att_header = ','.join(atts.keys())
                att_values = ','.join(map(str,atts.values()))
                header = att_header+',location_id,latitude,longitude,variable,init_time,valid_time,height,value'
                speed_file.write(header)
                dir_file.write(header)
                speed_file.write('\n')
                dir_file.write('\n')

                for n in range(ntimes):
                    speed_file.write('%s,%s,%0.6f,%0.6f,%s,%s,%s,%0.3f,%0.3f\n' % (att_values, loc_id, lat, lon, 'SPEED', init_time.strftime('%Y-%m-%d %H:%M'), dtimes[ts+n].strftime('%Y-%m-%d %H:%M'), i_hgt, speed[ts+n]))
                    dir_file.write('%s,%s,%0.6f,%0.6f,%s,%s,%s,%0.3f,%0.3f\n' % (att_values, loc_id, lat, lon, 'DIRECTION', init_time.strftime('%Y-%m-%d %H:%M'), dtimes[ts+n].strftime('%Y-%m-%d %H:%M'), i_hgt, direction[ts+n]))

                speed_file.close()
                dir_file.close()

        dataset.close()
def read_loc_file(fname, dtype):
    """Read a comma-separated locations file.

    :param fname: path to the CSV file; the first line is a header and is skipped
    :param dtype: numpy structured dtype describing the columns
    :return: 1-d numpy record array with one entry per location
    """
    # skip_header=1 drops the header during parsing instead of parsing it into
    # a bogus record and slicing it off afterwards (the original approach).
    rec = np.genfromtxt(fname, dtype=dtype, delimiter=',', skip_header=1)
    # A file with a single data row yields a 0-d record; promote it so callers
    # can always len()/iterate the result.
    return np.atleast_1d(rec)
def in_domain(lat, lon, lat2d, lon2d):
    """Test whether (lat, lon) lies within the bounding box of lat2d/lon2d.

    :param lat: latitude of the target point
    :param lon: longitude of the target point
    :param lat2d: 2d array of grid-point latitudes
    :param lon2d: 2d array of grid-point longitudes
    :return: True if the point is inside the (inclusive) lat/lon bounds
    """
    min_lon = np.min(lon2d)
    max_lon = np.max(lon2d)
    min_lat = np.min(lat2d)
    max_lat = np.max(lat2d)
    if (lat < min_lat) or (lat > max_lat) or (lon < min_lon) or (lon > max_lon):
        # Fetch the module logger here: the original referenced an undefined
        # global ``logger``, raising NameError whenever a point fell outside
        # the domain. This mirrors how get_index obtains its logger.
        logger = wrftools.get_logger()
        logger.debug("point (%0.3f, %0.3f) is not within domain (%0.2f, %0.3f, %0.3f, %0.3f)" %(lat, lon, min_lat, max_lat, min_lon, max_lon))
        return False
    return True
def get_index(lat, lon, lat2d, lon2d):
    """ Finds the nearest mass point grid index to the point (lon, lat).
    Works but is slow as just naively searches through the arrays point
    by point.
    Arguments:
        @lat: the latitude of the target point
        @lon: longitude of the target point
        @lat2d: 2d array of latitudes of grid points
        @lon2d: 2d array of longitudes of grid points
    Returns (i,j) of nearest grid cell. Raises exception if outside"""
    logger = wrftools.get_logger()
    logger.debug("finding index of (%0.3f, %0.3f) " %(lat,lon))
    # NOTE(review): shape[0] is labelled west_east here, but WRF 2-d
    # coordinate arrays are conventionally (south_north, west_east) --
    # confirm the intended axis order against the caller.
    west_east = lat2d.shape[0]
    south_north = lat2d.shape[1]
    logger.debug("dimensions of domain are: %d south_north, %d west_east" %(south_north, west_east))
    if not in_domain(lat, lon, lat2d, lon2d):
        raise DomainError('point (%0.3f, %0.3f) not in model domain' %(lat, lon))
    #
    # slow, but will work. Just search through the arrays until we
    # hit the nearest grid point
    #
    min_dist = 10000000 # if the point is further than 10M m away, don't bother!
    min_x = 0
    min_y = 0
    # NOTE(review): range(n-1) never visits the last row/column of the grid --
    # looks like an off-by-one; confirm whether the outermost cells should be
    # searched as well.
    for x in range(west_east-1):
        for y in range(south_north-1):
            point_lat = lat2d[x,y]
            point_lon = lon2d[x,y]
            # great-circle distance (metres) between the target and this grid point
            d = tools.haversine(lat, lon, point_lat, point_lon)
            if d < min_dist:
                min_dist = d
                min_x = x
                min_y = y
    # NOTE(review): index 0 can be a legitimate nearest cell, yet it is treated
    # as an on-the-edge failure here -- confirm this rejection is intentional.
    if min_x==0 or min_y==0 or min_x>west_east or min_y>south_north:
        logger.error("Point is on/off edge of of model domain, this should have been caught earlier!")
        raise DomainError("Point is on/off edge of of model domain")
    logger.debug('nearest grid index is x=%d, y=%d, %0.3f m away' %(min_x, min_y, min_dist))
    logger.debug('latitude, longitude of original is (%0.3f, %0.3f)' %(lat, lon))
    logger.debug('latitude, longitude of index is (%0.3f, %0.3f)' %(lat2d[min_x,min_y], lon2d[min_x,min_y]))
    return (min_x, min_y, min_dist)
# Script entry point: run the time-series interpolation driver.
if __name__ == '__main__':
    main()
| envhyf/wrftools | wrftools/interp_time_series.py | Python | gpl-3.0 | 11,732 | [
"NetCDF"
] | c7ed8de30b84fbe261d406b968ed57111fcad77813b6d1e53746e8a25a87d2fd |
# CREATED:2015-09-16 14:46:47 by Brian McFee <brian.mcfee@nyu.edu>
# -*- encoding: utf-8 -*-
'''Evaluation criteria for hierarchical structure analysis.
Hierarchical structure analysis seeks to annotate a track with a nested
decomposition of the temporal elements of the piece, effectively providing
a kind of "parse tree" of the composition. Unlike the flat segmentation
metrics defined in :mod:`mir_eval.segment`, which can only encode one level of
analysis, hierarchical annotations expose the relationships between short
segments and the larger compositional elements to which they belong.
Conventions
-----------
Annotations are assumed to take the form of an ordered list of segmentations.
As in the :mod:`mir_eval.segment` metrics, each segmentation itself consists of
an n-by-2 array of interval times, so that the ``i`` th segment spans time
``intervals[i, 0]`` to ``intervals[i, 1]``.
Hierarchical annotations are ordered by increasing specificity, so that the
first segmentation should contain the fewest segments, and the last
segmentation contains the most.
Metrics
-------
* :func:`mir_eval.hierarchy.tmeasure`: Precision, recall, and F-measure of
triplet-based frame accuracy for boundary detection.
* :func:`mir_eval.hierarchy.lmeasure`: Precision, recall, and F-measure of
triplet-based frame accuracy for segment labeling.
References
----------
.. [#mcfee2015] Brian McFee, Oriol Nieto, and Juan P. Bello.
"Hierarchical evaluation of segment boundary detection",
International Society for Music Information Retrieval (ISMIR) conference,
2015.
.. [#mcfee2017] Brian McFee, Oriol Nieto, Morwaread Farbood, and
Juan P. Bello.
"Evaluating hierarchical structure in music annotations",
Frontiers in Psychology, 2017.
'''
import collections
import itertools
import warnings
import numpy as np
import scipy.sparse
from . import util
from .segment import validate_structure
def _round(t, frame_size):
'''Round a time-stamp to a specified resolution.
Equivalent to ``t - np.mod(t, frame_size)``.
Examples
--------
>>> _round(53.279, 0.1)
53.2
>>> _round(53.279, 0.25)
53.25
Parameters
----------
t : number or ndarray
The time-stamp to round
frame_size : number > 0
The resolution to round to
Returns
-------
t_round : number
The rounded time-stamp
'''
return t - np.mod(t, float(frame_size))
def _hierarchy_bounds(intervals_hier):
'''Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
'''
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries)
def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
    '''Align a hierarchical annotation to span a fixed start and end time.

    Parameters
    ----------
    int_hier : list of list of intervals
    lab_hier : list of list of str
        Hierarchical segment annotations, encoded as a
        list of list of intervals (int_hier) and list of
        list of strings (lab_hier)
    t_min : None or number >= 0
        The minimum time value for the segmentation
    t_max : None or number >= t_min
        The maximum time value for the segmentation

    Returns
    -------
    intervals_hier : list of list of intervals
    labels_hier : list of list of str
        `int_hier` `lab_hier` aligned to span `[t_min, t_max]`.
    '''
    aligned_ints = []
    aligned_labs = []
    # Adjust each layer independently to the common [t_min, t_max] span
    for ival, lab in zip(int_hier, lab_hier):
        new_ival, new_lab = util.adjust_intervals(np.asarray(ival),
                                                  labels=lab,
                                                  t_min=t_min,
                                                  t_max=t_max)
        aligned_ints.append(new_ival)
        aligned_labs.append(new_lab)
    return [aligned_ints, aligned_labs]
def _lca(intervals_hier, frame_size):
    '''Compute the (sparse) least-common-ancestor (LCA) matrix for a
    hierarchical segmentation.

    For any pair of frames ``(s, t)``, the LCA is the deepest level in
    the hierarchy such that ``(s, t)`` are contained within a single
    segment at that level.

    Parameters
    ----------
    intervals_hier : list of ndarray
        An ordered list of segment interval arrays.
        The list is assumed to be ordered by increasing specificity (depth).
    frame_size : number
        The length of the sample frames (in seconds)

    Returns
    -------
    lca_matrix : scipy.sparse.csr_matrix
        A sparse matrix such that ``lca_matrix[i, j]`` contains the depth
        of the deepest segment containing frames ``i`` and ``j``.
    '''
    frame = float(frame_size)
    # Total number of frames spanned by the hierarchy
    t_min, t_max = _hierarchy_bounds(intervals_hier)
    n_frames = int((_round(t_max, frame) - _round(t_min, frame)) / frame)
    # Build in LIL format (efficient incremental assignment),
    # convert to CSR at the end
    lca = scipy.sparse.lil_matrix((n_frames, n_frames), dtype=np.uint8)
    # Levels are processed shallow-to-deep, so deeper segments overwrite
    # the values written by their ancestors
    for depth, intervals in enumerate(intervals_hier, 1):
        frame_ivals = (_round(np.asarray(intervals), frame) /
                       frame).astype(int)
        for start, end in frame_ivals:
            span = slice(start, end)
            lca[span, span] = depth
    return lca.tocsr()
def _meet(intervals_hier, labels_hier, frame_size):
    '''Compute the (sparse) label-meet matrix for a hierarchical
    segmentation.

    For any pair of frames ``(s, t)``, the meet is the deepest level in
    the hierarchy such that ``(s, t)`` receive the same segment label
    at that level.

    Parameters
    ----------
    intervals_hier : list of ndarray
        An ordered list of segment interval arrays.
        The list is assumed to be ordered by increasing specificity (depth).
    labels_hier : list of list of str
        ``labels_hier[i]`` contains the segment labels for the
        ``i``th layer of the annotations
    frame_size : number
        The length of the sample frames (in seconds)

    Returns
    -------
    meet_matrix : scipy.sparse.csr_matrix
        A sparse matrix such that ``meet_matrix[i, j]`` contains the depth
        of the deepest segment label containing both ``i`` and ``j``.
    '''
    frame_size = float(frame_size)
    # Figure out how many frames we need
    n_start, n_end = _hierarchy_bounds(intervals_hier)
    n = int((_round(n_end, frame_size) -
             _round(n_start, frame_size)) / frame_size)
    # Initialize the meet matrix
    meet_matrix = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)
    for level, (intervals, labels) in enumerate(zip(intervals_hier,
                                                    labels_hier), 1):
        # Encode the labels at this level as integers
        lab_enc = util.index_labels(labels)[0]
        # Find unique agreements: upper triangle of the pairwise
        # label-equality matrix, so each unordered pair is visited once
        int_agree = np.triu(np.equal.outer(lab_enc, lab_enc))
        # Map intervals to frame indices
        int_frames = (_round(intervals, frame_size) / frame_size).astype(int)
        # For each intervals i, j where labels agree, update the meet matrix
        for (seg_i, seg_j) in zip(*np.where(int_agree)):
            idx_i = slice(*list(int_frames[seg_i]))
            idx_j = slice(*list(int_frames[seg_j]))
            # Levels are processed shallow-to-deep, so deeper agreements
            # overwrite shallower ones
            meet_matrix[idx_i, idx_j] = level
            if seg_i != seg_j:
                # Mirror the assignment to keep the matrix symmetric
                meet_matrix[idx_j, idx_i] = level
    return scipy.sparse.csr_matrix(meet_matrix)
def _gauc(ref_lca, est_lca, transitive, window):
    '''Generalized area under the curve (GAUC)

    This function computes the normalized recall score for correctly
    ordering triples ``(q, i, j)`` where frames ``(q, i)`` are closer than
    ``(q, j)`` in the reference annotation.

    Parameters
    ----------
    ref_lca : scipy.sparse
    est_lca : scipy.sparse
        The least common ancestor matrices for the reference and
        estimated annotations
    transitive : bool
        If True, then transitive comparisons are counted, meaning that
        ``(q, i)`` and ``(q, j)`` can differ by any number of levels.

        If False, then ``(q, i)`` and ``(q, j)`` can differ by exactly one
        level.
    window : number or None
        The maximum number of frames to consider for each query.
        If `None`, then all frames are considered.

    Returns
    -------
    score : number [0, 1]
        The percentage of reference triples correctly ordered by
        the estimation.

    Raises
    ------
    ValueError
        If ``ref_lca`` and ``est_lca`` have different shapes
    '''
    # Make sure we have the right number of frames
    if ref_lca.shape != est_lca.shape:
        raise ValueError('Estimated and reference hierarchies '
                         'must have the same shape.')
    # How many frames?
    n = ref_lca.shape[0]
    # By default, the window covers the entire track
    if window is None:
        window = n
    # Initialize the score
    score = 0.0
    # Iterate over query frames
    num_frames = 0
    for query in range(n):
        # Find all pairs i,j such that ref_lca[q, i] > ref_lca[q, j]
        # Restrict candidates to the window around the query frame
        results = slice(max(0, query - window), min(n, query + window))
        ref_score = ref_lca[query, results]
        est_score = est_lca[query, results]
        # Densify the results
        ref_score = np.asarray(ref_score.todense()).squeeze()
        est_score = np.asarray(est_score.todense()).squeeze()
        # Don't count the query as a result
        # when query < window, query itself is the index within the slice
        # otherwise, query is located at the center of the slice, window
        # (this also holds when the slice goes off the end of the array.)
        idx = min(query, window)
        ref_score = np.concatenate((ref_score[:idx], ref_score[idx+1:]))
        est_score = np.concatenate((est_score[:idx], est_score[idx+1:]))
        inversions, normalizer = _compare_frame_rankings(ref_score, est_score,
                                                         transitive=transitive)
        # Only queries with at least one comparable pair contribute;
        # accumulate the fraction of correctly ordered triples
        if normalizer:
            score += 1.0 - inversions / float(normalizer)
            num_frames += 1
    # Normalize by the number of frames counted.
    # If no frames are counted, take the convention 0/0 -> 0
    if num_frames:
        score /= float(num_frames)
    else:
        score = 0.0
    return score
def _count_inversions(a, b):
'''Count the number of inversions in two numpy arrays:
# points i, j where a[i] >= b[j]
Parameters
----------
a, b : np.ndarray, shape=(n,) (m,)
The arrays to be compared.
This implementation is optimized for arrays with many
repeated values.
Returns
-------
inversions : int
The number of detected inversions
'''
a, a_counts = np.unique(a, return_counts=True)
b, b_counts = np.unique(b, return_counts=True)
inversions = 0
i = 0
j = 0
while i < len(a) and j < len(b):
if a[i] < b[j]:
i += 1
elif a[i] >= b[j]:
inversions += np.sum(a_counts[i:]) * b_counts[j]
j += 1
return inversions
def _compare_frame_rankings(ref, est, transitive=False):
    '''Compute the number of ranking disagreements in two lists.

    Parameters
    ----------
    ref : np.ndarray, shape=(n,)
    est : np.ndarray, shape=(n,)
        Reference and estimate ranked lists.
        `ref[i]` is the relevance score for point `i`.
    transitive : bool
        If true, all pairs of reference levels are compared.

        If false, only adjacent pairs of reference levels are compared.

    Returns
    -------
    inversions : int
        The number of pairs of indices `i, j` where
        `ref[i] < ref[j]` but `est[i] >= est[j]`.

    normalizer : float
        The total number of pairs (i, j) under consideration.
        If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
        If transitive=False, then this is |{i,j) : ref[i] +1 = ref[j]}|
    '''
    # Sort both arrays by the reference scores, so that each distinct
    # reference level occupies a contiguous run
    idx = np.argsort(ref)
    ref_sorted = ref[idx]
    est_sorted = est[idx]
    # Find the break-points in ref_sorted
    levels, positions, counts = np.unique(ref_sorted,
                                          return_index=True,
                                          return_counts=True)
    positions = list(positions)
    positions.append(len(ref_sorted))
    # index[level] -> slice of est_sorted covering that reference level;
    # ref_map[level] -> number of frames at that level.
    # Defaults (empty slice / zero count) handle levels absent from ref.
    index = collections.defaultdict(lambda: slice(0))
    ref_map = collections.defaultdict(lambda: 0)
    for level, cnt, start, end in zip(levels, counts,
                                      positions[:-1], positions[1:]):
        index[level] = slice(start, end)
        ref_map[level] = cnt
    # Now that we have values sorted, apply the inversion-counter to
    # pairs of reference values
    if transitive:
        level_pairs = itertools.combinations(levels, 2)
    else:
        level_pairs = [(i, i+1) for i in levels]
    # Duplicate the (possibly lazy) pair iterator: one copy for the
    # normalizer, one for the inversion count
    level_pairs, lcounter = itertools.tee(level_pairs)
    normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))
    if normalizer == 0:
        return 0, 0.0
    inversions = 0
    for level_1, level_2 in level_pairs:
        inversions += _count_inversions(est_sorted[index[level_1]],
                                        est_sorted[index[level_2]])
    return inversions, float(normalizer)
def validate_hier_intervals(intervals_hier):
    '''Validate a hierarchical segment annotation.

    Each layer is checked against the top layer via
    ``validate_structure``; additionally, boundaries that disappear at a
    deeper level only trigger a warning, not an error.

    Parameters
    ----------
    intervals_hier : ordered list of segmentations

    Raises
    ------
    ValueError
        If any segmentation does not span the full duration of the top-level
        segmentation.

        If any segmentation does not start at 0.
    '''
    # Synthesize a label array for the top layer.
    label_top = util.generate_labels(intervals_hier[0])
    boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))
    for level, intervals in enumerate(intervals_hier[1:], 1):
        # Make sure this level is consistent with the root
        label_current = util.generate_labels(intervals)
        validate_structure(intervals_hier[0], label_top,
                           intervals, label_current)
        # Make sure all previous boundaries are accounted for:
        # coarse boundaries are expected to persist at deeper levels
        new_bounds = set(util.intervals_to_boundaries(intervals))
        if boundaries - new_bounds:
            warnings.warn('Segment hierarchy is inconsistent '
                          'at level {:d}'.format(level))
        # Accumulate boundaries so each level is compared against the
        # union of everything above it
        boundaries |= new_bounds
def tmeasure(reference_intervals_hier, estimated_intervals_hier,
             transitive=False, window=15.0, frame_size=0.1, beta=1.0):
    '''Computes the tree measures for hierarchical segment annotations.

    Parameters
    ----------
    reference_intervals_hier : list of ndarray
        ``reference_intervals_hier[i]`` contains the segment intervals
        (in seconds) for the ``i`` th layer of the annotations. Layers are
        ordered from top to bottom, so that the last list of intervals should
        be the most specific.
    estimated_intervals_hier : list of ndarray
        Like ``reference_intervals_hier`` but for the estimated annotation
    transitive : bool
        whether to compute the t-measures using transitivity or not.
    window : float > 0
        size of the window (in seconds). For each query frame q,
        result frames are only counted within q +- window.
    frame_size : float > 0
        length (in seconds) of frames. The frame size cannot be longer than
        the window.
    beta : float > 0
        beta parameter for the F-measure.

    Returns
    -------
    t_precision : number [0, 1]
        T-measure Precision
    t_recall : number [0, 1]
        T-measure Recall
    t_measure : number [0, 1]
        F-beta measure for ``(t_precision, t_recall)``

    Raises
    ------
    ValueError
        If either of the input hierarchies are inconsistent

        If the input hierarchies have different time durations

        If ``frame_size > window`` or ``frame_size <= 0``
    '''
    # Compute the number of frames in the window
    if frame_size <= 0:
        raise ValueError('frame_size ({:.2f}) must be a positive '
                         'number.'.format(frame_size))
    if window is None:
        # window=None disables windowing: every frame is a candidate
        window_frames = None
    else:
        if frame_size > window:
            raise ValueError('frame_size ({:.2f}) cannot exceed '
                             'window ({:.2f})'.format(frame_size, window))
        window_frames = int(_round(window, frame_size) / frame_size)
    # Validate the hierarchical segmentations
    validate_hier_intervals(reference_intervals_hier)
    validate_hier_intervals(estimated_intervals_hier)
    # Build the least common ancestor matrices
    ref_lca = _lca(reference_intervals_hier, frame_size)
    est_lca = _lca(estimated_intervals_hier, frame_size)
    # Compute precision and recall.
    # Recall scores the estimate's ordering against reference triples;
    # precision is the same comparison with the roles swapped.
    t_recall = _gauc(ref_lca, est_lca, transitive, window_frames)
    t_precision = _gauc(est_lca, ref_lca, transitive, window_frames)
    t_measure = util.f_measure(t_precision, t_recall, beta=beta)
    return t_precision, t_recall, t_measure
def lmeasure(reference_intervals_hier, reference_labels_hier,
             estimated_intervals_hier, estimated_labels_hier,
             frame_size=0.1, beta=1.0):
    '''Computes the L-measures for hierarchical segment annotations.

    Parameters
    ----------
    reference_intervals_hier : list of ndarray
        ``reference_intervals_hier[i]`` contains the segment intervals
        (in seconds) for the ``i`` th layer of the annotations. Layers are
        ordered from top to bottom, so that the last list of intervals should
        be the most specific.
    reference_labels_hier : list of list of str
        ``reference_labels_hier[i]`` contains the segment labels for the
        ``i``th layer of the annotations
    estimated_intervals_hier : list of ndarray
    estimated_labels_hier : list of ndarray
        Like ``reference_intervals_hier`` and ``reference_labels_hier``
        but for the estimated annotation
    frame_size : float > 0
        length (in seconds) of frames.
    beta : float > 0
        beta parameter for the F-measure.

    Returns
    -------
    l_precision : number [0, 1]
        L-measure Precision
    l_recall : number [0, 1]
        L-measure Recall
    l_measure : number [0, 1]
        F-beta measure for ``(l_precision, l_recall)``

    Raises
    ------
    ValueError
        If either of the input hierarchies are inconsistent

        If the input hierarchies have different time durations

        If ``frame_size <= 0``
    '''
    # Sanity-check the frame resolution
    if frame_size <= 0:
        raise ValueError('frame_size ({:.2f}) must be a positive '
                         'number.'.format(frame_size))
    # Validate the hierarchical segmentations
    validate_hier_intervals(reference_intervals_hier)
    validate_hier_intervals(estimated_intervals_hier)
    # Build the label-meet matrices (label analogue of the LCA matrices)
    ref_meet = _meet(reference_intervals_hier, reference_labels_hier,
                     frame_size)
    est_meet = _meet(estimated_intervals_hier, estimated_labels_hier,
                     frame_size)
    # Compute precision and recall.
    # L-measures are always transitive and unwindowed.
    l_recall = _gauc(ref_meet, est_meet, True, None)
    l_precision = _gauc(est_meet, ref_meet, True, None)
    l_measure = util.f_measure(l_precision, l_recall, beta=beta)
    return l_precision, l_recall, l_measure
def evaluate(ref_intervals_hier, ref_labels_hier,
             est_intervals_hier, est_labels_hier, **kwargs):
    '''Compute all hierarchical structure metrics for the given reference and
    estimated annotations.

    Examples
    --------
    A toy example with two two-layer annotations

    >>> ref_i = [[[0, 30], [30, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
    >>> est_i = [[[0, 45], [45, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
    >>> ref_l = [ ['A', 'B'], ['a', 'b', 'a', 'c'] ]
    >>> est_l = [ ['A', 'B'], ['a', 'a', 'b', 'b'] ]
    >>> scores = mir_eval.hierarchy.evaluate(ref_i, ref_l, est_i, est_l)
    >>> dict(scores)
    {'T-Measure full': 0.94822745804853459,
     'T-Measure reduced': 0.8732458222764804,
     'T-Precision full': 0.96569179094693058,
     'T-Precision reduced': 0.89939075137018787,
     'T-Recall full': 0.93138358189386117,
     'T-Recall reduced': 0.84857799953694923}

    A more realistic example, using SALAMI pre-parsed annotations

    >>> def load_salami(filename):
    ...     "load SALAMI event format as labeled intervals"
    ...     events, labels = mir_eval.io.load_labeled_events(filename)
    ...     intervals = mir_eval.util.boundaries_to_intervals(events)[0]
    ...     return intervals, labels[:len(intervals)]
    >>> ref_files = ['data/10/parsed/textfile1_uppercase.txt',
    ...              'data/10/parsed/textfile1_lowercase.txt']
    >>> est_files = ['data/10/parsed/textfile2_uppercase.txt',
    ...              'data/10/parsed/textfile2_lowercase.txt']
    >>> ref = [load_salami(fname) for fname in ref_files]
    >>> ref_int = [seg[0] for seg in ref]
    >>> ref_lab = [seg[1] for seg in ref]
    >>> est = [load_salami(fname) for fname in est_files]
    >>> est_int = [seg[0] for seg in est]
    >>> est_lab = [seg[1] for seg in est]
    >>> scores = mir_eval.hierarchy.evaluate(ref_int, ref_lab,
    ...                                      est_int, est_lab)
    >>> dict(scores)
    {'T-Measure full': 0.66029225561405358,
     'T-Measure reduced': 0.62001868041578034,
     'T-Precision full': 0.66844764668949885,
     'T-Precision reduced': 0.63252297209957919,
     'T-Recall full': 0.6523334654992341,
     'T-Recall reduced': 0.60799919710921635}

    Parameters
    ----------
    ref_intervals_hier : list of list-like
    ref_labels_hier : list of list of str
    est_intervals_hier : list of list-like
    est_labels_hier : list of list of str
        Hierarchical annotations are encoded as an ordered list
        of segmentations.  Each segmentation itself is a list (or list-like)
        of intervals (\*_intervals_hier) and a list of lists of labels
        (\*_labels_hier).
    kwargs
        additional keyword arguments to the evaluation metrics.

    Returns
    -------
    scores : OrderedDict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

        T-measures are computed in both the "full" (``transitive=True``)
        and "reduced" (``transitive=False``) modes.

    Raises
    ------
    ValueError
        Thrown when the provided annotations are not valid.
    '''
    # First, find the maximum length of the reference
    _, t_end = _hierarchy_bounds(ref_intervals_hier)
    # Pre-process the intervals to match the range of the reference,
    # and start at 0.  The reference keeps its own span (t_max=None);
    # the estimate is then stretched/truncated to the reference end time.
    ref_intervals_hier, ref_labels_hier = _align_intervals(ref_intervals_hier,
                                                           ref_labels_hier,
                                                           t_min=0.0,
                                                           t_max=None)
    est_intervals_hier, est_labels_hier = _align_intervals(est_intervals_hier,
                                                           est_labels_hier,
                                                           t_min=0.0,
                                                           t_max=t_end)
    scores = collections.OrderedDict()
    # Force the transitivity setting
    kwargs['transitive'] = False
    (scores['T-Precision reduced'],
     scores['T-Recall reduced'],
     scores['T-Measure reduced']) = util.filter_kwargs(tmeasure,
                                                       ref_intervals_hier,
                                                       est_intervals_hier,
                                                       **kwargs)
    kwargs['transitive'] = True
    (scores['T-Precision full'],
     scores['T-Recall full'],
     scores['T-Measure full']) = util.filter_kwargs(tmeasure,
                                                    ref_intervals_hier,
                                                    est_intervals_hier,
                                                    **kwargs)
    (scores['L-Precision'],
     scores['L-Recall'],
     scores['L-Measure']) = util.filter_kwargs(lmeasure,
                                               ref_intervals_hier,
                                               ref_labels_hier,
                                               est_intervals_hier,
                                               est_labels_hier,
                                               **kwargs)
    return scores
| bmcfee/mir_eval | mir_eval/hierarchy.py | Python | mit | 25,014 | [
"Brian"
] | 5c1e3bca617e33593ee248864454549bcef805203ef742aded380386774512d8 |
from __future__ import print_function
# husk:
# Exit*2? remove pylab.show()
# close button
# DFT
# ADOS
# grey-out stuff after one second: vmd, rasmol, ...
# Show with ....
# rasmol: set same rotation as ag
# Graphs: save, Python, 3D
# start from python (interactive mode?)
# ascii-art option (colored)|
# option -o (output) and -f (force overwrite)
# surfacebuilder
# screen-dump
# icon
# ag-community-server
# translate option: record all translations,
# and check for missing translations.
#TODO: Add possible way of choosing orinetations. \
#TODO: Two atoms defines a direction, three atoms their normal does
#TODO: Align orientations chosen in Rot_selected v unselcted
#TODO: Get the atoms_rotate_0 thing string
#TODO: Use set atoms instead og the get atoms
#TODO: Arrow keys will decide how the orientation changes
#TODO: Undo redo que should be implemented
#TODO: Update should have possibility to change positions
#TODO: Window for rotation modes and move moves which can be chosen
#TODO: WHen rotate and move / hide the movie menu
import os
import sys
import weakref
import pickle
from gettext import gettext as _
from gettext import ngettext
import numpy as np
import pygtk
pygtk.require("2.0")
import gtk
from ase.gui.view import View
from ase.gui.status import Status
from ase.gui.widgets import pack, help, Help, oops
from ase.gui.settings import Settings
from ase.gui.crystal import SetupBulkCrystal
from ase.gui.surfaceslab import SetupSurfaceSlab
from ase.gui.nanoparticle import SetupNanoparticle
from ase.gui.nanotube import SetupNanotube
from ase.gui.graphene import SetupGraphene
from ase.gui.calculator import SetCalculator
from ase.gui.energyforces import EnergyForces
from ase.gui.minimize import Minimize
from ase.gui.scaling import HomogeneousDeformation
from ase.gui.quickinfo import QuickInfo
from ase.gui.save import SaveWindow
from ase.version import version
ui_info = """\
<ui>
<menubar name='MenuBar'>
<menu action='FileMenu'>
<menuitem action='Open'/>
<menuitem action='New'/>
<menuitem action='Save'/>
<separator/>
<menuitem action='Quit'/>
</menu>
<menu action='EditMenu'>
<menuitem action='SelectAll'/>
<menuitem action='Invert'/>
<menuitem action='SelectConstrained'/>
<menuitem action='SelectImmobile'/>
<separator/>
<menuitem action='Copy'/>
<menuitem action='Paste'/>
<separator/>
<menuitem action='HideAtoms'/>
<menuitem action='ShowAtoms'/>
<separator/>
<menuitem action='Modify'/>
<menuitem action='AddAtoms'/>
<menuitem action='DeleteAtoms'/>
<separator/>
<menuitem action='First'/>
<menuitem action='Previous'/>
<menuitem action='Next'/>
<menuitem action='Last'/>
</menu>
<menu action='ViewMenu'>
<menuitem action='ShowUnitCell'/>
<menuitem action='ShowAxes'/>
<menuitem action='ShowBonds'/>
<menuitem action='ShowVelocities'/>
<menuitem action='ShowForces'/>
<menu action='ShowLabels'>
<menuitem action='NoLabel'/>
<menuitem action='AtomIndex'/>
<menuitem action='MagMom'/>
<menuitem action='Element'/>
</menu>
<separator/>
<menuitem action='QuickInfo'/>
<menuitem action='Repeat'/>
<menuitem action='Rotate'/>
<menuitem action='Colors'/>
<menuitem action='Focus'/>
<menuitem action='ZoomIn'/>
<menuitem action='ZoomOut'/>
<menu action='ChangeView'>
<menuitem action='ResetView'/>
<menuitem action='xyPlane'/>
<menuitem action='yzPlane'/>
<menuitem action='zxPlane'/>
<menuitem action='yxPlane'/>
<menuitem action='zyPlane'/>
<menuitem action='xzPlane'/>
<menuitem action='a2a3Plane'/>
<menuitem action='a3a1Plane'/>
<menuitem action='a1a2Plane'/>
<menuitem action='a3a2Plane'/>
<menuitem action='a2a1Plane'/>
<menuitem action='a1a3Plane'/>
</menu>
<menuitem action='Settings'/>
<menuitem action='VMD'/>
<menuitem action='RasMol'/>
<menuitem action='XMakeMol'/>
<menuitem action='Avogadro'/>
</menu>
<menu action='ToolsMenu'>
<menuitem action='Graphs'/>
<menuitem action='Movie'/>
<menuitem action='EModify'/>
<menuitem action='Constraints'/>
<menuitem action='RenderScene'/>
<menuitem action='MoveAtoms'/>
<menuitem action='RotateAtoms'/>
<menuitem action='OrientAtoms'/>
<menuitem action='DFT'/>
<menuitem action='NEB'/>
<menuitem action='BulkModulus'/>
</menu>
<menu action='SetupMenu'>
<menuitem action='Bulk'/>
<menuitem action='Surface'/>
<menuitem action='Nanoparticle'/>
<menuitem action='Graphene'/>
<menuitem action='Nanotube'/>
</menu>
<menu action='CalculateMenu'>
<menuitem action='SetCalculator'/>
<separator/>
<menuitem action='EnergyForces'/>
<menuitem action='Minimize'/>
<menuitem action='Scaling'/>
</menu>
<menu action='HelpMenu'>
<menuitem action='About'/>
<menuitem action='Webpage'/>
<menuitem action='Debug'/>
</menu>
</menubar>
</ui>"""
class GUI(View, Status):
    def __init__(self, images, rotations='', show_unit_cell=True,
                 show_bonds=False):
        """Build the main ase-gui application window.

        Parameters
        ----------
        images : image collection to display (indexed by self.frame)
        rotations : str
            Initial view rotation specification, forwarded to View.
        show_unit_cell : bool-like
            Initial state of the 'Show unit cell' toggle.
        show_bonds : bool
            Initial state of the 'Show bonds' toggle.
        """
        # Try to change into directory of file you are viewing
        try:
            os.chdir(os.path.split(sys.argv[1])[0])
        # This will fail sometimes (e.g. for starting a new session)
        except:
            pass
        self.images = images
        # Top-level window and its vertical layout container
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        #self.window.set_icon(gtk.gdk.pixbuf_new_from_file('guiase.png'))
        self.window.set_position(gtk.WIN_POS_CENTER)
        #self.window.connect("destroy", lambda w: gtk.main_quit())
        self.window.connect('delete_event', self.exit)
        vbox = gtk.VBox()
        self.window.add(vbox)
        if gtk.pygtk_version < (2, 12):
            self.set_tip = gtk.Tooltips().set_tip
        # Register all menu actions.  Each plain entry is
        # (name, stock-id, label, accelerator, tooltip, callback).
        actions = gtk.ActionGroup("Actions")
        actions.add_actions([
            ('FileMenu', None, _('_File')),
            ('EditMenu', None, _('_Edit')),
            ('ViewMenu', None, _('_View')),
            ('ToolsMenu', None, _('_Tools')),
            # TRANSLATORS: Set up (i.e. build) surfaces, nanoparticles, ...
            ('SetupMenu', None, _('_Setup')),
            ('CalculateMenu', None, _('_Calculate')),
            ('HelpMenu', None, _('_Help')),
            # NOTE(review): tooltip says 'Create a new file' but this action
            # opens an existing file -- looks like a copy/paste slip.
            ('Open', gtk.STOCK_OPEN, _('_Open'), '<control>O',
             _('Create a new file'),
             self.open),
            ('New', gtk.STOCK_NEW, _('_New'), '<control>N',
             _('New ase.gui window'),
             lambda widget: os.system('ase-gui &')),
            ('Save', gtk.STOCK_SAVE, _('_Save'), '<control>S',
             _('Save current file'),
             self.save),
            ('Quit', gtk.STOCK_QUIT, _('_Quit'), '<control>Q',
             _('Quit'),
             self.exit),
            ('SelectAll', None, _('Select _all'), None,
             '',
             self.select_all),
            ('Invert', None, _('_Invert selection'), None,
             '',
             self.invert_selection),
            ('SelectConstrained', None, _('Select _constrained atoms'), None,
             '',
             self.select_constrained_atoms),
            ('SelectImmobile', None, _('Select _immobile atoms'), '<control>I',
             '',
             self.select_immobile_atoms),
            ('Copy', None, _('_Copy'), '<control>C',
             _('Copy current selection and its orientation to clipboard'),
             self.copy_atoms),
            ('Paste', None, _('_Paste'), '<control>V',
             _('Insert current clipboard selection'),
             self.paste_atoms),
            ('Modify', None, _('_Modify'), '<control>Y',
             _('Change tags, moments and atom types of the selected atoms'),
             self.modify_atoms),
            ('AddAtoms', None, _('_Add atoms'), '<control>A',
             _('Insert or import atoms and molecules'),
             self.add_atoms),
            ('DeleteAtoms', None, _('_Delete selected atoms'), 'BackSpace',
             _('Delete the selected atoms'),
             self.delete_selected_atoms),
            # Frame navigation: all four share the self.step callback and
            # are distinguished by action name.
            ('First', gtk.STOCK_GOTO_FIRST, _('_First image'), 'Home',
             '',
             self.step),
            ('Previous', gtk.STOCK_GO_BACK, _('_Previous image'), 'Page_Up',
             '',
             self.step),
            ('Next', gtk.STOCK_GO_FORWARD, _('_Next image'), 'Page_Down',
             '',
             self.step),
            ('Last', gtk.STOCK_GOTO_LAST, _('_Last image'), 'End',
             '',
             self.step),
            ('ShowLabels', None, _('Show _Labels')),
            ('HideAtoms', None, _('Hide selected atoms'), None,
             '',
             self.hide_selected),
            ('ShowAtoms', None, _('Show selected atoms'), None,
             '',
             self.show_selected),
            ('QuickInfo', None, _('Quick Info ...'), None,
             '',
             self.quick_info_window),
            ('Repeat', None, _('Repeat ...'), None,
             '',
             self.repeat_window),
            ('Rotate', None, _('Rotate ...'), None,
             '',
             self.rotate_window),
            ('Colors', None, _('Colors ...'), 'C', '',
             self.colors_window),
            # TRANSLATORS: verb
            ('Focus', gtk.STOCK_ZOOM_FIT, _('Focus'), 'F',
             '',
             self.focus),
            ('ZoomIn', gtk.STOCK_ZOOM_IN, _('Zoom in'), 'plus',
             '',
             self.zoom),
            ('ZoomOut', gtk.STOCK_ZOOM_OUT, _('Zoom out'), 'minus',
             '',
             self.zoom),
            # View-plane shortcuts; all dispatch to self.set_view by name.
            ('ChangeView', None, _('Change View')),
            ('ResetView', None, _('Reset View'), 'equal',
             '',
             self.reset_view),
            ('xyPlane', None, _('\'xy\' Plane'), 'z', '', self.set_view),
            ('yzPlane', None, _('\'yz\' Plane'), 'x', '', self.set_view),
            ('zxPlane', None, _('\'zx\' Plane'), 'y', '', self.set_view),
            ('yxPlane', None, _('\'yx\' Plane'), '<alt>z', '', self.set_view),
            ('zyPlane', None, _('\'zy\' Plane'), '<alt>x', '', self.set_view),
            ('xzPlane', None, _('\'xz\' Plane'), '<alt>y', '', self.set_view),
            ('a2a3Plane', None, _('\'a2 a3\' Plane'), '1', '', self.set_view),
            ('a3a1Plane', None, _('\'a3 a1\' Plane'), '2', '', self.set_view),
            ('a1a2Plane', None, _('\'a1 a2\' Plane'), '3', '', self.set_view),
            ('a3a2Plane', None, _('\'a3 a2\' Plane'), '<alt>1', '', self.set_view),
            ('a1a3Plane', None, _('\'a1 a3\' Plane'), '<alt>2', '', self.set_view),
            ('a2a1Plane', None, _('\'a2 a1\' Plane'), '<alt>3', '', self.set_view),
            ('Settings', gtk.STOCK_PREFERENCES, _('Settings ...'), None,
             '',
             self.settings),
            # External viewers; all dispatch to self.external_viewer by name.
            ('VMD', None, _('VMD'), None,
             '',
             self.external_viewer),
            ('RasMol', None, _('RasMol'), None,
             '',
             self.external_viewer),
            ('XMakeMol', None, _('xmakemol'), None,
             '',
             self.external_viewer),
            ('Avogadro', None, _('avogadro'), None,
             '',
             self.external_viewer),
            ('Graphs', None, _('Graphs ...'), None,
             '',
             self.plot_graphs),
            ('Movie', None, _('Movie ...'), None,
             '',
             self.movie),
            ('EModify', None, _('Expert mode ...'), '<control>E',
             '',
             self.execute),
            ('Constraints', None, _('Constraints ...'), None,
             '',
             self.constraints_window),
            ('RenderScene', None, _('Render scene ...'), None,
             '',
             self.render_window),
            ('DFT', None, _('DFT ...'), None,
             '',
             self.dft_window),
            ('NEB', None, _('NE_B'), None,
             '',
             self.NEB),
            ('BulkModulus', None, _('B_ulk Modulus'), None,
             '',
             self.bulk_modulus),
            ('Bulk', None, _('_Bulk Crystal'), None,
             _("Create a bulk crystal with arbitrary orientation"),
             self.bulk_window),
            ('Surface', None, _('_Surface slab'), None,
             _("Create the most common surfaces"),
             self.surface_window),
            ('Nanoparticle', None, _('_Nanoparticle'), None,
             _("Create a crystalline nanoparticle"),
             self.nanoparticle_window),
            ('Nanotube', None, _('Nano_tube'), None,
             _("Create a nanotube"),
             self.nanotube_window),
            ('Graphene', None, _('Graphene'), None,
             _("Create a graphene sheet or nanoribbon"),
             self.graphene_window),
            ('SetCalculator', None, _('Set _Calculator'), None,
             _("Set a calculator used in all calculation modules"),
             self.calculator_window),
            ('EnergyForces', None, _('_Energy and Forces'), None,
             _("Calculate energy and forces"),
             self.energy_window),
            ('Minimize', None, _('Energy _Minimization'), None,
             _("Minimize the energy"),
             self.energy_minimize_window),
            ('Scaling', None, _('Scale system'), None,
             _("Deform system by scaling it"),
             self.scaling_window),
            ('About', None, _('_About'), None,
             None,
             self.about),
            ('Webpage', gtk.STOCK_HELP, _('Webpage ...'), None, None, webpage),
            ('Debug', None, _('Debug ...'), None, None, self.debug)])
        # Toggle actions: last element of each tuple is the initial state.
        actions.add_toggle_actions([
            ('ShowUnitCell', None, _('Show _unit cell'), '<control>U',
             'Bold',
             self.toggle_show_unit_cell,
             show_unit_cell > 0),
            ('ShowAxes', None, _('Show _axes'), None,
             'Bold',
             self.toggle_show_axes,
             True),
            ('ShowBonds', None, _('Show _bonds'), '<control>B',
             'Bold',
             self.toggle_show_bonds,
             show_bonds),
            ('ShowVelocities', None, _('Show _velocities'),
             '<control>G', 'Bold',
             self.toggle_show_velocities,
             False),
            ('ShowForces', None, _('Show _forces'), '<control>F',
             'Bold',
             self.toggle_show_forces,
             False),
            ('MoveAtoms', None, _('_Move atoms'), '<control>M',
             'Bold',
             self.toggle_move_mode,
             False),
            ('RotateAtoms', None, _('_Rotate atoms'), '<control>R',
             'Bold',
             self.toggle_rotate_mode,
             False),
            ('OrientAtoms', None, _('Orien_t atoms'), '<control>T',
             'Bold',
             self.toggle_orient_mode,
             False)
            ])
        # Mutually exclusive label modes (integer values passed to
        # self.show_labels); initial value is 0 (no labels).
        actions.add_radio_actions((
            ('NoLabel', None, _('_None'), None, None, 0),
            ('AtomIndex', None, _('Atom _Index'), None, None, 1),
            ('MagMom', None, _('_Magnetic Moments'), None, None, 2),
            ('Element', None, _('_Element Symbol'), None, None, 3)),
            0, self.show_labels)
        # Assemble the menu bar from the ui_info XML description.
        self.ui = ui = gtk.UIManager()
        ui.insert_action_group(actions, 0)
        self.window.add_accel_group(ui.get_accel_group())
        try:
            mergeid = ui.add_ui_from_string(ui_info)
        # NOTE(review): `gobject` is never imported in this module, so this
        # handler would itself raise NameError if add_ui_from_string failed.
        except gobject.GError as msg:
            print(_('building menus failed: %s') % msg)
        vbox.pack_start(ui.get_widget('/MenuBar'), False, False, 0)
        # Mix in the drawing area (View) and status bar (Status).
        View.__init__(self, vbox, rotations)
        Status.__init__(self, vbox)
        vbox.show()
        #self.window.set_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.window.connect('key-press-event', self.scroll)
        self.window.connect('scroll_event', self.scroll_event)
        self.window.show()
        self.graphs = []  # List of open pylab windows
        self.graph_wref = []  # List of weakrefs to Graph objects
        self.movie_window = None
        self.vulnerable_windows = []
        self.simulation = {}  # Used by modules on Calculate menu.
        self.module_state = {}  # Used by modules to store their state.
    def run(self, expr=None):
        """Initialize colors/coordinates and enter the GTK main loop.

        If several images are loaded a movie window is opened; if the
        first image has a (non-NaN) energy, or *expr* is given, graphs
        are plotted before the main loop starts.
        """
        self.set_colors()
        self.set_coordinates(self.images.nimages - 1, focus=True)
        if self.images.nimages > 1:
            self.movie()
        # Fall back to the configured default graph expression when the
        # images carry energies.
        if expr is None and not np.isnan(self.images.E[0]):
            expr = self.config['gui_graphs_string']
        if expr is not None and expr != '' and self.images.nimages > 1:
            self.plot_graphs(expr=expr)
        # Blocks until gtk.main_quit() is called (see exit()).
        gtk.main()
def step(self, action):
d = {'First': -10000000,
'Previous': -1,
'Next': 1,
'Last': 10000000}[action.get_name()]
i = max(0, min(self.images.nimages - 1, self.frame + d))
self.set_frame(i)
if self.movie_window is not None:
self.movie_window.frame_number.value = i
def _do_zoom(self, x):
"""Utility method for zooming"""
self.scale *= x
self.draw()
def zoom(self, action):
"""Zoom in/out on keypress or clicking menu item"""
x = {'ZoomIn': 1.2, 'ZoomOut':1 /1.2}[action.get_name()]
self._do_zoom(x)
    def scroll_event(self, window, event):
        """Zoom in/out when using mouse wheel.

        Holding Shift switches from the coarse 20% zoom step to a fine
        1% step; the boolean SHIFT is used arithmetically below.
        """
        # NOTE: event.state is compared with ==, not &, so combined
        # modifiers (e.g. Ctrl+Shift) do not count as SHIFT.
        SHIFT = event.state == gtk.gdk.SHIFT_MASK
        x = 1.0
        if event.direction == gtk.gdk.SCROLL_UP:
            x = 1.0 + (1-SHIFT)*0.2 + SHIFT*0.01
        elif event.direction == gtk.gdk.SCROLL_DOWN:
            x = 1.0 / (1.0 + (1-SHIFT)*0.2 + SHIFT*0.01)
        self._do_zoom(x)
    def settings(self, menuitem):
        """Open the GUI settings window."""
        Settings(self)
    def scroll(self, window, event):
        """Keyboard handler: zoom, pan, or move/rotate/orient atoms.

        Keypad +/- zooms; the arrow keys either translate the view or,
        when one of the Move/Rotate/Orient tool modes is active,
        transform the selected atoms.  Shift selects fine steps and
        Ctrl switches Up/Down from y- to z-motion.
        """
        from copy import copy
        # NOTE: event.state is compared with ==, not &, so combined
        # modifiers count as neither CTRL nor SHIFT.
        CTRL = event.state == gtk.gdk.CONTROL_MASK
        SHIFT = event.state == gtk.gdk.SHIFT_MASK
        # Map the pressed key to either a zoom request or a (dx, dy, dz)
        # step; unknown keys map to None and are ignored below.
        dxdydz = {gtk.keysyms.KP_Add: ('zoom', 1.0 + (1-SHIFT)*0.2 + SHIFT*0.01, 0),
                  gtk.keysyms.KP_Subtract: ('zoom', 1 / (1.0 + (1-SHIFT)*0.2 + SHIFT*0.01), 0),
                  gtk.keysyms.Up: ( 0, +1 - CTRL, +CTRL),
                  gtk.keysyms.Down: ( 0, -1 + CTRL, -CTRL),
                  gtk.keysyms.Right: (+1, 0, 0),
                  gtk.keysyms.Left: (-1, 0, 0)}.get(event.keyval, None)
        # NOTE(review): 'inch', 'sel' (this one) and 'd' below are
        # assigned but never used in this method -- confirm dead code.
        try:
            inch = chr(event.keyval)
        except:
            inch = None
        sel = []
        # Which tool mode (if any) is currently toggled on in the menu.
        atom_move = self.ui.get_widget('/MenuBar/ToolsMenu/MoveAtoms'
                                       ).get_active()
        atom_rotate = self.ui.get_widget('/MenuBar/ToolsMenu/RotateAtoms'
                                         ).get_active()
        atom_orient = self.ui.get_widget('/MenuBar/ToolsMenu/OrientAtoms'
                                         ).get_active()
        if dxdydz is None:
            return
        dx, dy, dz = dxdydz
        if dx == 'zoom':
            self._do_zoom(dy)
            return
        d = self.scale * 0.1
        # Direction of motion expressed in the rotated view frame.
        tvec = np.array([dx, dy, dz])
        dir_vec = np.dot(self.axes, tvec)
        if (atom_move):
            # Translate the movable atoms (and all frames) by a coarse
            # 0.1 or fine 0.01 step along dir_vec.
            rotmat = self.axes
            s = 0.1
            if SHIFT:
                s = 0.01
            add = s * dir_vec
            for i in range(len(self.R)):
                if self.atoms_to_rotate_0[i]:
                    self.R[i] += add
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] += add
        elif atom_rotate:
            from .rot_tools import rotate_about_vec, \
                rotate_vec
            # Rotate either the explicit selection, or - if nothing is
            # selected - the movable atoms.
            sel = self.images.selected
            if sum(sel) == 0:
                sel = self.atoms_to_rotate_0
            nsel = sum(sel)
            # this is the first one to get instantiated
            if nsel != 2:
                self.rot_vec = dir_vec
            # 'change' tracks whether the rotation axis must be
            # recomputed (selection changed, view rotated, etc.).
            change = False
            z_axis = np.dot(self.axes, np.array([0, 0, 1]))
            if self.atoms_to_rotate == None:
                # First rotation ever: initialize the cached state.
                change = True
                self.z_axis_old = z_axis.copy()
                self.dx_change = [0, 0]
                self.atoms_to_rotate = self.atoms_to_rotate_0.copy()
                self.atoms_selected = sel.copy()
                self.rot_vec = dir_vec
            if nsel != 2 or sum(self.atoms_to_rotate) == 2:
                self.dx_change = [0, 0]
            for i in range(len(sel)):
                if sel[i] != self.atoms_selected[i]:
                    change = True
            # cz holds the in-plane vs out-of-plane key components.
            cz = [dx, dy+dz]
            if cz[0] or cz[1]:
                change = False
                if not(cz[0] * (self.dx_change[1])):
                    change = True
            for i in range(2):
                if cz[i] and self.dx_change[i]:
                    # Flip the rotation axis when the key direction
                    # reverses relative to the previous event.
                    self.rot_vec = self.rot_vec * cz[i] * self.dx_change[i]
                    if cz[1]:
                        change = False
            if np.prod(self.z_axis_old != z_axis):
                change = True
                self.z_axis_old = z_axis.copy()
            self.dx_change = copy(cz)
            # Four atoms selected in order define a dihedral rotation.
            dihedral_rotation = len(self.images.selected_ordered) == 4
            if change:
                self.atoms_selected = sel.copy()
                if nsel == 2 and sum(self.atoms_to_rotate) != 2:
                    # Two selected atoms define the rotation axis from
                    # their connecting vector and its in-view normal.
                    asel = []
                    for i, j in enumerate(sel):
                        if j:
                            asel.append(i)
                    a1, a2 = asel
                    rvx = self.images.P[self.frame][a1] - \
                        self.images.P[self.frame][a2]
                    rvy = np.cross(rvx,
                                   np.dot(self.axes,
                                          np.array([0, 0, 1])))
                    self.rot_vec = rvx * dx + rvy * (dy + dz)
                self.dx_change = [dx, dy+dz]
                # dihedral rotation?
                if dihedral_rotation:
                    sel = self.images.selected_ordered
                    self.rot_vec = (dx+dy+dz)*(self.R[sel[2]]-self.R[sel[1]])
            # Rotation center: the second dihedral atom, or the centroid
            # of the selection.
            rot_cen = np.array([0.0, 0.0, 0.0])
            if dihedral_rotation:
                sel = self.images.selected_ordered
                rot_cen = self.R[sel[1]].copy()
            elif nsel:
                for i, b in enumerate( sel):
                    if b:
                        rot_cen += self.R[i]
                rot_cen /= float(nsel)
            # 5 degrees per step, 1 degree with Shift, converted to
            # radians (3.1415/360 * 2... NOTE: uses a truncated pi).
            degrees = 5 * (1 - SHIFT) + SHIFT
            degrees = abs(sum(dxdydz)) * 3.1415 / 360.0 * degrees
            rotmat = rotate_about_vec(self.rot_vec, degrees)
            # now rotate the atoms that are to be rotated
            for i in range(len(self.R)):
                if self.atoms_to_rotate[i]:
                    # Shift to the rotation center, rotate, shift back -
                    # applied to the display copy and to every frame.
                    self.R[i] -= rot_cen
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] -= rot_cen
                    self.R[i] = rotate_vec(rotmat, self.R[i])
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] = rotate_vec(rotmat, self.images.P[jx][i])
                    self.R[i] += rot_cen
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] += rot_cen
        elif atom_orient:
            # Re-orient the view so that orient_normal points along the
            # pressed arrow direction.
            to_vec = np.array([dx, dy, dz])
            from .rot_tools import rotate_vec_into_newvec
            rot_mat = rotate_vec_into_newvec(self.orient_normal, to_vec)
            self.axes = rot_mat
            self.set_coordinates()
        else:
            # No tool mode: pan the view.
            self.center -= (dx * 0.1 * self.axes[:, 0] -
                            dy * 0.1 * self.axes[:, 1])
        self.draw()
    def copy_atoms(self, widget):
        "Copies selected atoms to a clipboard."
        clip = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
        if self.images.selected.any():
            atoms = self.images.get_atoms(self.frame)
            lena = len(atoms)
            # Drop the unselected atoms, iterating from the end so that
            # deletions do not shift indices still to be visited.
            for i in range(len(atoms)):
                li = lena-1-i
                if not self.images.selected[li]:
                    del(atoms[li])
            # Rotate positions into the current view coordinate system.
            for i in atoms:
                i.position = np.dot(self.axes.T,i.position)
            # Remember the position of the lowest atom (smallest view-z)
            # as the reference point used by paste.
            ref = atoms[0].position
            for i in atoms:
                if i.position[2] < ref[2]:
                    ref = i.position
            atoms.reference_position = ref
            # Pickle protocol 0 is ASCII, so the clipboard gets text.
            clip.set_text(pickle.dumps(atoms, 0))
    def paste_atoms(self, widget):
        "Inserts clipboard selection into the current frame using the add_atoms window."
        clip = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
        try:
            # SECURITY NOTE: unpickling clipboard text can execute
            # arbitrary code if the clipboard content is malicious.
            atoms = pickle.loads(clip.wait_for_text())
            self.add_atoms(widget, data='Paste', paste=atoms)
        except:
            # Clipboard did not hold a pickled Atoms object - ignore.
            pass
    def add_atoms(self, widget, data=None, paste=None):
        """
        Presents a dialog box that allows the user to add atoms/molecules
        to the current slab, or to paste the clipboard.

        The molecule/atom is rotated using the current rotation of the
        coordinate system.  It can be added at a specified position - if
        the keyword auto+Z is used, the COM of the selected atoms will be
        used as COM for the added molecule, which is furthermore
        translated Z Angstrom towards the user.
        If no atoms are selected, the COM of all the atoms will be used
        for the x-y components of the active coordinate system, while the
        z-direction will be chosen from the nearest atom position
        along this direction.

        Note: If this option is used, all frames except the active one are deleted.

        The *data* argument selects the dialog stage: None/'Paste' build
        the dialog, 'load' asks for a file name, 'OK' applies, 'Cancel'
        closes.
        """
        # 'load': ask for a file name, then fall through to the 'OK'
        # handling below with that file name as the molecule.
        if data == 'load':
            chooser = gtk.FileChooserDialog(
                _('Open ...'), None, gtk.FILE_CHOOSER_ACTION_OPEN,
                (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                 gtk.STOCK_OPEN, gtk.RESPONSE_OK))
            chooser.set_filename(_("<<filename>>"))
            ok = chooser.run()
            if ok == gtk.RESPONSE_OK:
                filename = chooser.get_filename()
                chooser.destroy()
            else:
                chooser.destroy()
                return
        if data == 'OK' or data == 'load':
            import ase
            if data == 'load':
                molecule = filename
            else:
                molecule = self.add_entries[1].get_text()
            tag = self.add_entries[2].get_text()
            mom = self.add_entries[3].get_text()
            pos = self.add_entries[4].get_text().lower()
            if paste is not None:
                a = paste.copy()
            else:
                a = None
            if a is None:
                # Interpret the text as, in order: a chemical symbol, a
                # known molecule name, or finally a file name.
                try:
                    a = ase.Atoms([ase.Atom(molecule)])
                except:
                    try:
                        import ase.structure
                        a = ase.structure.molecule(molecule)
                    except:
                        try:
                            a = ase.io.read(molecule, -1)
                        except:
                            self.add_entries[1].set_text('?' + molecule)
                            return ()
            directions = np.transpose(self.axes)
            if a != None:
                for i in a:
                    # Validate and apply tag/moment; on bad input mark
                    # the offending entry with '?' and abort.
                    try:
                        i.set('tag',int(tag))
                    except:
                        self.add_entries[2].set_text('?' + tag)
                        return ()
                    try:
                        i.magmom = float(mom)
                    except:
                        self.add_entries[3].set_text('?' + mom)
                        return ()
                if self.origin_radio.get_active() and paste:
                    a.translate(-paste.reference_position)
                # apply the current rotation matrix to A
                for i in a:
                    i.position = np.dot(self.axes, i.position)
                # find the extent of the molecule in the local coordinate system
                if self.centre_radio.get_active():
                    # a_cen_pos: centroid of the molecule in view
                    # coordinates, shifted down by its lowest extent.
                    a_cen_pos = np.array([0.0, 0.0, 0.0])
                    m_cen_pos = 0.0
                    for i in a.positions:
                        a_cen_pos[0] += np.dot(directions[0], i)
                        a_cen_pos[1] += np.dot(directions[1], i)
                        a_cen_pos[2] += np.dot(directions[2], i)
                        m_cen_pos = max(np.dot(-directions[2], i), m_cen_pos)
                    a_cen_pos[0] /= len(a.positions)
                    a_cen_pos[1] /= len(a.positions)
                    a_cen_pos[2] /= len(a.positions)
                    a_cen_pos[2] -= m_cen_pos
                else:
                    a_cen_pos = np.array([0.0, 0.0, 0.0])
                # now find the position
                cen_pos = np.array([0.0, 0.0, 0.0])
                if sum(self.images.selected) > 0:
                    # Centroid of the selected atoms.
                    for i in range(len(self.R)):
                        if self.images.selected[i]:
                            cen_pos += self.R[i]
                    cen_pos /= sum(self.images.selected)
                elif len(self.R) > 0:
                    # No selection: x-y centroid of all atoms, z from
                    # the atom nearest the viewer.
                    px = 0.0
                    py = 0.0
                    pz = -1e6
                    for i in range(len(self.R)):
                        px += np.dot(directions[0], self.R[i])
                        py += np.dot(directions[1], self.R[i])
                        pz = max(np.dot(directions[2], self.R[i]), pz)
                    px = (px/float(len(self.R)))
                    py = (py/float(len(self.R)))
                    cen_pos = directions[0] * px + \
                              directions[1] * py + \
                              directions[2] * pz
                if 'auto' in pos:
                    pos = pos.replace('auto', '')
                    import re
                    pos = re.sub('\s', '', pos)
                    # SECURITY NOTE: eval() on text typed into the
                    # dialog - acceptable for a local GUI, but arbitrary
                    # code can be executed here.
                    if '(' in pos:
                        sign = eval('%s1' % pos[0])
                        a_cen_pos -= sign * np.array(eval(pos[1:]), float)
                    else:
                        a_cen_pos -= float(pos) * directions[2]
                else:
                    cen_pos = np.array(eval(pos))
                for i in a:
                    i.position += cen_pos - a_cen_pos
                # add them to the molecule
                atoms = self.images.get_atoms(self.frame)
                atoms = atoms + a
                self.new_atoms(atoms, init_magmom=True)
                # and finally select the new molecule for easy moving and rotation
                for i in range(len(a)):
                    self.images.selected[len(atoms) - i - 1] = True
                self.draw()
                self.add_entries[0].destroy()
        if data == 'Cancel':
            self.add_entries[0].destroy()
        if data == None or data == 'Paste':
            # Build the dialog window.
            from ase.gui.widgets import pack
            molecule = ''
            tag = '0'
            mom = '0'
            pos = 'auto+1'
            self.add_entries = []
            window = gtk.Window(gtk.WINDOW_TOPLEVEL)
            self.add_entries.append(window)
            window.set_title(_('Add atoms'))
            if data == 'Paste':
                molecule = paste.get_chemical_symbols(True)
                window.set_title(_('Paste'))
            vbox = gtk.VBox(False, 0)
            window.add(vbox)
            vbox.show()
            packed = False
            # NOTE(review): 'packed' is never set to True in the branch
            # that is taken, so the else-branch below is unreachable and
            # every label uses pack_start - confirm intent.
            for i, j in [[_('Insert atom or molecule'), molecule],
                         [_('Tag'), tag],
                         [_('Moment'), mom],
                         [_('Position'), pos]]:
                label = gtk.Label(i)
                if not packed:
                    vbox.pack_start(label, True, True, 0)
                else:
                    packed = True
                    vbox.add(label)
                label.show()
                entry = gtk.Entry()
                entry.set_text(j)
                self.add_entries.append(entry)
                entry.set_max_length(50)
                entry.show()
                vbox.add(entry)
            pack(vbox,[gtk.Label('atom/molecule reference:')])
            self.centre_radio = gtk.RadioButton(None, "centre ")
            self.origin_radio = gtk.RadioButton(self.centre_radio, "origin")
            pack(vbox,[self.centre_radio, self.origin_radio])
            if data == 'Paste':
                self.origin_radio.set_active(True)
                self.add_entries[1].set_sensitive(False)
            if data == None:
                button = gtk.Button(_('_Load molecule'))
                button.connect('clicked', self.add_atoms, 'load')
                button.show()
                vbox.add(button)
            button = gtk.Button(_('_OK'))
            button.connect('clicked', self.add_atoms, 'OK', paste)
            button.show()
            vbox.add(button)
            button = gtk.Button(_('_Cancel'))
            button.connect('clicked', self.add_atoms, 'Cancel')
            button.show()
            vbox.add(button)
            window.show()
    def modify_atoms(self, widget, data=None):
        """
        Presents a dialog box where the user is able to change the atomic type, the magnetic
        moment and tags of the selected atoms. An item marked with X will not be changed.

        *data* selects the dialog stage: None builds the dialog,
        'OK' applies the changes, any other value just closes it.
        """
        if data:
            if data == 'OK':
                import ase
                symbol = self.add_entries[1].get_text()
                tag = self.add_entries[2].get_text()
                mom = self.add_entries[3].get_text()
                a = None
                # Validate the symbol (unless left as the 'X' wildcard).
                if symbol != 'X':
                    try:
                        a = ase.Atoms([ase.Atom(symbol)])
                    except:
                        self.add_entries[1].set_text('?' + symbol)
                        return ()
                # Save the selection; new_atoms() below resets it.
                y = self.images.selected.copy()
                # Apply the changes to every selected atom.
                atoms = self.images.get_atoms(self.frame)
                for i in range(len(atoms)):
                    if self.images.selected[i]:
                        if a:
                            atoms[i].symbol = symbol
                        try:
                            if tag != 'X':
                                atoms[i].tag = int(tag)
                        except:
                            self.add_entries[2].set_text('?' + tag)
                            return ()
                        try:
                            if mom != 'X':
                                atoms[i].magmom = float(mom)
                        except:
                            self.add_entries[3].set_text('?' + mom)
                            return ()
                self.new_atoms(atoms, init_magmom=True)
                # Updates atomic labels by toggling the label mode off
                # and back to its previous value.
                cv = self.ui.get_action_groups()[0].\
                    get_action("NoLabel").get_current_value()
                self.ui.get_action_groups()[0].\
                    get_action("NoLabel").set_current_value(0)
                self.ui.get_action_groups()[0].\
                    get_action("NoLabel").set_current_value(cv)
                # Restore the selection saved before new_atoms reset it.
                self.images.selected = y
                self.draw()
            self.add_entries[0].destroy()
        if data == None and sum(self.images.selected):
            atoms = self.images.get_atoms(self.frame)
            s_tag = ''
            s_mom = ''
            s_symbol = ''
            # Get the tags, moments and symbols of the selected atoms;
            # 'X' marks values that differ between selected atoms.
            for i in range(len(atoms)):
                if self.images.selected[i]:
                    if not(s_tag):
                        s_tag = str(atoms[i].tag)
                    elif s_tag != str(atoms[i].tag):
                        s_tag = 'X'
                    if not(s_mom):
                        s_mom = ("%2.2f" % (atoms[i].magmom))
                    elif s_mom != ("%2.2f" % (atoms[i].magmom)):
                        s_mom = 'X'
                    if not(s_symbol):
                        s_symbol = str(atoms[i].symbol)
                    elif s_symbol != str(atoms[i].symbol):
                        s_symbol = 'X'
            self.add_entries = []
            window = gtk.Window(gtk.WINDOW_TOPLEVEL)
            self.add_entries.append(window)
            window.set_title(_('Modify'))
            vbox = gtk.VBox(False, 0)
            window.add(vbox)
            vbox.show()
            pack = False
            # NOTE(review): 'pack' is never set True in the branch that
            # is taken, so the else-branch below is unreachable; it also
            # shadows the ase.gui.widgets.pack helper used elsewhere in
            # this file - confirm intent.
            for i, j in [[_('Atom'), s_symbol],
                         [_('Tag'), s_tag],
                         [_('Moment'), s_mom]]:
                label = gtk.Label(i)
                if not pack:
                    vbox.pack_start(label, True, True, 0)
                else:
                    pack = True
                    vbox.add(label)
                label.show()
                entry = gtk.Entry()
                entry.set_text(j)
                self.add_entries.append(entry)
                entry.set_max_length(50)
                entry.show()
                vbox.add(entry)
            button = gtk.Button(_('_OK'))
            button.connect('clicked', self.modify_atoms, 'OK')
            button.show()
            vbox.add(button)
            button = gtk.Button(_('_Cancel'))
            button.connect('clicked', self.modify_atoms, 'Cancel')
            button.show()
            vbox.add(button)
            window.show()
    def delete_selected_atoms(self, widget=None, data=None):
        """Delete the selected atoms after a confirmation dialog.

        Called with data=None to open the dialog; the dialog buttons
        call back with data='OK' or data='Cancel'.
        """
        if data == 'OK':
            atoms = self.images.get_atoms(self.frame)
            lena = len(atoms)
            # Delete from the end so removals do not shift the indices
            # of atoms still to be checked.
            for i in range(len(atoms)):
                li = lena-1-i
                if self.images.selected[li]:
                    del(atoms[li])
            self.new_atoms(atoms)
            self.draw()
        if data:
            # Both OK and Cancel close the confirmation window.
            self.delete_window.destroy()
        if not(data) and sum(self.images.selected):
            nselected = sum(self.images.selected)
            self.delete_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
            self.delete_window.set_title(_('Confirmation'))
            self.delete_window.set_border_width(10)
            self.box1 = gtk.HBox(False, 0)
            self.delete_window.add(self.box1)
            # ngettext picks singular/plural depending on nselected.
            self.button1 = gtk.Button(ngettext('Delete selected atom?',
                                               'Delete selected atoms?',
                                               nselected))
            self.button1.connect("clicked", self.delete_selected_atoms, "OK")
            self.box1.pack_start(self.button1, True, True, 0)
            self.button1.show()
            self.button2 = gtk.Button(_("Cancel"))
            self.button2.connect("clicked", self.delete_selected_atoms, "Cancel")
            self.box1.pack_start(self.button2, True, True, 0)
            self.button2.show()
            self.box1.show()
            self.delete_window.show()
    def debug(self, x):
        """Open the interactive debug window."""
        from ase.gui.debug import Debug
        Debug(self)
    def execute(self, widget=None):
        """Open the Python-expression execution window."""
        from ase.gui.execute import Execute
        Execute(self)
    def constraints_window(self, widget=None):
        """Open the window for editing atomic constraints."""
        from ase.gui.constraints import Constraints
        Constraints(self)
    def dft_window(self, widget=None):
        """Open the DFT settings window."""
        from ase.gui.dft import DFT
        DFT(self)
    def select_all(self, widget):
        """Select every atom and redraw."""
        self.images.selected[:] = True
        self.draw()
    def invert_selection(self, widget):
        """Invert the current selection and redraw."""
        self.images.selected[:] = ~self.images.selected
        self.draw()
    def select_constrained_atoms(self, widget):
        """Select all constrained (non-dynamic) atoms and redraw."""
        self.images.selected[:] = ~self.images.dynamic
        self.draw()
def select_immobile_atoms(self, widget):
if self.images.nimages > 1:
R0 = self.images.P[0]
for R in self.images.P[1:]:
self.images.selected[:] =~ (np.abs(R - R0) > 1.0e-10).any(1)
self.draw()
    def movie(self, widget=None):
        """Open the movie-control window for multi-image files."""
        from ase.gui.movie import Movie
        self.movie_window = Movie(self)
    def plot_graphs(self, x=None, expr=None):
        """Open a Graphs window; optionally plot *expr* immediately."""
        from ase.gui.graphs import Graphs
        g = Graphs(self)
        if expr is not None:
            g.plot(expr=expr)
        # Keep only a weak reference so closed windows can be collected.
        self.graph_wref.append(weakref.ref(g))
def plot_graphs_newatoms(self):
"Notify any Graph objects that they should make new plots."
new_wref = []
found = 0
for wref in self.graph_wref:
ref = wref()
if ref is not None:
ref.plot()
new_wref.append(wref) # Preserve weakrefs that still work.
found += 1
self.graph_wref = new_wref
return found
    def NEB(self, action):
        """Open the nudged-elastic-band analysis window."""
        from ase.gui.neb import NudgedElasticBand
        NudgedElasticBand(self.images)
    def bulk_modulus(self, action):
        """Plot an equation-of-state fit to estimate the bulk modulus."""
        from ase.gui.bulk_modulus import BulkModulus
        BulkModulus(self.images)
def open(self, button=None, filenames=None):
if filenames == None:
chooser = gtk.FileChooserDialog(
_('Open ...'), None, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(_("<<filename>>"))
# Add a file type filter
name_to_suffix = {}
types = gtk.combo_box_new_text()
for name, suffix in [(_('Automatic'), None),
(_('Dacapo netCDF output file'),'dacapo'),
(_('Virtual Nano Lab file'),'vnl'),
(_('ASE pickle trajectory'),'traj'),
(_('ASE bundle trajectory'),'bundle'),
(_('GPAW text output'),'gpaw-text'),
(_('CUBE file'),'cube'),
(_('XCrySDen Structure File'),'xsf'),
(_('Dacapo text output'),'dacapo-text'),
(_('XYZ-file'),'xyz'),
(_('VASP POSCAR/CONTCAR file'),'vasp'),
(_('VASP OUTCAR file'),'vasp_out'),
(_('Protein Data Bank'),'pdb'),
(_('CIF-file'),'cif'),
(_('FHI-aims geometry file'),'aims'),
(_('FHI-aims output file'),'aims_out'),
(_('TURBOMOLE coord file'),'tmol'),
(_('exciting input'),'exi'),
(_('WIEN2k structure file'),'struct'),
(_('DftbPlus input file'),'dftb'),
(_('ETSF format'),'etsf.nc'),
(_('CASTEP geom file'),'cell'),
(_('CASTEP output file'),'castep'),
(_('CASTEP trajectory file'),'geom'),
(_('DFTBPlus GEN format'),'gen')
]:
types.append_text(name)
name_to_suffix[name] = suffix
types.set_active(0)
img_vbox = gtk.VBox()
pack(img_vbox, [gtk.Label(_('File type:')), types])
img_vbox.show()
chooser.set_extra_widget(img_vbox)
ok = chooser.run() == gtk.RESPONSE_OK
if ok:
filenames = [chooser.get_filename()]
filetype = types.get_active_text()
chooser.destroy()
if not ok:
return
n_current = self.images.nimages
self.reset_tools_modes()
self.images.read(filenames, slice(None), name_to_suffix[filetype])
self.set_colors()
self.set_coordinates(self.images.nimages - 1, focus=True)
def import_atoms (self, button=None, filenames=None):
if filenames == None:
chooser = gtk.FileChooserDialog(
_('Open ...'), None, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
ok = chooser.run()
if ok == gtk.RESPONSE_OK:
filenames = [chooser.get_filename()]
chooser.destroy()
if not ok:
return
self.images.import_atoms(filenames, self.frame)
self.set_colors()
self.set_coordinates(self.images.nimages - 1, focus=True)
    def save(self, menuitem):
        """Open the save-file dialog."""
        SaveWindow(self)
    def quick_info_window(self, menuitem):
        """Show quick information about the current image."""
        QuickInfo(self)
    def bulk_window(self, menuitem):
        """Open the bulk-crystal setup window."""
        SetupBulkCrystal(self)
    def surface_window(self, menuitem):
        """Open the surface-slab setup window."""
        SetupSurfaceSlab(self)
    def nanoparticle_window(self, menuitem):
        """Open the nanoparticle setup window."""
        SetupNanoparticle(self)
    def graphene_window(self, menuitem):
        """Open the graphene-sheet setup window."""
        SetupGraphene(self)
    def nanotube_window(self, menuitem):
        """Open the nanotube setup window."""
        SetupNanotube(self)
    def calculator_window(self, menuitem):
        """Open the calculator-selection window."""
        SetCalculator(self)
    def energy_window(self, menuitem):
        """Open the energy-and-forces calculation window."""
        EnergyForces(self)
    def energy_minimize_window(self, menuitem):
        """Open the energy-minimization window."""
        Minimize(self)
    def scaling_window(self, menuitem):
        """Open the homogeneous-deformation (scaling) window."""
        HomogeneousDeformation(self)
    def new_atoms(self, atoms, init_magmom=False):
        "Set a new atoms object."
        self.reset_tools_modes()
        # Collapse any repeat before re-initializing, then restore it.
        rpt = getattr(self.images, 'repeat', None)
        self.images.repeat_images(np.ones(3, int))
        self.images.initialize([atoms], init_magmom=init_magmom)
        self.frame = 0 # Prevent crashes
        self.images.repeat_images(rpt)
        self.set_colors()
        self.set_coordinates(frame=0, focus=True)
        self.notify_vulnerable()
    def prepare_new_atoms(self):
        "Marks that the next call to append_atoms should clear the images."
        self.images.prepare_new_atoms()
    def append_atoms(self, atoms):
        "Append an atoms object as an additional frame."
        #self.notify_vulnerable() # Do this manually after last frame.
        frame = self.images.append_atoms(atoms)
        self.set_coordinates(frame=frame-1, focus=True)
def notify_vulnerable(self):
"""Notify windows that would break when new_atoms is called.
The notified windows may adapt to the new atoms. If that is not
possible, they should delete themselves.
"""
new_vul = [] # Keep weakrefs to objects that still exist.
for wref in self.vulnerable_windows:
ref = wref()
if ref is not None:
new_vul.append(wref)
ref.notify_atoms_changed()
self.vulnerable_windows = new_vul
    def register_vulnerable(self, obj):
        """Register windows that are vulnerable to changing the images.
        Some windows will break if the atoms (and in particular the
        number of images) are changed. They can register themselves
        and be closed when that happens.
        """
        # Store only a weak reference so the GUI never keeps a closed
        # window alive (see notify_vulnerable).
        self.vulnerable_windows.append(weakref.ref(obj))
    def exit(self, button, event=None):
        """Destroy the main window and quit the GTK main loop."""
        self.window.destroy()
        gtk.main_quit()
        # Returning True stops further handling of the delete event.
        return True
    def xxx(self, x=None,
            message1=_('Not implemented!'),
            message2=_('do you really need it?')):
        """Show a 'not implemented' popup (placeholder handler)."""
        oops(message1, message2)
    def about(self, action):
        """Show the About dialog (falls back to a popup on old PyGTK)."""
        try:
            dialog = gtk.AboutDialog()
            dialog.set_version(version)
            dialog.set_website(
                'https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html')
        except AttributeError:
            # gtk.AboutDialog is not available in this PyGTK version.
            self.xxx()
        else:
            dialog.run()
            dialog.destroy()
def webpage(widget):
    """Open the ASE GUI documentation in the default web browser."""
    import webbrowser
    webbrowser.open('https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html')
| suttond/MODOI | ase/gui/gui.py | Python | lgpl-3.0 | 48,962 | [
"ASE",
"Avogadro",
"CASTEP",
"CRYSTAL",
"FHI-aims",
"GPAW",
"NetCDF",
"RasMol",
"TURBOMOLE",
"VASP",
"VMD",
"WIEN2k",
"exciting"
] | 105d5dc91302b095db7292c53580519da5794bdcc039f08dd250f57584f9bdad |
# Regression test for the ASE STM module: scans from a freshly built STM
# object and from one restored from a pickle file must agree exactly.
from ase.calculators.test import make_test_dft_calculation
from ase.dft.stm import STM
atoms = make_test_dft_calculation()
stm = STM(atoms, [0, 1, 2])
c = stm.get_averaged_current(-1.0, 4.5)
x, y, h = stm.scan(-1.0, c)
# Round-trip through a pickle file and compare the scanned heights.
stm.write('stm.pckl')
x, y, h2 = STM('stm.pckl').scan(-1, c)
assert abs(h - h2).max() == 0
# Same idea, but using the electron density instead of the LDOS.
stm = STM(atoms, use_density=True)
c = stm.get_averaged_current(42, 4.5)
x, y = stm.linescan(42, c, [0, 0], [2, 2])
# The line runs diagonally from (0, 0) to (2, 2): its path length is
# 2*sqrt(2), and the endpoints should give (numerically) equal heights.
assert abs(x[-1] - 2 * 2**0.5) < 1e-13
assert abs(y[-1] - y[0]) < 1e-13
| suttond/MODOI | ase/test/stm.py | Python | lgpl-3.0 | 500 | [
"ASE"
] | e68108f0576f3243818e032a04f4c50b08b636bc5cb8e56703d57434932f49e4 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import pymatgen.io.jarvis as jio
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.util.testing import PymatgenTest
class JarvisAtomsAdaptorTest(unittest.TestCase):
    """Round-trip tests for the pymatgen <-> JARVIS structure adaptor."""
    @unittest.skipIf(not jio.jarvis_loaded, "JARVIS-tools not loaded.")
    def test_get_atoms_from_structure(self):
        # Convert a POSCAR-derived Structure to JARVIS Atoms and check
        # that the composition and lattice survive the conversion.
        structure = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")).structure
        atoms = jio.JarvisAtomsAdaptor.get_atoms(structure)
        jarvis_composition = atoms.composition.reduced_formula
        self.assertEqual(jarvis_composition, structure.composition.reduced_formula)
        self.assertTrue(atoms.lattice_mat is not None and atoms.lattice_mat.any())
    @unittest.skipIf(not jio.jarvis_loaded, "JARVIS-tools not loaded.")
    def test_get_structure(self):
        # Converting back to a Structure must recover the FePO4 formula.
        structure = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")).structure
        atoms = jio.JarvisAtomsAdaptor.get_atoms(structure)
        self.assertEqual(
            jio.JarvisAtomsAdaptor.get_structure(atoms).composition.reduced_formula,
            "FePO4",
        )
if __name__ == "__main__":
    unittest.main()
| vorwerkc/pymatgen | pymatgen/io/tests/test_jarvis.py | Python | mit | 1,261 | [
"VASP",
"pymatgen"
] | e0f320632f1d6062c1209915677cdc6068c23e2fa5ebdd5a15e71e0c36e97251 |
from itertools import product
import numpy as np
import nose.tools as nt
from ..convenience import lasso, step, threshold
def test_marginalize():
    """Randomized lasso: marginalize the subgradient over some inactive
    coordinates after verifying the randomization can be reconstructed.

    BUGFIX (all three tests): ``np.bool`` was a deprecated alias of the
    builtin ``bool`` and was removed in NumPy 1.24; the builtin is used
    directly, which is semantically identical on all NumPy versions.
    """
    np.random.seed(10)  # we are going to freeze the active set for this test
    n, p = 20, 5
    X = np.random.standard_normal((n, p))
    X /= np.sqrt((X**2).sum(0))[None, :]
    Y = X.dot([60.1, -61, 0, 0, 0]) + np.random.standard_normal(n)
    n, p = X.shape
    W = np.ones(p) * 20
    L = lasso.gaussian(X, Y, W, randomizer='gaussian', randomizer_scale=0.01)
    signs = L.fit()

    # we should be able to reconstruct the initial randomness by hand
    beta = L._view.initial_soln
    omega = X.T.dot(X.dot(beta) - Y) + L.ridge_term * beta + L._view.initial_subgrad
    np.testing.assert_allclose(omega, L._view._initial_omega)

    A1, b1 = L._view.opt_transform
    opt_state1 = L._view.observed_opt_state.copy()
    state1 = A1.dot(opt_state1) + b1

    # now marginalize over some coordinates of inactive
    marginalizing_groups = np.ones(p, bool)
    marginalizing_groups[:3] = False
    L.decompose_subgradient(marginalizing_groups = marginalizing_groups)

def test_condition():
    """Randomized lasso: condition on some inactive subgradient
    coordinates instead of marginalizing over them.
    """
    n, p = 20, 5
    np.random.seed(10)  # we are going to freeze the active set for this test
    X = np.random.standard_normal((n, p))
    X /= np.sqrt((X**2).sum(0))[None, :]
    Y = X.dot([60.1, -61, 0, 0, 0]) + np.random.standard_normal(n)
    n, p = X.shape
    W = np.ones(p) * 20
    L = lasso.gaussian(X, Y, W, randomizer='gaussian', randomizer_scale=0.01)
    signs = L.fit()

    # we should be able to reconstruct the initial randomness by hand
    beta = L._view.initial_soln
    omega = X.T.dot(X.dot(beta) - Y) + L.ridge_term * beta + L._view.initial_subgrad
    np.testing.assert_allclose(omega, L._view._initial_omega)

    A1, b1 = L._view.opt_transform
    state1 = A1.dot(L._view.observed_opt_state) + b1

    # now condition on some coordinates of inactive
    conditioning_groups = np.ones(p, bool)
    conditioning_groups[:3] = False
    L.decompose_subgradient(conditioning_groups = conditioning_groups)

def test_both():
    """Randomized lasso: marginalize over some inactive coordinates and
    condition on others in a single decomposition.
    """
    np.random.seed(10)  # we are going to freeze the active set for this test
    n, p = 20, 10
    X = np.random.standard_normal((n, p))
    X /= np.sqrt((X**2).sum(0))[None, :]
    Y = X.dot([60.1, -61] + [0] * (p-2)) + np.random.standard_normal(n)
    n, p = X.shape
    W = np.ones(p) * 20
    L = lasso.gaussian(X, Y, W, randomizer='gaussian', randomizer_scale=0.01)
    signs = L.fit()

    # we should be able to reconstruct the initial randomness by hand
    beta = L._view.initial_soln
    omega = X.T.dot(X.dot(beta) - Y) + L.ridge_term * beta + L._view.initial_subgrad
    np.testing.assert_allclose(omega, L._view._initial_omega)

    A1, b1 = L._view.opt_transform
    opt_state1 = L._view.observed_opt_state.copy()
    state1 = A1.dot(opt_state1) + b1

    # marginalize over some coordinates, condition on others
    marginalizing_groups = np.zeros(p, bool)
    marginalizing_groups[3:5] = True
    conditioning_groups = np.zeros(p, bool)
    conditioning_groups[5:7] = True
    L.decompose_subgradient(marginalizing_groups = marginalizing_groups,
                            conditioning_groups = conditioning_groups)
| selective-inference/selective-inference | selectinf/randomized/tests/sandbox/test_decompose_subgrad.py | Python | bsd-3-clause | 3,259 | [
"Gaussian"
] | 98300f27974c6d5712805945a3defd9b0be902178bb60bf021c33d471f772332 |
'''
Created on Oct 30, 2014
@author: rene
'''
import re
import string
from helper import DataObject, DataField, ListField
class IDDParser():
    """Parser for EnergyPlus IDD (input data dictionary) files.

    The IDD format declares data objects line by line: an object
    declaration ending in ',', followed by field declarations such as
    ``A1,`` (alpha) / ``N3;`` (numeric) / ``L2,`` (list), each carrying
    backslash-prefixed attributes like ``\\field`` or ``\\note``.

    NOTE(review): this module uses Python 2 print statements and will
    not run on Python 3 without porting.
    """
    def _is_new_field(self, line):
        # A field starts with A<n> or N<n> followed by ',' or ';'.
        return re.search(r"^\s*[AN]\d+\s*[,;]", line) is not None
    def _is_list(self, line):
        # List fields start with L<n>.
        return re.search(r"^\s*L\d+\s*[,;]", line) is not None
    def _is_field_attribute(self, line):
        # Attribute lines start with a backslash.
        return re.search(r"^\s*\\", line) is not None
    def _is_new_object(self, line):
        # Anything ending in ',' that is not a field/list/attribute
        # starts a new object declaration.
        if self._is_new_field(line) or self._is_field_attribute(line) or self._is_list(line):
            return False
        return re.search(r"^\s*(.*),", line) is not None
    def _parse_object_name(self, line):
        """Start a new DataObject named by everything before the comma."""
        match_obj_name = re.search(r"^\s*(.*),", line)
        assert match_obj_name is not None
        internal_name = match_obj_name.group(1)
        self.current_object = DataObject(internal_name)
    def _parse_field_name(self, line):
        """Append a DataField to the current object from a \\field line."""
        # print "NewField:\t", line
        match_field_name = re.search(r"\\field\s(.*)$", line)
        match_field_type = re.search(r"^\s*([AN])", line)
        if match_field_name is None or match_field_type is None:
            print "Did not match field name: ", line, match_field_name, match_field_type
            return
        ftype = match_field_type.group(1)
        internal_name = match_field_name.group(1).strip()
        if len(self.current_object.fields) > 0:
            # Finalize the previous field before starting a new one.
            self.current_object.fields[-1].conv_vals()
        self.current_object.fields.append(DataField(internal_name, ftype))
    def _parse_list(self, line):
        """Append a ListField to the current object from a \\list line."""
        match_list_name = re.search(r"\\list\s(.*)$", line)
        if match_list_name is None:
            print "Did not match list name: ", line, match_list_name
            return
        internal_name = match_list_name.group(1).strip()
        df = ListField(internal_name)
        df.is_list = True
        self.current_object.fields.append(df)
    def _parse_field_attribute(self, line):
        """Attach a backslash attribute to the most recent field."""
        last_field = self.current_object.fields[-1]
        match_attribute_name = re.match(r"\s*\\([^\s]+)", line)
        if match_attribute_name is not None:
            attribute_name = match_attribute_name.group(1).strip()
            # Some attributes are flags without a value.
            no_value_attributes = ["required-field"]
            if attribute_name in no_value_attributes:
                last_field.add_attribute(attribute_name, None)
            match_value = re.search(r"\s*\\[^\s]+\s?(.*)", line)
            if match_value is not None:
                value = match_value.group(1).strip()
                # 'key' and 'note' may occur several times per field;
                # their values are accumulated in lists.
                multiple_value_attributes = ["key",
                                             "note"]
                if attribute_name in multiple_value_attributes:
                    if attribute_name not in last_field.attributes:
                        last_field.add_attribute(attribute_name, [])
                    last_field.attributes[attribute_name].append(value)
                else:
                    last_field.add_attribute(attribute_name, value)
            else:
                print "found no field value for: ", line, attribute_name, match_value
        else:
            print "found no field attribute for: ", line, match_attribute_name
    def __init__(self):
        # Object currently being filled in while parsing.
        self.current_object = None
        # All completed DataObjects.
        self.objects = []
    def parse(self, path):
        """Parse the IDD file at *path* and return a list of DataObjects."""
        with open(path, mode='r') as f:
            for line in f:
                # Lines starting with '!' are comments.
                if line[0] == '!':
                    continue
                line = line.strip()
                print line
                if self._is_new_object(line):
                    # print "New Object! ", line
                    # Finalize and store the previous object first.
                    if self.current_object is not None:
                        if len(self.current_object.fields) > 0:
                            self.current_object.fields[-1].conv_vals()
                        self.objects.append(self.current_object)
                        self.current_object = None
                    self._parse_object_name(line)
                elif self._is_list(line):
                    self._parse_list(line)
                elif self._is_new_field(line):
                    assert self.current_object is not None
                    self._parse_field_name(line)
                elif self._is_field_attribute(line):
                    self._parse_field_attribute(line)
                else:
                    print "No detect:", line
        # Store the trailing object, if any.
        if self.current_object is not None:
            self.objects.append(self.current_object)
        # Mark objects that are referenced by any list field.
        list_objs = []
        for obj in self.objects:
            for field in obj.fields:
                field.conv_vals()
                if field.is_list:
                    list_objs.append(field.internal_name)
        for obj in self.objects:
            if obj.internal_name in list_objs:
                obj.is_list_object = True
        return self.objects
#
# for o in self.objects:
# print o.name, len(o.fields), [i.name for i in o.fields]
if __name__ == '__main__':
    # Smoke test: parse the EnergyPlus weather-file data dictionary.
    parser = IDDParser()
    objects = parser.parse("epw.idd")
| rbuffat/pyepw | generator/iddparser.py | Python | apache-2.0 | 5,051 | [
"EPW"
] | b76d4a97315550f15ff7f713cfc4ccc8703241673739684886adc23e24e0ddba |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a simple algorithm for extracting nearest neighbor
exchange parameters by mapping low energy magnetic orderings to a Heisenberg
model.
"""
import logging
import sys
from ast import literal_eval
import copy
import numpy as np
import pandas as pd
from monty.serialization import dumpfn
from monty.json import MSONable, jsanitize
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen import Structure
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "ncfrey@lbl.gov"
__status__ = "Development"
__date__ = "June 2019"
class HeisenbergMapper:
    """
    Class to compute exchange parameters from low energy magnetic orderings.

    Workflow: build StructureGraphs for each ordering, identify symmetry-unique
    sites, classify neighbor shells (NN/NNN/NNNN), assemble a linear system of
    Heisenberg energies, and invert it for the exchange parameters J_ij.
    """

    def __init__(self, ordered_structures, energies, cutoff=0.0, tol=0.02):
        """
        Exchange parameters are computed by mapping to a classical Heisenberg
        model. Strategy is the scheme for generating neighbors. Currently only
        MinimumDistanceNN is implemented.
        n+1 unique orderings are required to compute n exchange
        parameters.

        First run a MagneticOrderingsWF to obtain low energy collinear magnetic
        orderings and find the magnetic ground state. Then enumerate magnetic
        states with the ground state as the input structure, find the subset
        of supercells that map to the ground state, and do static calculations
        for these orderings.

        Args:
            ordered_structures (list): Structure objects with magmoms.
            energies (list): Total energies of each relaxed magnetic structure.
            cutoff (float): Cutoff in Angstrom for nearest neighbor search.
                Defaults to 0 (only NN, no NNN, etc.)
            tol (float): Tolerance (in Angstrom) on nearest neighbor distances
                being equal.

        Parameters:
            strategy (object): Class from pymatgen.analysis.local_env for
                constructing graphs.
            sgraphs (list): StructureGraph objects.
            unique_site_ids (dict): Maps each site to its unique numerical
                identifier.
            wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
                position.
            nn_interacations (dict): {i: j} pairs of NN interactions
                between unique sites.
            dists (dict): NN, NNN, and NNNN interaction distances
            ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
                graph.
            ex_params (dict): Exchange parameter values (meV/atom)
        """
        # Save original copies of inputs (pre-screening, with nonmagnetic ions)
        self.ordered_structures_ = ordered_structures
        self.energies_ = energies
        # Sanitize inputs and optionally order them by energy / magnetic moments
        hs = HeisenbergScreener(ordered_structures, energies, screen=False)
        ordered_structures = hs.screened_structures
        energies = hs.screened_energies
        self.ordered_structures = ordered_structures
        self.energies = energies
        self.cutoff = cutoff
        self.tol = tol
        # Get graph representations
        self.sgraphs = self._get_graphs(cutoff, ordered_structures)
        # Get unique site ids and wyckoff symbols (ground state = first entry)
        self.unique_site_ids, self.wyckoff_ids = self._get_unique_sites(
            ordered_structures[0]
        )
        # These attributes are set by internal methods
        self.nn_interactions = None
        self.dists = None
        self.ex_mat = None
        self.ex_params = None
        # Check how many commensurate graphs we found
        # NOTE(review): hard sys.exit(1) inside a library constructor —
        # an exception would be friendlier to callers; left as-is.
        if len(self.sgraphs) < 2:
            print("We need at least 2 unique orderings.")
            sys.exit(1)
        else:  # Set attributes
            self._get_nn_dict()
            self._get_exchange_df()

    @staticmethod
    def _get_graphs(cutoff, ordered_structures):
        """
        Generate graph representations of magnetic structures with nearest
        neighbor bonds. Right now this only works for MinimumDistanceNN.

        Args:
            cutoff (float): Cutoff in Angstrom for nearest neighbor search.
            ordered_structures (list): Structure objects.

        Returns:
            sgraphs (list): StructureGraph objects.
        """
        # Strategy for finding neighbors
        if cutoff:
            strategy = MinimumDistanceNN(cutoff=cutoff, get_all_sites=True)
        else:
            strategy = MinimumDistanceNN()  # only NN
        # Generate structure graphs
        sgraphs = [
            StructureGraph.with_local_env_strategy(s, strategy=strategy)
            for s in ordered_structures
        ]
        return sgraphs

    @staticmethod
    def _get_unique_sites(structure):
        """
        Get dict that maps site indices to unique identifiers.

        Args:
            structure (Structure): ground state Structure object.

        Returns:
            unique_site_ids (dict): maps tuples of equivalent site indices to a
                unique int identifier
            wyckoff_ids (dict): maps tuples of equivalent site indices to their
                wyckoff symbols
        """
        # Get a nonmagnetic representation of the supercell geometry
        s0 = CollinearMagneticStructureAnalyzer(
            structure, make_primitive=False, threshold=0.0
        ).get_nonmagnetic_structure(make_primitive=False)
        # Get unique sites and wyckoff positions
        if "wyckoff" in s0.site_properties:
            s0.remove_site_property("wyckoff")
        symm_s0 = SpacegroupAnalyzer(s0).get_symmetrized_structure()
        wyckoff = ["n/a"] * len(symm_s0)
        equivalent_indices = symm_s0.equivalent_indices
        wyckoff_symbols = symm_s0.wyckoff_symbols
        # Construct dictionaries that map sites to numerical and wyckoff
        # identifiers
        unique_site_ids = {}
        wyckoff_ids = {}
        i = 0
        for indices, symbol in zip(equivalent_indices, wyckoff_symbols):
            unique_site_ids[tuple(indices)] = i
            wyckoff_ids[i] = symbol
            i += 1
            for index in indices:
                wyckoff[index] = symbol
        return unique_site_ids, wyckoff_ids

    def _get_nn_dict(self):
        """Get dict of unique nearest neighbor interactions.

        Returns:
            None: (sets self.nn_interactions and self.dists instance variables)
        """
        tol = self.tol  # tolerance on NN distances
        sgraph = self.sgraphs[0]
        unique_site_ids = self.unique_site_ids
        nn_dict = {}
        nnn_dict = {}
        nnnn_dict = {}
        all_dists = []
        # Loop over unique sites and get neighbor distances up to NNNN
        for k in unique_site_ids:
            i = k[0]
            i_key = unique_site_ids[k]
            connected_sites = sgraph.get_connected_sites(i)
            # Distances are rounded to 2 decimals (0.01 Angstrom resolution)
            dists = [round(cs[-1], 2) for cs in connected_sites]  # i<->j distances
            dists = sorted(list(set(dists)))  # NN, NNN, NNNN, etc.
            dists = dists[:3]  # keep up to NNNN
            all_dists += dists
        # Keep only up to NNNN and call dists equal if they are within tol
        all_dists = sorted(list(set(all_dists)))
        rm_list = []
        for idx, d in enumerate(all_dists[:-1]):
            if abs(d - all_dists[idx + 1]) < tol:
                rm_list.append(idx + 1)
        all_dists = [d for idx, d in enumerate(all_dists) if idx not in rm_list]
        if len(all_dists) < 3:  # pad with zeros
            all_dists += [0.0] * (3 - len(all_dists))
        all_dists = all_dists[:3]
        labels = ["nn", "nnn", "nnnn"]
        dists = {l: d for (l, d) in zip(labels, all_dists)}
        # Get dictionary keys for interactions
        for k in unique_site_ids:
            i = k[0]
            i_key = unique_site_ids[k]
            connected_sites = sgraph.get_connected_sites(i)
            # Loop over sites and determine unique NN, NNN, etc. interactions
            for cs in connected_sites:
                dist = round(cs[-1], 2)  # i_j distance
                j = cs[2]  # j index
                for key in unique_site_ids.keys():
                    if j in key:
                        j_key = unique_site_ids[key]
                # Classify this bond into a neighbor shell by distance
                if abs(dist - dists["nn"]) <= tol:
                    nn_dict[i_key] = j_key
                elif abs(dist - dists["nnn"]) <= tol:
                    nnn_dict[i_key] = j_key
                elif abs(dist - dists["nnnn"]) <= tol:
                    nnnn_dict[i_key] = j_key
        nn_interactions = {"nn": nn_dict, "nnn": nnn_dict, "nnnn": nnnn_dict}
        self.dists = dists
        self.nn_interactions = nn_interactions

    def _get_exchange_df(self):
        """
        Loop over all sites in a graph and count the number and types of
        nearest neighbor interactions, computing +-|S_i . S_j| to construct
        a Heisenberg Hamiltonian for each graph.

        Returns:
            None: (sets self.ex_mat instance variable)

        TODO:
            * Deal with large variance in |S| across configs
        """
        sgraphs = self.sgraphs
        tol = self.tol
        unique_site_ids = self.unique_site_ids
        nn_interactions = self.nn_interactions
        dists = self.dists
        # Get |site magmoms| from FM ordering so that S_i and S_j are consistent?
        # Large S variations is throwing a loop
        # fm_struct = self.get_low_energy_orderings()[0]
        # Total energy and nonmagnetic energy contribution
        columns = ["E", "E0"]
        # Get labels of unique NN interactions
        for k0, v0 in nn_interactions.items():
            for i, j in v0.items():  # i and j indices
                c = str(i) + "-" + str(j) + "-" + str(k0)
                c_rev = str(j) + "-" + str(i) + "-" + str(k0)
                if c not in columns and c_rev not in columns:
                    columns.append(c)
        num_sgraphs = len(sgraphs)
        # Keep n interactions (not counting 'E') for n+1 structure graphs
        columns = columns[: num_sgraphs + 1]
        num_nn_j = len(columns) - 1  # ignore total energy
        j_columns = [name for name in columns if name not in ["E", "E0"]]
        ex_mat_empty = pd.DataFrame(columns=columns)
        ex_mat = ex_mat_empty.copy()
        if len(j_columns) < 2:
            self.ex_mat = ex_mat  # Only <J> can be calculated here
        else:
            sgraphs_copy = copy.deepcopy(sgraphs)
            sgraph_index = 0
            # Loop over all sites in each graph and compute |S_i . S_j|
            # for n+1 unique graphs to compute n exchange params
            for graph in sgraphs:
                sgraph = sgraphs_copy.pop(0)
                ex_row = pd.DataFrame(
                    np.zeros((1, num_nn_j + 1)), index=[sgraph_index], columns=columns
                )
                for i, node in enumerate(sgraph.graph.nodes):
                    # s_i_sign = np.sign(sgraph.structure.site_properties['magmom'][i])
                    s_i = sgraph.structure.site_properties["magmom"][i]
                    for k in unique_site_ids.keys():
                        if i in k:
                            i_index = unique_site_ids[k]
                    # Get all connections for ith site and compute |S_i . S_j|
                    connections = sgraph.get_connected_sites(i)
                    # dists = [round(cs[-1], 2) for cs in connections]  # i<->j distances
                    # dists = sorted(list(set(dists)))  # NN, NNN, NNNN, etc.
                    for j, connection in enumerate(connections):
                        j_site = connection[2]
                        dist = round(connection[-1], 2)  # i_j distance
                        # s_j_sign = np.sign(sgraph.structure.site_properties['magmom'][j_site])
                        s_j = sgraph.structure.site_properties["magmom"][j_site]
                        for k in unique_site_ids.keys():
                            if j_site in k:
                                j_index = unique_site_ids[k]
                        # Determine order of connection
                        # NOTE(review): if the distance matches none of the
                        # nn/nnn/nnnn shells, `order` keeps its value from the
                        # previous iteration (NameError on the very first) —
                        # TODO confirm this case cannot occur with cutoff<=NNNN.
                        if abs(dist - dists["nn"]) <= tol:
                            order = "-nn"
                        elif abs(dist - dists["nnn"]) <= tol:
                            order = "-nnn"
                        elif abs(dist - dists["nnnn"]) <= tol:
                            order = "-nnnn"
                        j_ij = str(i_index) + "-" + str(j_index) + order
                        j_ji = str(j_index) + "-" + str(i_index) + order
                        if j_ij in ex_mat.columns:
                            ex_row.at[sgraph_index, j_ij] -= s_i * s_j
                        elif j_ji in ex_mat.columns:
                            ex_row.at[sgraph_index, j_ji] -= s_i * s_j
                # Ignore the row if it is a duplicate to avoid singular matrix
                if ex_mat.append(ex_row)[j_columns].equals(
                    ex_mat.append(ex_row)[j_columns].drop_duplicates(keep="first")
                ):
                    e_index = self.ordered_structures.index(sgraph.structure)
                    ex_row.at[sgraph_index, "E"] = self.energies[e_index]
                    sgraph_index += 1
                    ex_mat = ex_mat.append(ex_row)
                # if sgraph_index == num_nn_j:  # check for zero columns
                #     zeros = [b for b in (ex_mat[j_columns] == 0).all(axis=0)]
                #     if True in zeros:
                #         sgraph_index -= 1  # keep looking
            ex_mat[j_columns] = ex_mat[j_columns].div(
                2.0
            )  # 1/2 factor in Heisenberg Hamiltonian
            ex_mat[["E0"]] = 1  # Nonmagnetic contribution
            # Check for singularities and delete columns with all zeros
            zeros = [b for b in (ex_mat == 0).all(axis=0)]
            if True in zeros:
                c = ex_mat.columns[zeros.index(True)]
                ex_mat = ex_mat.drop(columns=[c], axis=1)
            # ex_mat = ex_mat.drop(ex_mat.tail(len_zeros).index)
            # Force ex_mat to be square
            ex_mat = ex_mat[: ex_mat.shape[1] - 1]
            self.ex_mat = ex_mat

    def get_exchange(self):
        """
        Take Heisenberg Hamiltonian and corresponding energy for each row and
        solve for the exchange parameters.

        Returns:
            ex_params (dict): Exchange parameter values (meV/atom).
        """
        ex_mat = self.ex_mat
        # Solve the matrix equation for J_ij values
        E = ex_mat[["E"]]
        j_names = [j for j in ex_mat.columns if j not in ["E"]]
        # Only 1 NN interaction
        if len(j_names) < 3:
            # Estimate exchange by J ~ E_AFM - E_FM
            j_avg = self.estimate_exchange()
            ex_params = {"<J>": j_avg}
            self.ex_params = ex_params
            return ex_params
        # Solve eigenvalue problem for more than 1 NN interaction
        H = ex_mat.loc[:, ex_mat.columns != "E"].values
        H_inv = np.linalg.inv(H)
        j_ij = np.dot(H_inv, E)
        # Convert J_ij to meV; index 0 is the nonmagnetic E0 term
        # ("E0" is the first entry of j_names) and is deliberately not scaled.
        j_ij[1:] *= 1000  # J_ij in meV
        j_ij = j_ij.tolist()
        ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij)}
        self.ex_params = ex_params
        return ex_params

    def get_low_energy_orderings(self):
        """
        Find lowest energy FM and AFM orderings to compute E_AFM - E_FM.

        Returns:
            fm_struct (Structure): fm structure with 'magmom' site property
            afm_struct (Structure): afm structure with 'magmom' site property
            fm_e (float): fm energy
            afm_e (float): afm energy
        """
        fm_struct, afm_struct = None, None
        mag_min = np.inf
        mag_max = 0.001
        fm_e_min = 0
        afm_e_min = 0
        # epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]
        for s, e in zip(self.ordered_structures, self.energies):
            ordering = CollinearMagneticStructureAnalyzer(
                s, threshold=0.0, make_primitive=False
            ).ordering
            magmoms = s.site_properties["magmom"]
            # Try to find matching orderings first
            if ordering == Ordering.FM and e < fm_e_min:
                fm_struct = s
                mag_max = abs(sum(magmoms))
                fm_e = e
                fm_e_min = e
            if ordering == Ordering.AFM and e < afm_e_min:
                afm_struct = s
                afm_e = e
                mag_min = abs(sum(magmoms))
                afm_e_min = e
        # Brute force search for closest thing to FM and AFM
        # NOTE(review): if neither pass assigns fm_struct/afm_struct,
        # fm_e/afm_e stay unbound and the return raises NameError —
        # presumably the inputs always contain both; verify upstream.
        if not fm_struct or not afm_struct:
            for s, e in zip(self.ordered_structures, self.energies):
                magmoms = s.site_properties["magmom"]
                if abs(sum(magmoms)) > mag_max:  # FM ground state
                    fm_struct = s
                    fm_e = e
                    mag_max = abs(sum(magmoms))
                # AFM ground state
                if abs(sum(magmoms)) < mag_min:
                    afm_struct = s
                    afm_e = e
                    mag_min = abs(sum(magmoms))
                    afm_e_min = e
                elif abs(sum(magmoms)) == 0 and mag_min == 0:
                    if e < afm_e_min:
                        afm_struct = s
                        afm_e = e
                        afm_e_min = e
        # Convert to magnetic structures with 'magmom' site property
        fm_struct = CollinearMagneticStructureAnalyzer(
            fm_struct, make_primitive=False, threshold=0.0
        ).get_structure_with_only_magnetic_atoms(make_primitive=False)
        afm_struct = CollinearMagneticStructureAnalyzer(
            afm_struct, make_primitive=False, threshold=0.0
        ).get_structure_with_only_magnetic_atoms(make_primitive=False)
        return fm_struct, afm_struct, fm_e, afm_e

    def estimate_exchange(self, fm_struct=None, afm_struct=None, fm_e=None, afm_e=None):
        """
        Estimate <J> for a structure based on low energy FM and AFM orderings.

        Args:
            fm_struct (Structure): fm structure with 'magmom' site property
            afm_struct (Structure): afm structure with 'magmom' site property
            fm_e (float): fm energy/atom
            afm_e (float): afm energy/atom

        Returns:
            j_avg (float): Average exchange parameter (meV/atom)
        """
        # Get low energy orderings if not supplied
        if any(arg is None for arg in [fm_struct, afm_struct, fm_e, afm_e]):
            fm_struct, afm_struct, fm_e, afm_e = self.get_low_energy_orderings()
        magmoms = fm_struct.site_properties["magmom"]
        # Normalize energies by number of magnetic ions
        # fm_e = fm_e / len(magmoms)
        # afm_e = afm_e / len(afm_magmoms)
        m_avg = np.mean([np.sqrt(m ** 2) for m in magmoms])
        # If m_avg for FM config is < 1 we won't get sensibile results.
        if m_avg < 1:
            iamthedanger = """
                Local magnetic moments are small (< 1 muB / atom). The
                exchange parameters may be wrong, but <J> and the mean
                field critical temperature estimate may be OK.
                """
            logging.warning(iamthedanger)
        delta_e = afm_e - fm_e  # J > 0 -> FM
        j_avg = delta_e / (m_avg ** 2)  # eV / magnetic ion
        j_avg *= 1000  # meV / ion
        return j_avg

    def get_mft_temperature(self, j_avg):
        """
        Crude mean field estimate of critical temperature based on <J> for
        one sublattice, or solving the coupled equations for a multisublattice
        material.

        Args:
            j_avg (float): j_avg (float): Average exchange parameter (meV/atom)

        Returns:
            mft_t (float): Critical temperature (K)
        """
        num_sublattices = len(self.unique_site_ids)
        k_boltzmann = 0.0861733  # meV/K
        # Only 1 magnetic sublattice
        if num_sublattices == 1:
            mft_t = 2 * abs(j_avg) / 3 / k_boltzmann
        else:  # multiple magnetic sublattices
            omega = np.zeros((num_sublattices, num_sublattices))
            ex_params = self.ex_params
            ex_params = {k: v for (k, v) in ex_params.items() if k != "E0"}  # ignore E0
            for k in ex_params:
                # split into i, j unique site identifiers
                sites = [elem for elem in k.split("-")]
                sites = [int(num) for num in sites[:2]]  # cut 'nn' identifier
                i, j = sites[0], sites[1]
                omega[i, j] += ex_params[k]
                omega[j, i] += ex_params[k]
            omega = omega * 2 / 3 / k_boltzmann
            eigenvals, eigenvecs = np.linalg.eig(omega)
            mft_t = max(eigenvals)
        if mft_t > 1500:  # Not sensible!
            stayoutofmyterritory = """
                This mean field estimate is too high! Probably
                the true low energy orderings were not given as inputs.
                """
            logging.warning(stayoutofmyterritory)
        return mft_t

    def get_interaction_graph(self, filename=None):
        """
        Get a StructureGraph with edges and weights that correspond to exchange
        interactions and J_ij values, respectively.

        Args:
            filename (str): if not None, save interaction graph to filename.

        Returns:
            igraph (StructureGraph): Exchange interaction graph.
        """
        structure = self.ordered_structures[0]
        sgraph = self.sgraphs[0]
        igraph = StructureGraph.with_empty_graph(
            structure, edge_weight_name="exchange_constant", edge_weight_units="meV"
        )
        if "<J>" in self.ex_params:  # Only <J> is available
            warning_msg = """
                Only <J> is available. The interaction graph will not tell
                you much.
                """
            logging.warning(warning_msg)
        # J_ij exchange interaction matrix
        for i, node in enumerate(sgraph.graph.nodes):
            connections = sgraph.get_connected_sites(i)
            for c in connections:
                jimage = c[1]  # relative integer coordinates of atom j
                j = c[2]  # index of neighbor
                dist = c[-1]  # i <-> j distance
                j_exc = self._get_j_exc(i, j, dist)
                igraph.add_edge(
                    i, j, to_jimage=jimage, weight=j_exc, warn_duplicates=False
                )
        # Save to a json file if desired
        if filename:
            if filename.endswith(".json"):
                dumpfn(igraph, filename)
            else:
                filename += ".json"
                dumpfn(igraph, filename)
        return igraph

    def _get_j_exc(self, i, j, dist):
        """
        Convenience method for looking up exchange parameter between two sites.

        Args:
            i (int): index of ith site
            j (int): index of jth site
            dist (float): distance (Angstrom) between sites
                (10E-2 precision)

        Returns:
            j_exc (float): Exchange parameter in meV
        """
        # Get unique site identifiers
        for k in self.unique_site_ids.keys():
            if i in k:
                i_index = self.unique_site_ids[k]
            if j in k:
                j_index = self.unique_site_ids[k]
        order = ""
        # Determine order of interaction
        if abs(dist - self.dists["nn"]) <= self.tol:
            order = "-nn"
        elif abs(dist - self.dists["nnn"]) <= self.tol:
            order = "-nnn"
        elif abs(dist - self.dists["nnnn"]) <= self.tol:
            order = "-nnnn"
        j_ij = str(i_index) + "-" + str(j_index) + order
        j_ji = str(j_index) + "-" + str(i_index) + order
        if j_ij in self.ex_params:
            j_exc = self.ex_params[j_ij]
        elif j_ji in self.ex_params:
            j_exc = self.ex_params[j_ji]
        else:
            j_exc = 0
        # Check if only averaged NN <J> values are available
        if "<J>" in self.ex_params and order == "-nn":
            j_exc = self.ex_params["<J>"]
        return j_exc

    def get_heisenberg_model(self):
        """Save results of mapping to a HeisenbergModel object.

        Returns:
            hmodel (HeisenbergModel): MSONable object.
        """
        # Original formula unit with nonmagnetic ions
        hm_formula = str(self.ordered_structures_[0].composition.reduced_formula)
        hm_structures = self.ordered_structures
        hm_energies = self.energies
        hm_cutoff = self.cutoff
        hm_tol = self.tol
        hm_sgraphs = self.sgraphs
        hm_usi = self.unique_site_ids
        hm_wids = self.wyckoff_ids
        hm_nni = self.nn_interactions
        hm_d = self.dists
        # Exchange matrix DataFrame in json format
        hm_em = self.ex_mat.to_json()
        hm_ep = self.get_exchange()
        hm_javg = self.estimate_exchange()
        hm_igraph = self.get_interaction_graph()
        hmodel = HeisenbergModel(
            hm_formula,
            hm_structures,
            hm_energies,
            hm_cutoff,
            hm_tol,
            hm_sgraphs,
            hm_usi,
            hm_wids,
            hm_nni,
            hm_d,
            hm_em,
            hm_ep,
            hm_javg,
            hm_igraph,
        )
        return hmodel
class HeisenbergScreener:
    """
    Class to clean and screen magnetic orderings.
    """

    def __init__(self, structures, energies, screen=False):
        """
        This class pre-processes magnetic orderings and energies for
        HeisenbergMapper. It prioritizes low-energy orderings with large and
        localized magnetic moments.

        Args:
            structures (list): Structure objects with magnetic moments.
            energies (list): Energies/atom of magnetic orderings.
            screen (bool): Try to screen out high energy and low-spin configurations.

        Attributes:
            screened_structures (list): Sorted structures.
            screened_energies (list): Sorted energies.
        """
        # Cleanup
        structures, energies = self._do_cleanup(structures, energies)
        n_structures = len(structures)
        # If there are more than 2 structures, we want to perform a
        # screening to prioritize well-behaved orderings
        if screen and n_structures > 2:
            structures, energies = self._do_screen(structures, energies)
        self.screened_structures = structures
        self.screened_energies = energies

    @staticmethod
    def _do_cleanup(structures, energies):
        """Sanitize input structures and energies.

        Takes magnetic structures and performs the following operations
        - Erases nonmagnetic ions and gives all ions ['magmom'] site prop
        - Converts total energies -> energy / magnetic ion
        - Checks for duplicate/degenerate orderings
        - Sorts by energy

        Args:
            structures (list): Structure objects with magmoms.
            energies (list): Corresponding energies.

        Returns:
            ordered_structures (list): Sanitized structures.
            ordered_energies (list): Sorted energies.
        """
        # Get only magnetic ions & give all structures site_properties['magmom']
        # zero threshold so that magnetic ions with small moments
        # are preserved
        ordered_structures = [
            CollinearMagneticStructureAnalyzer(
                s, make_primitive=False, threshold=0.0
            ).get_structure_with_only_magnetic_atoms(make_primitive=False)
            for s in structures
        ]
        # Convert to energies / magnetic ion
        energies = [e / len(s) for (e, s) in zip(energies, ordered_structures)]
        # Check for duplicate / degenerate states (sometimes different initial
        # configs relax to the same state)
        remove_list = []
        for i, e in enumerate(energies):
            e_tol = 6  # 10^-6 eV/atom tol on energies
            e = round(e, e_tol)
            if i not in remove_list:
                for i_check, e_check in enumerate(energies):
                    e_check = round(e_check, e_tol)
                    if i != i_check and i_check not in remove_list and e == e_check:
                        remove_list.append(i_check)
        # Also discard structures with small |magmoms| < 0.1 uB
        # xx - get rid of these or just bury them in the list?
        # for i, s in enumerate(ordered_structures):
        #     magmoms = s.site_properties['magmom']
        #     if i not in remove_list:
        #         if any(abs(m) < 0.1 for m in magmoms):
        #             remove_list.append(i)
        # Remove duplicates
        if len(remove_list):
            ordered_structures = [
                s for i, s in enumerate(ordered_structures) if i not in remove_list
            ]
            energies = [e for i, e in enumerate(energies) if i not in remove_list]
        # Sort by energy if not already sorted.
        # BUGFIX: sort on the energy values only via an index sort. The old
        # sorted(zip(energies, structures)) fell back to comparing Structure
        # objects whenever two energies tied, which raises TypeError because
        # structures define no ordering.
        order = sorted(range(len(energies)), key=lambda idx: energies[idx])
        ordered_structures = [ordered_structures[idx] for idx in order]
        ordered_energies = [energies[idx] for idx in order]
        return ordered_structures, ordered_energies

    @staticmethod
    def _do_screen(structures, energies):
        """Screen and sort magnetic orderings based on some criteria.

        Prioritize low energy orderings and large, localized magmoms.
        do_clean should be run first to sanitize inputs.

        Args:
            structures (list): At least three structure objects.
            energies (list): Energies.

        Returns:
            screened_structures (list): Sorted structures.
            screened_energies (list): Sorted energies.
        """
        magmoms = [s.site_properties["magmom"] for s in structures]
        n_below_1ub = [len([m for m in ms if abs(m) < 1]) for ms in magmoms]
        df = pd.DataFrame(
            {
                "structure": structures,
                "energy": energies,
                "magmoms": magmoms,
                "n_below_1ub": n_below_1ub,
            }
        )
        # keep the ground and first excited state fixed to capture the
        # low-energy spectrum
        index = list(df.index)[2:]
        df_high_energy = df.iloc[2:]
        # Prioritize structures with fewer magmoms < 1 uB
        df_high_energy = df_high_energy.sort_values(by="n_below_1ub")
        index = [0, 1] + list(df_high_energy.index)
        # sort
        df = df.reindex(index)
        screened_structures = list(df["structure"].values)
        screened_energies = list(df["energy"].values)
        return screened_structures, screened_energies
class HeisenbergModel(MSONable):
    """
    Store a Heisenberg model fit to low-energy magnetic orderings.
    Intended to be generated by HeisenbergMapper.get_heisenberg_model().
    """

    def __init__(
        self,
        formula=None,
        structures=None,
        energies=None,
        cutoff=None,
        tol=None,
        sgraphs=None,
        unique_site_ids=None,
        wyckoff_ids=None,
        nn_interactions=None,
        dists=None,
        ex_mat=None,
        ex_params=None,
        javg=None,
        igraph=None,
    ):
        """
        Args:
            formula (str): Reduced formula of compound.
            structures (list): Structure objects with magmoms.
            energies (list): Energies of each relaxed magnetic structure.
            cutoff (float): Cutoff in Angstrom for nearest neighbor search.
            tol (float): Tolerance (in Angstrom) on nearest neighbor distances being equal.
            sgraphs (list): StructureGraph objects.
            unique_site_ids (dict): Maps each site to its unique numerical
                identifier.
            wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
                position.
            nn_interacations (dict): {i: j} pairs of NN interactions
                between unique sites.
            dists (dict): NN, NNN, and NNNN interaction distances
            ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
                graph.
            ex_params (dict): Exchange parameter values (meV/atom).
            javg (float): <J> exchange param (meV/atom).
            igraph (StructureGraph): Exchange interaction graph.
        """
        self.formula = formula
        self.structures = structures
        self.energies = energies
        self.cutoff = cutoff
        self.tol = tol
        self.sgraphs = sgraphs
        self.unique_site_ids = unique_site_ids
        self.wyckoff_ids = wyckoff_ids
        self.nn_interactions = nn_interactions
        self.dists = dists
        self.ex_mat = ex_mat
        self.ex_params = ex_params
        self.javg = javg
        self.igraph = igraph

    def as_dict(self):
        """
        Because some dicts have tuple keys, some sanitization is required for json compatibility.
        """
        d = {}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["@version"] = __version__
        d["formula"] = self.formula
        d["structures"] = [s.as_dict() for s in self.structures]
        d["energies"] = self.energies
        d["cutoff"] = self.cutoff
        d["tol"] = self.tol
        d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
        d["dists"] = self.dists
        d["ex_params"] = self.ex_params
        d["javg"] = self.javg
        d["igraph"] = self.igraph.as_dict()
        # Sanitize tuple & int keys
        d["ex_mat"] = jsanitize(self.ex_mat)
        d["nn_interactions"] = jsanitize(self.nn_interactions)
        d["unique_site_ids"] = jsanitize(self.unique_site_ids)
        d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)
        return d

    @classmethod
    def from_dict(cls, d):
        """Create a HeisenbergModel from a dict."""
        # Reconstitute the site ids (keys were stringified by jsanitize;
        # literal_eval turns them back into ints / tuples)
        usids = {}
        wids = {}
        nnis = {}
        for k, v in d["nn_interactions"].items():
            nn_dict = {}
            for k1, v1 in v.items():
                key = literal_eval(k1)
                nn_dict[key] = v1
            nnis[k] = nn_dict
        for k, v in d["unique_site_ids"].items():
            key = literal_eval(k)
            if type(key) == int:
                usids[tuple([key])] = v
            elif type(key) == tuple:
                usids[key] = v
        for k, v in d["wyckoff_ids"].items():
            key = literal_eval(k)
            wids[key] = v
        # Reconstitute the structure and graph objects
        structures = []
        sgraphs = []
        for v in d["structures"]:
            structures.append(Structure.from_dict(v))
        for v in d["sgraphs"]:
            sgraphs.append(StructureGraph.from_dict(v))
        # Interaction graph
        igraph = StructureGraph.from_dict(d["igraph"])
        # Reconstitute the exchange matrix DataFrame
        # NOTE(review): eval() on the stored JSON string is a code-execution
        # risk for untrusted dicts, and fails on JSON literals true/false/null;
        # consider json.loads or pd.read_json instead — TODO confirm inputs
        # are always trusted, numeric-only frames.
        try:
            ex_mat = eval(d["ex_mat"])
            ex_mat = pd.DataFrame.from_dict(ex_mat)
        except SyntaxError:  # if ex_mat is empty
            ex_mat = pd.DataFrame(columns=["E", "E0"])
        hmodel = HeisenbergModel(
            formula=d["formula"],
            structures=structures,
            energies=d["energies"],
            cutoff=d["cutoff"],
            tol=d["tol"],
            sgraphs=sgraphs,
            unique_site_ids=usids,
            wyckoff_ids=wids,
            nn_interactions=nnis,
            dists=d["dists"],
            ex_mat=ex_mat,
            ex_params=d["ex_params"],
            javg=d["javg"],
            igraph=igraph,
        )
        return hmodel

    def _get_j_exc(self, i, j, dist):
        """
        Convenience method for looking up exchange parameter between two sites.

        NOTE(review): duplicates HeisenbergMapper._get_j_exc — consider
        sharing a single module-level helper.

        Args:
            i (int): index of ith site
            j (int): index of jth site
            dist (float): distance (Angstrom) between sites +- tol

        Returns:
            j_exc (float): Exchange parameter in meV
        """
        # Get unique site identifiers
        for k in self.unique_site_ids.keys():
            if i in k:
                i_index = self.unique_site_ids[k]
            if j in k:
                j_index = self.unique_site_ids[k]
        order = ""
        # Determine order of interaction
        if abs(dist - self.dists["nn"]) <= self.tol:
            order = "-nn"
        elif abs(dist - self.dists["nnn"]) <= self.tol:
            order = "-nnn"
        elif abs(dist - self.dists["nnnn"]) <= self.tol:
            order = "-nnnn"
        j_ij = str(i_index) + "-" + str(j_index) + order
        j_ji = str(j_index) + "-" + str(i_index) + order
        if j_ij in self.ex_params:
            j_exc = self.ex_params[j_ij]
        elif j_ji in self.ex_params:
            j_exc = self.ex_params[j_ji]
        else:
            j_exc = 0
        # Check if only averaged NN <J> values are available
        if "<J>" in self.ex_params and order == "-nn":
            j_exc = self.ex_params["<J>"]
        return j_exc
| gVallverdu/pymatgen | pymatgen/analysis/magnetism/heisenberg.py | Python | mit | 37,618 | [
"pymatgen"
] | f25bfb2c0b5645336d4ea478f4be37660d0945750e6f692970e840a970ded505 |
"""
ReportCLI class implementing command line interface to DIRAC Accounting
ReportGenerator Service. It is not complete yet
Once ready it could be used with a script as simple as:
from DIRAC.Core.Base.Script import Script
Script.localCfg.addDefaultEntry("LogLevel", "info")
Script.parseCommandLine()
from DIRAC.AccountingSystem.Client.ReportCLI import ReportCLI
if __name__=="__main__":
reli = ReportCLI()
reli.start()
"""
import sys
from DIRAC.Core.Base.CLI import CLI, colorize
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC import gLogger
class ReportCLI(CLI):
    """Interactive command-line client for the Accounting ReportGenerator service."""

    def __init__(self):
        CLI.__init__(self)
        # Attempt an initial connection so the prompt reflects server state.
        self.do_connect(None)

    def start(self):
        """
        Start the command loop
        """
        if not self.connected:
            gLogger.error("Client is not connected")
        try:
            self.cmdloop()
        except KeyboardInterrupt:
            gLogger.warn("Received a keyboard interrupt.")
            self.do_quit("")

    def do_connect(self, args):
        """
        Tries to connect to the server
        Usage: connect
        """
        gLogger.info("Trying to connect to server")
        # Assume disconnected until the ping succeeds.
        self.connected = False
        self.prompt = "(%s)> " % colorize("Not connected", "red")
        if ReportsClient().ping()["OK"]:
            self.connected = True
            self.prompt = "(%s)> " % colorize("Connected", "green")

    def printComment(self, comment):
        # Print each line before the final newline, prefixed with "# ".
        for commentLine in comment.split("\n")[:-1]:
            print("# %s" % commentLine.strip())

    def showTraceback(self):
        import traceback

        excType, excValue, excTb = sys.exc_info()
        print("________________________\n")
        print("Exception", excType, ":", excValue)
        traceback.print_tb(excTb)
        print("________________________\n")
| DIRACGrid/DIRAC | src/DIRAC/AccountingSystem/Client/ReportCLI.py | Python | gpl-3.0 | 1,913 | [
"DIRAC"
] | 5df2b0477eeeabd4b3d1677e2f624ab7286f792e0a6530ff9dc48bdcce37d56e |
# sql/elements.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative
import numbers
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    e.g.::

        collate(mycolumn, 'utf8_bin')

    produces::

        mycolumn COLLATE utf8_bin

    """
    # Coerce the left side to a column expression; the collation name is
    # rendered as literal SQL text on the right.
    left = _literal_as_binds(expression)
    right = _literal_as_text(collation)
    return BinaryExpression(left, right, operators.collate, type_=left.type)
def between(expr, lower_bound, upper_bound, symmetric=False):
    """Produce a ``BETWEEN`` predicate clause.

    E.g.::

        from sqlalchemy import between
        stmt = select([users_table]).where(between(users_table.c.id, 5, 7))

    Would produce SQL resembling::

        SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2

    This is the standalone version of the :meth:`.ColumnElement.between`
    method available on all SQL expressions.  All arguments, including
    the left-hand ``expr``, are coerced from Python scalar values into
    column expressions if needed, e.g.::

        print(between(5, 3, 7))

    renders ``:param_1 BETWEEN :param_2 AND :param_3``.

    :param expr: a column expression, typically a :class:`.ColumnElement`
     instance or a Python scalar to be coerced, serving as the left side
     of the ``BETWEEN`` expression.

    :param lower_bound: a column or Python scalar expression serving as the
     lower bound of the right side of the ``BETWEEN`` expression.

    :param upper_bound: a column or Python scalar expression serving as the
     upper bound of the right side of the ``BETWEEN`` expression.

    :param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
     that not all databases support this syntax.

     .. versionadded:: 0.9.5

    .. seealso::

        :meth:`.ColumnElement.between`

    """
    coerced = _literal_as_binds(expr)
    return coerced.between(lower_bound, upper_bound, symmetric=symmetric)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter.

    Literal clauses are created automatically when non-:class:`.ClauseElement`
    objects (strings, ints, dates, etc.) are used in a comparison operation
    with a :class:`.ColumnElement` subclass, such as a
    :class:`~sqlalchemy.schema.Column` object.  Use this function to force
    the generation of a literal clause, which will be created as a
    :class:`BindParameter` with a bound value.

    :param value: the value to be bound.  Can be any Python object supported
     by the underlying DB-API, or is translatable via the given type
     argument.
    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
     will provide bind-parameter translation for this literal.
    """
    # anonymous (keyless) parameter; unique=True guarantees a distinct
    # generated name per occurrence
    return BindParameter(None, value, unique=True, type_=type_)
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored procedures),
    for databases which support them.

    The ``outparam`` can be used like a regular function parameter.  The
    "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its
    ``out_parameters`` attribute, which returns a dictionary containing
    the values.
    """
    # a named, non-unique bind with no initial value, flagged as an
    # OUT parameter for backends that support them
    return BindParameter(key, None, isoutparam=True, unique=False,
                         type_=type_)
def not_(clause):
    """Return a negation of the given clause, i.e. ``NOT(clause)``.

    The ``~`` operator is also overloaded on all :class:`.ColumnElement`
    subclasses to produce the same result.
    """
    # coerce plain Python values to a bound expression before negating
    coerced = _literal_as_binds(clause)
    return operators.inv(coerced)
@inspection._self_inspects
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed SQL
    expression.
    """
    __visit_name__ = 'clause'
    # default empty annotations; the Annotated wrapper produced by
    # _annotate() carries the real per-instance dictionary
    _annotations = {}
    # True only for constructs that may be passed to Connection.execute()
    # (checked in _execute_on_connection)
    supports_execution = False
    _from_objects = []
    bind = None
    # back-pointer to the element this one was cloned from; None when this
    # element is an original (see _clone / _cloned_set)
    _is_clone_of = None
    is_selectable = False
    is_clause_element = True
    # human-readable description used by __repr__; None -> default repr
    description = None
    _order_by_label_element = None
    _is_from_container = False
    def _clone(self):
        """Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. Its also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.
        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # clear memoized values that were copied over via __dict__ so they
        # are recomputed for the clone (_cloned_set and comparator are
        # util.memoized_property descriptors)
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)
        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses. the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self
        return c
    @property
    def _constructor(self):
        """return the 'constructor' for this ClauseElement.
        This is for the purposes for creating a new object of
        this type. Usually, its just the element's __class__.
        However, the "Annotated" version of the object overrides
        to return the class of its proxied element.
        """
        return self.__class__
    @util.memoized_property
    def _cloned_set(self):
        """Return the set consisting all cloned ancestors of this
        ClauseElement.
        Includes this ClauseElement. This accessor tends to be used for
        FromClause objects to identify 'equivalent' FROM clauses, regardless
        of transformative operations.
        """
        s = util.column_set()
        f = self
        # walk the _is_clone_of chain back to the original element,
        # collecting self and every ancestor along the way
        while f is not None:
            s.add(f)
            f = f._is_clone_of
        return s
    def __getstate__(self):
        # the clone back-pointer is intentionally not pickled
        d = self.__dict__.copy()
        d.pop('_is_clone_of', None)
        return d
    def _annotate(self, values):
        """return a copy of this ClauseElement with annotations
        updated by the given dictionary.
        """
        return Annotated(self, values)
    def _with_annotations(self, values):
        """return a copy of this ClauseElement with annotations
        replaced by the given dictionary.
        """
        return Annotated(self, values)
    def _deannotate(self, values=None, clone=False):
        """return a copy of this :class:`.ClauseElement` with annotations
        removed.
        :param values: optional tuple of individual values
        to remove.
        """
        if clone:
            # clone is used when we are also copying
            # the expression for a deep deannotation
            return self._clone()
        else:
            # if no clone, since we have no annotations we return
            # self
            return self
    def _execute_on_connection(self, connection, multiparams, params):
        # dispatch hook used by Connection.execute(); only constructs with
        # supports_execution=True (e.g. Executable subclasses) are runnable
        if self.supports_execution:
            return connection._execute_clauseelement(self, multiparams, params)
        else:
            raise exc.ObjectNotExecutableError(self)
    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.
        """
        return self._params(True, optionaldict, kwargs)
    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::
          >>> clause = column('x') + bindparam('foo')
          >>> print clause.compile().params
          {'foo':None}
          >>> print clause.params({'foo':7}).compile().params
          {'foo':7}
        """
        return self._params(False, optionaldict, kwargs)
    def _params(self, unique, optionaldict, kwargs):
        # accepts values either as a single positional dict or as kwargs
        if len(optionaldict) == 1:
            kwargs.update(optionaldict[0])
        elif len(optionaldict) > 1:
            raise exc.ArgumentError(
                "params() takes zero or one positional dictionary argument")
        def visit_bindparam(bind):
            # applied to each bindparam in the cloned expression tree
            if bind.key in kwargs:
                bind.value = kwargs[bind.key]
                bind.required = False
            if unique:
                bind._convert_to_unique()
        # cloned_traverse returns a copy of self with the visitor applied,
        # leaving the original expression untouched
        return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.
        Subclasses should override the default behavior, which is a
        straight identity comparison.
        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`.ColumnElement`)
        """
        return self is other
    def _copy_internals(self, clone=_clone, **kw):
        """Reassign internal elements to be clones of themselves.
        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.
        The given clone function should be used, which may be applying
        additional transformations to the element (i.e. replacement
        traversal, cloned traversal, annotations).
        """
        pass
    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.
        This is used for visit traversal.
        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).
        """
        return []
    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.
        This method is overridden by subclasses to return a
        "grouping" construct, i.e. parenthesis. In particular
        it's used by "binary" expressions to provide a grouping
        around themselves when placed into a larger expression,
        as well as by :func:`.select` constructs when placed into
        the FROM clause of another :func:`.select`. (Note that
        subqueries should be normally created using the
        :meth:`.Select.alias` method, as many platforms require
        nested SELECT statements to be named).
        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly. Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parenthesis might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.
        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.
        """
        return self
    @util.dependencies("sqlalchemy.engine.default")
    def compile(self, default, bind=None, dialect=None, **kw):
        """Compile this SQL expression.
        The return value is a :class:`~.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.
        :param bind: An ``Engine`` or ``Connection`` from which a
            ``Compiled`` will be acquired. This argument takes precedence over
            this :class:`.ClauseElement`'s bound engine, if any.
        :param column_keys: Used for INSERT and UPDATE statements, a list of
            column names which should be present in the VALUES clause of the
            compiled statement. If ``None``, all columns from the target table
            object are rendered.
        :param dialect: A ``Dialect`` instance from which a ``Compiled``
            will be acquired. This argument takes precedence over the `bind`
            argument as well as this :class:`.ClauseElement`'s bound engine,
            if any.
        :param inline: Used for INSERT statements, for a dialect which does
            not support inline retrieval of newly generated primary key
            columns, will force the expression used to create the new primary
            key value to be rendered inline within the INSERT statement's
            VALUES clause. This typically refers to Sequence execution but may
            also refer to any server-side default generation function
            associated with a primary key `Column`.
        :param compile_kwargs: optional dictionary of additional parameters
            that will be passed through to the compiler within all "visit"
            methods. This allows any custom flag to be passed through to
            a custom compilation construct, for example. It is also used
            for the case of passing the ``literal_binds`` flag through::
            from sqlalchemy.sql import table, column, select
            t = table('t', column('x'))
            s = select([t]).where(t.c.x == 5)
            print s.compile(compile_kwargs={"literal_binds": True})
        .. versionadded:: 0.9.0
        .. seealso::
            :ref:`faq_sql_expression_string`
        """
        # resolve the dialect: explicit argument wins, then the bind's
        # dialect, then this element's own bound engine, finally a plain
        # string-compilation dialect as a last resort
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                dialect = default.StrCompileDialect()
        return self._compiler(dialect, bind=bind, **kw)
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""
        return dialect.statement_compiler(dialect, self, **kw)
    def __str__(self):
        # py2 requires an explicit unicode -> ascii encode of the
        # compiled string; py3 str() is already text
        if util.py3k:
            return str(self.compile())
        else:
            return unicode(self.compile()).encode('ascii', 'backslashreplace')
    def __and__(self, other):
        """'and' at the ClauseElement level.
        .. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement`. level
        """
        return and_(self, other)
    def __or__(self, other):
        """'or' at the ClauseElement level.
        .. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement`. level
        """
        return or_(self, other)
    def __invert__(self):
        # prefer a pre-built negation if the construct supplies one
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return self._negate()
    def _negate(self):
        return UnaryExpression(
            self.self_group(against=operators.inv),
            operator=operators.inv,
            negate=None)
    def __bool__(self):
        # expressions like ``column('x') == 5`` produce ClauseElements,
        # not booleans; refusing truth-testing catches misuse such as
        # ``if col == 5:``
        raise TypeError("Boolean value of this clause is not defined")
    __nonzero__ = __bool__
    def __repr__(self):
        friendly = self.description
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(operators.ColumnOperators, ClauseElement):
    """Represent a column-oriented SQL expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.
    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a SQL expression, including
    the expressions themselves, SQL functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such elements.
    A wide variety of SQLAlchemy Core functions work at the SQL expression
    level, and are intended to accept instances of :class:`.ColumnElement` as
    arguments.  These functions will typically document that they accept a
    "SQL expression" as an argument.  What this means in terms of SQLAlchemy
    usually refers to an input which is either already in the form of a
    :class:`.ColumnElement` object, or a value which can be **coerced** into
    one.  The coercion rules followed by most, but not all, SQLAlchemy Core
    functions with regards to SQL expressions are as follows:
    * a literal Python value, such as a string, integer or floating
      point value, boolean, datetime, ``Decimal`` object, or virtually
      any other Python object, will be coerced into a "literal bound
      value".  This generally means that a :func:`.bindparam` will be
      produced featuring the given value embedded into the construct; the
      resulting :class:`.BindParameter` object is an instance of
      :class:`.ColumnElement`.  The Python value will ultimately be sent
      to the DBAPI at execution time as a paramterized argument to the
      ``execute()`` or ``executemany()`` methods, after SQLAlchemy
      type-specific converters (e.g. those provided by any associated
      :class:`.TypeEngine` objects) are applied to the value.
    * any special object value, typically ORM-level constructs, which
      feature a method called ``__clause_element__()``.  The Core
      expression system looks for this method when an object of otherwise
      unknown type is passed to a function that is looking to coerce the
      argument into a :class:`.ColumnElement` expression.  The
      ``__clause_element__()`` method, if present, should return a
      :class:`.ColumnElement` instance.  The primary use of
      ``__clause_element__()`` within SQLAlchemy is that of class-bound
      attributes on ORM-mapped classes; a ``User`` class which contains a
      mapped attribute named ``.name`` will have a method
      ``User.name.__clause_element__()`` which when invoked returns the
      :class:`.Column` called ``name`` associated with the mapped table.
    * The Python ``None`` value is typically interpreted as ``NULL``,
      which in SQLAlchemy Core produces an instance of :func:`.null`.
    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement`
    objects using Python expressions.  This means that Python operators
    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
    and allow the instantiation of further :class:`.ColumnElement` instances
    which are composed from other, more fundamental :class:`.ColumnElement`
    objects.  For example, two :class:`.ColumnClause` objects can be added
    together with the addition operator ``+`` to produce
    a :class:`.BinaryExpression`.
    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::
        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b
    .. seealso::
        :class:`.Column`
        :func:`.expression.column`
    """
    __visit_name__ = 'column'
    primary_key = False
    foreign_keys = []
    _label = None
    """The named label that can be used to target
    this column in a result set.
    This label is almost always the label used when
    rendering <expr> AS <label> in a SELECT statement.  It also
    refers to a name that this column expression can be located from
    in a result set.
    For a regular Column bound to a Table, this is typically the label
    <tablename>_<columnname>.  For other constructs, different rules
    may apply, such as anonymized labels and others.
    """
    key = None
    """the 'key' that in some circumstances refers to this object in a
    Python namespace.
    This typically refers to the "key" of the column as present in the
    ``.c`` collection of a selectable, e.g. sometable.c["somekey"] would
    return a Column with a .key of "somekey".
    """
    _key_label = None
    """A label-based version of 'key' that in some circumstances refers
    to this object in a Python namespace.
    _key_label comes into play when a select() statement is constructed with
    apply_labels(); in this case, all Column objects in the ``.c`` collection
    are rendered as <tablename>_<columnname> in SQL; this is essentially the
    value of ._label.  But to locate those columns in the ``.c`` collection,
    the name is along the lines of <tablename>_<key>; that's the typical
    value of .key_label.
    """
    _render_label_in_columns_clause = True
    """A flag used by select._columns_plus_names that helps to determine
    we are actually going to render in terms of "SELECT <col> AS <label>".
    This flag can be returned as False for some Column objects that want
    to be rendered as simple "SELECT <col>"; typically columns that don't have
    any parent table and are named the same as what the label would be
    in any case.
    """
    _resolve_label = None
    """The name that should be used to identify this ColumnElement in a
    select() object when "label resolution" logic is used; this refers
    to using a string name in an expression like order_by() or group_by()
    that wishes to target a labeled expression in the columns clause.
    The name is distinct from that of .name or ._label to account for the case
    where anonymizing logic may be used to change the name that's actually
    rendered at compile time; this attribute should hold onto the original
    name that was user-assigned when producing a .label() construct.
    """
    _allow_label_resolve = True
    """A flag that can be flipped to prevent a column from being resolvable
    by string label name."""
    _alt_names = ()
    def self_group(self, against=None):
        # boolean-typed expressions placed inside AND/OR get an explicit
        # true/false coercion wrapper; ANY/ALL operands get parenthesized
        if (against in (operators.and_, operators.or_, operators._asbool) and
                self.type._type_affinity
                is type_api.BOOLEANTYPE._type_affinity):
            return AsBoolean(self, operators.istrue, operators.isfalse)
        elif (against in (operators.any_op, operators.all_op)):
            return Grouping(self)
        else:
            return self
    def _negate(self):
        if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
            # TODO: see the note in AsBoolean that it seems to assume
            # the element is the True_() / False_() constant, so this
            # is too broad
            return AsBoolean(self, operators.isfalse, operators.istrue)
        else:
            return super(ColumnElement, self)._negate()
    @util.memoized_property
    def type(self):
        # default type for expressions that don't otherwise set one
        return type_api.NULLTYPE
    @util.memoized_property
    def comparator(self):
        # the comparator object implements the actual SQL operator
        # behavior for this expression's type
        try:
            comparator_factory = self.type.comparator_factory
        except AttributeError:
            raise TypeError(
                "Object %r associated with '.type' attribute "
                "is not a TypeEngine class or object" % self.type)
        else:
            return comparator_factory(self)
    def __getattr__(self, key):
        # unknown attributes are proxied to the comparator, which is how
        # type-specific operators (e.g. String.contains) become available
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            raise AttributeError(
                'Neither %r object nor %r object has an attribute %r' % (
                    type(self).__name__,
                    type(self.comparator).__name__,
                    key)
            )
    def operate(self, op, *other, **kwargs):
        # entry point for operator overloads; delegates to the comparator
        return op(self.comparator, *other, **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)
    def _bind_param(self, operator, obj, type_=None):
        # coerce a plain Python value compared against this expression
        # into an anonymous bound parameter, typed relative to our type
        return BindParameter(None, obj,
                             _compared_to_operator=operator,
                             type_=type_,
                             _compared_to_type=self.type, unique=True)
    @property
    def expression(self):
        """Return a column expression.
        Part of the inspection interface; returns self.
        """
        return self
    @property
    def _select_iterable(self):
        return (self, )
    @util.memoized_property
    def base_columns(self):
        # proxy-chain roots: members of proxy_set that are not themselves
        # proxies for something else
        return util.column_set(c for c in self.proxy_set
                               if not hasattr(c, '_proxies'))
    @util.memoized_property
    def proxy_set(self):
        # transitive closure of this column plus everything it proxies
        s = util.column_set([self])
        if hasattr(self, '_proxies'):
            for c in self._proxies:
                s.update(c.proxy_set)
        return s
    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""
        return bool(self.proxy_set.intersection(othercolumn.proxy_set))
    def _compare_name_for_result(self, other):
        """Return True if the given column element compares to this one
        when targeting within a result row."""
        return hasattr(other, 'name') and hasattr(self, 'name') and \
            other.name == self.name
    def _make_proxy(
            self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.
        """
        if name is None:
            name = self.anon_label
            if self.key:
                key = self.key
            else:
                try:
                    key = str(self)
                except exc.UnsupportedCompilationError:
                    key = self.anon_label
        else:
            key = name
        co = ColumnClause(
            _as_truncated(name) if name_is_truncatable else name,
            type_=getattr(self, 'type', None),
            _selectable=selectable
        )
        co._proxies = [self]
        if selectable._is_clone_of is not None:
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co
    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.
        Special arguments understood:
        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())
        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass
          the comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.
        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)
        # check EVERY candidate before concluding inequality; previously an
        # ``else: return False`` attached to the if/elif caused the loop to
        # bail out after examining only the first candidate, which defeated
        # the documented "any of the columns in the set" semantics.
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif hash(oth) == hash(self):
                return True
        return False
    def cast(self, type_):
        """Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
        This is a shortcut to the :func:`~.expression.cast` function.
        .. versionadded:: 1.0.7
        """
        return Cast(self, type_)
    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.
        This is a shortcut to the :func:`~.expression.label` function.
        if 'name' is None, an anonymous label name will be generated.
        """
        return Label(name, self, self.type)
    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.
        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.
        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.
        """
        # resolve back to the original (pre-clone) element so that all
        # clones share one stable anonymous label
        while self._is_clone_of is not None:
            self = self._is_clone_of
        return _anonymous_label(
            '%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))
        )
class BindParameter(ColumnElement):
    """Represent a "bound expression".
    :class:`.BindParameter` is invoked explicitly using the
    :func:`.bindparam` function, as in::
        from sqlalchemy import bindparam
        stmt = select([users_table]).\\
            where(users_table.c.name == bindparam('username'))
    Detailed discussion of how :class:`.BindParameter` is used is
    at :func:`.bindparam`.
    .. seealso::
        :func:`.bindparam`
    """
    __visit_name__ = 'bindparam'
    _is_crud = False
    def __init__(self, key, value=NO_ARG, type_=None,
                 unique=False, required=NO_ARG,
                 quote=None, callable_=None,
                 isoutparam=False,
                 _compared_to_operator=None,
                 _compared_to_type=None):
        """Produce a "bound expression".
        The return value is an instance of :class:`.BindParameter`; this
        is a :class:`.ColumnElement` subclass which represents a so-called
        "placeholder" value in a SQL expression, the value of which is
        supplied at the point at which the statement in executed against a
        database connection.
        In SQLAlchemy, the :func:`.bindparam` construct has
        the ability to carry along the actual value that will be ultimately
        used at expression time.  In this way, it serves not just as
        a "placeholder" for eventual population, but also as a means of
        representing so-called "unsafe" values which should not be rendered
        directly in a SQL statement, but rather should be passed along
        to the :term:`DBAPI` as values which need to be correctly escaped
        and potentially handled for type-safety.
        When using :func:`.bindparam` explicitly, the use case is typically
        one of traditional deferment of parameters; the :func:`.bindparam`
        construct accepts a name which can then be referred to at execution
        time::
            from sqlalchemy import bindparam
            stmt = select([users_table]).\\
                where(users_table.c.name == bindparam('username'))
        The above statement, when rendered, will produce SQL similar to::
            SELECT id, name FROM user WHERE name = :username
        In order to populate the value of ``:username`` above, the value
        would typically be applied at execution time to a method
        like :meth:`.Connection.execute`::
            result = connection.execute(stmt, username='wendy')
        Explicit use of :func:`.bindparam` is also common when producing
        UPDATE or DELETE statements that are to be invoked multiple times,
        where the WHERE criterion of the statement is to change on each
        invocation, such as::
            stmt = (users_table.update().
                    where(user_table.c.name == bindparam('username')).
                    values(fullname=bindparam('fullname'))
                    )
            connection.execute(
                stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
                       {"username": "jack", "fullname": "Jack Jones"},
                       ]
            )
        SQLAlchemy's Core expression system makes wide use of
        :func:`.bindparam` in an implicit sense.   It is typical that Python
        literal values passed to virtually all SQL expression functions are
        coerced into fixed :func:`.bindparam` constructs.  For example, given
        a comparison operation such as::
            expr = users_table.c.name == 'Wendy'
        The above expression will produce a :class:`.BinaryExpression`
        construct, where the left side is the :class:`.Column` object
        representing the ``name`` column, and the right side is a
        :class:`.BindParameter` representing the literal value::
            print(repr(expr.right))
            BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
        The expression above will render SQL such as::
            user.name = :name_1
        Where the ``:name_1`` parameter name is an anonymous name.  The
        actual string ``Wendy`` is not in the rendered string, but is carried
        along where it is later used within statement execution.  If we
        invoke a statement like the following::
            stmt = select([users_table]).where(users_table.c.name == 'Wendy')
            result = connection.execute(stmt)
        We would see SQL logging output as::
            SELECT "user".id, "user".name
            FROM "user"
            WHERE "user".name = %(name_1)s
            {'name_1': 'Wendy'}
        Above, we see that ``Wendy`` is passed as a parameter to the database,
        while the placeholder ``:name_1`` is rendered in the appropriate form
        for the target database, in this case the Postgresql database.
        Similarly, :func:`.bindparam` is invoked automatically
        when working with :term:`CRUD` statements as far as the "VALUES"
        portion is concerned.   The :func:`.insert` construct produces an
        ``INSERT`` expression which will, at statement execution time,
        generate bound placeholders based on the arguments passed, as in::
            stmt = users_table.insert()
            result = connection.execute(stmt, name='Wendy')
        The above will produce SQL output as::
            INSERT INTO "user" (name) VALUES (%(name)s)
            {'name': 'Wendy'}
        The :class:`.Insert` construct, at compilation/execution time,
        rendered a single :func:`.bindparam` mirroring the column
        name ``name`` as a result of the single ``name`` parameter
        we passed to the :meth:`.Connection.execute` method.
        :param key:
          the key (e.g. the name) for this bind param.
          Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`BindParameter` objects exist with the same
          key, or if its length is too long and truncation is
          required.
        :param value:
          Initial value for this bind param.  Will be used at statement
          execution time as the value for this parameter passed to the
          DBAPI, if no other value is indicated to the statement execution
          method for this particular parameter name.  Defaults to ``None``.
        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still desirable.
        :param type\_:
          A :class:`.TypeEngine` class or instance representing an optional
          datatype for this :func:`.bindparam`.  If not passed, a type
          may be determined automatically for the bind, based on the given
          value; for example, trivial Python types such as ``str``,
          ``int``, ``bool``
          may result in the :class:`.String`, :class:`.Integer` or
          :class:`.Boolean` types being autoamtically selected.
          The type of a :func:`.bindparam` is significant especially in that
          the type will apply pre-processing to the value before it is
          passed to the database.  For example, a :func:`.bindparam` which
          refers to a datetime value, and is specified as holding the
          :class:`.DateTime` type, may apply conversion needed to the
          value (such as stringification on SQLite) before passing the value
          to the database.
        :param unique:
          if True, the key name of this :class:`.BindParameter` will be
          modified if another :class:`.BindParameter` of the same name
          already has been located within the containing
          expression.  This flag is used generally by the internals
          when producing so-called "anonymous" bound expressions, it
          isn't generally applicable to explicitly-named :func:`.bindparam`
          constructs.
        :param required:
          If ``True``, a value is required at execution time.  If not passed,
          it defaults to ``True`` if neither :paramref:`.bindparam.value`
          or :paramref:`.bindparam.callable` were passed.  If either of these
          parameters are present, then :paramref:`.bindparam.required`
          defaults to ``False``.
          .. versionchanged:: 0.8 If the ``required`` flag is not specified,
             it will be set automatically to ``True`` or ``False`` depending
             on whether or not the ``value`` or ``callable`` parameters
             were specified.
        :param quote:
          True if this parameter name requires quoting and is not
          currently known as a SQLAlchemy reserved word; this currently
          only applies to the Oracle backend, where bound names must
          sometimes be quoted.
        :param isoutparam:
          if True, the parameter should be treated like a stored procedure
          "OUT" parameter.  This applies to backends such as Oracle which
          support OUT parameters.
        .. seealso::
            :ref:`coretutorial_bind_param`
            :ref:`coretutorial_insert_expressions`
            :func:`.outparam`
        """
        # passing a ColumnClause as the key borrows its name and type
        if isinstance(key, ColumnClause):
            type_ = key.type
            key = key.key
        # a value is "required" at execution time only when neither an
        # initial value nor a callable was supplied
        if required is NO_ARG:
            required = (value is NO_ARG and callable_ is None)
        if value is NO_ARG:
            value = None
        if quote is not None:
            key = quoted_name(key, quote)
        # unique binds get an anonymous key incorporating id(self) so
        # that same-named parameters never collide within one statement
        if unique:
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                                                        or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                                               % id(self))
        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key
        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        # type resolution: explicit type wins; otherwise derive from the
        # expression this bind is compared against, or from the value
        if type_ is None:
            if _compared_to_type is not None:
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                self.type = type_api._resolve_value_to_type(value)
        elif isinstance(type_, type):
            self.type = type_()
        else:
            self.type = type_
    def _with_value(self, value):
        """Return a copy of this :class:`.BindParameter` with the given value
        set.
        """
        cloned = self._clone()
        cloned.value = value
        cloned.callable = None
        cloned.required = False
        # re-derive the type from the new value if none was established
        if cloned.type is type_api.NULLTYPE:
            cloned.type = type_api._resolve_value_to_type(value)
        return cloned
    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.
        The ``callable`` value will be evaluated
        and returned if present, else ``value``.
        """
        if self.callable:
            return self.callable()
        else:
            return self.value
    def _clone(self):
        c = ClauseElement._clone(self)
        # a unique bind must get a fresh anonymous key on each clone,
        # regenerated from the originally-passed key
        if self.unique:
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                                                     or 'param'))
        return c
    def _convert_to_unique(self):
        # idempotent: flips a non-unique bind to unique, anonymizing its key
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label(
                '%%(%d %s)s' % (id(self), self._orig_key or 'param'))
    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""
        # keys are ignored here; two binds compare on type affinity,
        # value and callable only
        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value \
            and self.callable == other.callable
    def __getstate__(self):
        """execute a deferred value for serialization purposes."""
        d = self.__dict__.copy()
        v = self.value
        # callables can't be pickled reliably; freeze the current value
        if self.callable:
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d
    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                                                    self.value, self.type)
class TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement.
    Used by the ``Case`` statement.
    """
    __visit_name__ = 'typeclause'
    def __init__(self, type):
        # NOTE: the parameter intentionally shadows the ``type`` builtin;
        # renaming it would break any caller passing it by keyword.
        self.type = type
class TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.
    E.g.::
        from sqlalchemy import text
        t = text("SELECT * FROM users")
        result = connection.execute(t)
    The :class:`.Text` construct is produced using the :func:`.text`
    function; see that function for full documentation.
    .. seealso::
        :func:`.text`
    """
    __visit_name__ = 'textclause'
    # Matches ``:name`` bind-parameter markers.  The negative lookbehind
    # skips markers preceded by ":", a word character, or a backslash
    # (\x5c) -- i.e. "::" casts and escaped colons; the negative lookahead
    # likewise skips a marker immediately followed by another colon.
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    # text() statements may participate in "autocommit"; the decision is
    # deferred to statement-compile time via the PARSE_AUTOCOMMIT symbol.
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})
    @property
    def _select_iterable(self):
        # a text fragment acts as its own single "column" in a SELECT-like
        # context.
        return (self,)
    @property
    def selectable(self):
        # allows text() to be considered by
        # _interpret_as_from
        return self
    _hide_froms = []
    # help in those cases where text() is
    # interpreted in a column expression situation
    key = _label = _resolve_label = None
    _allow_label_resolve = False
    def __init__(
            self,
            text,
            bind=None):
        self._bind = bind
        self._bindparams = {}
        def repl(m):
            # register an (empty) BindParameter for each ":name" found;
            # the rendered text itself is left unchanged.
            self._bindparams[m.group(1)] = BindParameter(m.group(1))
            return ':%s' % m.group(1)
        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)
    @classmethod
    def _create_text(self, text, bind=None, bindparams=None,
                     typemap=None, autocommit=None):
        """Construct a new :class:`.TextClause` clause, representing
        a textual SQL string directly.
        E.g.::
            from sqlalchemy import text
            t = text("SELECT * FROM users")
            result = connection.execute(t)
        The advantages :func:`.text` provides over a plain string are
        backend-neutral support for bind parameters, per-statement
        execution options, as well as
        bind parameter and result-column typing behavior, allowing
        SQLAlchemy type constructs to play a role when executing
        a statement that is specified literally.  The construct can also
        be provided with a ``.c`` collection of column elements, allowing
        it to be embedded in other SQL expression constructs as a subquery.
        Bind parameters are specified by name, using the format ``:name``.
        E.g.::
            t = text("SELECT * FROM users WHERE id=:user_id")
            result = connection.execute(t, user_id=12)
        For SQL statements where a colon is required verbatim, as within
        an inline string, use a backslash to escape::
            t = text("SELECT * FROM users WHERE name='\\:username'")
        The :class:`.TextClause` construct includes methods which can
        provide information about the bound parameters as well as the column
        values which would be returned from the textual statement, assuming
        it's an executable SELECT type of statement.  The
        :meth:`.TextClause.bindparams` method is used to provide bound
        parameter detail, and :meth:`.TextClause.columns` method allows
        specification of return columns including names and types::
            t = text("SELECT * FROM users WHERE id=:user_id").\\
                bindparams(user_id=7).\\
                columns(id=Integer, name=String)
            for id, name in connection.execute(t):
                print(id, name)
        The :func:`.text` construct is used in cases when
        a literal string SQL fragment is specified as part of a larger query,
        such as for the WHERE clause of a SELECT statement::
            s = select([users.c.id, users.c.name]).where(text("id=:user_id"))
            result = connection.execute(s, user_id=12)
        :func:`.text` is also used for the construction
        of a full, standalone statement using plain text.
        As such, SQLAlchemy refers
        to it as an :class:`.Executable` object, and it supports
        the :meth:`Executable.execution_options` method.  For example,
        a :func:`.text` construct that should be subject to "autocommit"
        can be set explicitly so using the
        :paramref:`.Connection.execution_options.autocommit` option::
            t = text("EXEC my_procedural_thing()").\\
                execution_options(autocommit=True)
        Note that SQLAlchemy's usual "autocommit" behavior applies to
        :func:`.text` constructs implicitly - that is, statements which begin
        with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
        or a variety of other phrases specific to certain backends, will
        be eligible for autocommit if no transaction is in progress.
        :param text:
          the text of the SQL statement to be created.  use ``:<param>``
          to specify bind parameters; they will be compiled to their
          engine-specific format.
        :param autocommit:
          Deprecated.  Use .execution_options(autocommit=<True|False>)
          to set the autocommit option.
        :param bind:
          an optional connection or engine to be used for this text query.
        :param bindparams:
          Deprecated.  A list of :func:`.bindparam` instances used to
          provide information about parameters embedded in the statement.
          This argument now invokes the :meth:`.TextClause.bindparams`
          method on the construct before returning it.  E.g.::
              stmt = text("SELECT * FROM table WHERE id=:id",
                          bindparams=[bindparam('id', value=5, type_=Integer)])
          Is equivalent to::
              stmt = text("SELECT * FROM table WHERE id=:id").\\
                  bindparams(bindparam('id', value=5, type_=Integer))
          .. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
             supersedes the ``bindparams`` argument to :func:`.text`.
        :param typemap:
          Deprecated.  A dictionary mapping the names of columns
          represented in the columns clause of a ``SELECT`` statement
          to type objects,
          which will be used to perform post-processing on columns within
          the result set.  This parameter now invokes the
          :meth:`.TextClause.columns` method, which returns a
          :class:`.TextAsFrom` construct that gains a ``.c`` collection and
          can be embedded in other expressions.  E.g.::
              stmt = text("SELECT * FROM table",
                          typemap={'id': Integer, 'name': String},
                          )
          Is equivalent to::
              stmt = text("SELECT * FROM table").columns(id=Integer,
                                                         name=String)
          Or alternatively::
              from sqlalchemy.sql import column
              stmt = text("SELECT * FROM table").columns(
                  column('id', Integer),
                  column('name', String)
              )
          .. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
             supersedes the ``typemap`` argument to :func:`.text`.
        .. seealso::
            :ref:`sqlexpression_text` - in the Core tutorial
            :ref:`orm_tutorial_literal_sql` - in the ORM tutorial
        """
        # NOTE: this is a classmethod whose first argument is (historically)
        # named ``self`` rather than ``cls``; it is invoked via the public
        # text() function.
        stmt = TextClause(text, bind=bind)
        # the deprecated bindparams/typemap/autocommit arguments are each
        # translated into the modern generative-method equivalents.
        if bindparams:
            stmt = stmt.bindparams(*bindparams)
        if typemap:
            stmt = stmt.columns(**typemap)
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated.  '
                                 'Use .execution_options(autocommit=True)')
            stmt = stmt.execution_options(autocommit=autocommit)
        return stmt
    @_generative
    def bindparams(self, *binds, **names_to_values):
        """Establish the values and/or types of bound parameters within
        this :class:`.TextClause` construct.
        Given a text construct such as::
            from sqlalchemy import text
            stmt = text("SELECT id, name FROM user WHERE name=:name "
                        "AND timestamp=:timestamp")
        the :meth:`.TextClause.bindparams` method can be used to establish
        the initial value of ``:name`` and ``:timestamp``,
        using simple keyword arguments::
            stmt = stmt.bindparams(name='jack',
                                   timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
        Where above, new :class:`.BindParameter` objects
        will be generated with the names ``name`` and ``timestamp``, and
        values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
        respectively.  The types will be
        inferred from the values given, in this case :class:`.String` and
        :class:`.DateTime`.
        When specific typing behavior is needed, the positional ``*binds``
        argument can be used in which to specify :func:`.bindparam` constructs
        directly.  These constructs must include at least the ``key``
        argument, then an optional value and type::
            from sqlalchemy import bindparam
            stmt = stmt.bindparams(
                bindparam('name', value='jack', type_=String),
                bindparam('timestamp', type_=DateTime)
            )
        Above, we specified the type of :class:`.DateTime` for the
        ``timestamp`` bind, and the type of :class:`.String` for the ``name``
        bind.  In the case of ``name`` we also set the default value of
        ``"jack"``.
        Additional bound parameters can be supplied at statement execution
        time, e.g.::
            result = connection.execute(stmt,
                timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
        The :meth:`.TextClause.bindparams` method can be called repeatedly,
        where it will re-use existing :class:`.BindParameter` objects to add
        new information.  For example, we can call
        :meth:`.TextClause.bindparams` first with typing information, and a
        second time with value information, and it will be combined::
            stmt = text("SELECT id, name FROM user WHERE name=:name "
                        "AND timestamp=:timestamp")
            stmt = stmt.bindparams(
                bindparam('name', type_=String),
                bindparam('timestamp', type_=DateTime)
            )
            stmt = stmt.bindparams(
                name='jack',
                timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
            )
        .. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method
           supersedes the argument ``bindparams`` passed to
           :func:`~.expression.text`.
        """
        # @_generative means ``self`` here is already a copy of the
        # original construct; this method mutates the copy in place and
        # the decorator returns it, so no explicit return is needed.
        self._bindparams = new_params = self._bindparams.copy()
        for bind in binds:
            # positional bindparam() constructs replace the placeholder
            # parameter of the same key wholesale.
            try:
                existing = new_params[bind.key]
            except KeyError:
                raise exc.ArgumentError(
                    "This text() construct doesn't define a "
                    "bound parameter named %r" % bind.key)
            else:
                new_params[existing.key] = bind
        for key, value in names_to_values.items():
            # keyword arguments only supply a value, preserving any type
            # information already established on the existing parameter.
            try:
                existing = new_params[key]
            except KeyError:
                raise exc.ArgumentError(
                    "This text() construct doesn't define a "
                    "bound parameter named %r" % key)
            else:
                new_params[key] = existing._with_value(value)
    @util.dependencies('sqlalchemy.sql.selectable')
    def columns(self, selectable, *cols, **types):
        """Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
        object that can be embedded into another statement.
        This function essentially bridges the gap between an entirely
        textual SELECT statement and the SQL expression language concept
        of a "selectable"::
            from sqlalchemy.sql import column, text
            stmt = text("SELECT id, name FROM some_table")
            stmt = stmt.columns(column('id'), column('name')).alias('st')
            stmt = select([mytable]).\\
                select_from(
                    mytable.join(stmt, mytable.c.name == stmt.c.name)
                ).where(stmt.c.id > 5)
        Above, we pass a series of :func:`.column` elements to the
        :meth:`.TextClause.columns` method positionally.  These :func:`.column`
        elements now become first class elements upon the :attr:`.TextAsFrom.c`
        column collection, just like any other selectable.
        The column expressions we pass to :meth:`.TextClause.columns` may
        also be typed; when we do so, these :class:`.TypeEngine` objects become
        the effective return type of the column, so that SQLAlchemy's
        result-set-processing systems may be used on the return values.
        This is often needed for types such as date or boolean types, as well
        as for unicode processing on some dialect configurations::
            stmt = text("SELECT id, name, timestamp FROM some_table")
            stmt = stmt.columns(
                column('id', Integer),
                column('name', Unicode),
                column('timestamp', DateTime)
            )
            for id, name, timestamp in connection.execute(stmt):
                print(id, name, timestamp)
        As a shortcut to the above syntax, keyword arguments referring to
        types alone may be used, if only type conversion is needed::
            stmt = text("SELECT id, name, timestamp FROM some_table")
            stmt = stmt.columns(
                id=Integer,
                name=Unicode,
                timestamp=DateTime
            )
            for id, name, timestamp in connection.execute(stmt):
                print(id, name, timestamp)
        The positional form of :meth:`.TextClause.columns` also provides
        the unique feature of **positional column targeting**, which is
        particularly useful when using the ORM with complex textual queries.
        If we specify the columns from our model to :meth:`.TextClause.columns`,
        the result set will match to those columns positionally, meaning the
        name or origin of the column in the textual SQL doesn't matter::
            stmt = text("SELECT users.id, addresses.id, users.id, "
                        "users.name, addresses.email_address AS email "
                        "FROM users JOIN addresses ON users.id=addresses.user_id "
                        "WHERE users.id = 1").columns(
                User.id,
                Address.id,
                Address.user_id,
                User.name,
                Address.email_address
            )
            query = session.query(User).from_statement(stmt).options(
                contains_eager(User.addresses))
        .. versionadded:: 1.1 the :meth:`.TextClause.columns` method now
           offers positional column targeting in the result set when
           the column expressions are passed purely positionally.
        The :meth:`.TextClause.columns` method provides a direct
        route to calling :meth:`.FromClause.alias` as well as
        :meth:`.SelectBase.cte` against a textual SELECT statement::
            stmt = stmt.columns(id=Integer, name=String).cte('st')
            stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
        .. versionadded:: 0.9.0 :func:`.text` can now be converted into a
           fully featured "selectable" construct using the
           :meth:`.TextClause.columns` method.  This method supersedes the
           ``typemap`` argument to :func:`.text`.
        """
        # positional columns whose key also appears in **types get that
        # type applied; others pass through unchanged.
        positional_input_cols = [
            ColumnClause(col.key, types.pop(col.key))
            if col.key in types
            else col
            for col in cols
        ]
        keyed_input_cols = [
            ColumnClause(key, type_) for key, type_ in types.items()]
        # positional result targeting is only enabled when columns were
        # given purely positionally.
        return selectable.TextAsFrom(
            self,
            positional_input_cols + keyed_input_cols,
            positional=bool(positional_input_cols) and not keyed_input_cols)
    @property
    def type(self):
        # a text fragment has no SQL type of its own.
        return type_api.NULLTYPE
    @property
    def comparator(self):
        return self.type.comparator_factory(self)
    def self_group(self, against=None):
        # parenthesize only when used as the right side of an IN.
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self
    def _copy_internals(self, clone=_clone, **kw):
        self._bindparams = dict((b.key, clone(b, **kw))
                                for b in self._bindparams.values())
    def get_children(self, **kwargs):
        return list(self._bindparams.values())
    def compare(self, other):
        # two text() constructs compare on their raw SQL string only.
        return isinstance(other, TextClause) and other.text == self.text
class Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.
    :class:`.Null` is accessed as a constant via the
    :func:`.null` function.
    """
    __visit_name__ = 'null'
    @util.memoized_property
    def type(self):
        # NULL carries no SQL type; computed once, then cached.
        return type_api.NULLTYPE
    @classmethod
    def _instance(cls):
        """Return a constant :class:`.Null` construct."""
        return Null()
    def compare(self, other):
        # all Null constructs are interchangeable.
        return isinstance(other, Null)
class False_(ColumnElement):
    """Represent the ``false`` keyword, or equivalent, in a SQL statement.
    :class:`.False_` is accessed as a constant via the
    :func:`.false` function.
    """
    __visit_name__ = 'false'
    @util.memoized_property
    def type(self):
        return type_api.BOOLEANTYPE
    def _negate(self):
        # NOT false == true
        return True_()
    @classmethod
    def _instance(cls):
        """Return a :class:`.False_` construct.
        E.g.::
            >>> from sqlalchemy import false
            >>> print select([t.c.x]).where(false())
            SELECT x FROM t WHERE false
        A backend which does not support true/false constants will render as
        an expression against 1 or 0::
            >>> print select([t.c.x]).where(false())
            SELECT x FROM t WHERE 0 = 1
        The :func:`.true` and :func:`.false` constants also feature
        "short circuit" operation within an :func:`.and_` or :func:`.or_`
        conjunction::
            >>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
            SELECT x FROM t WHERE true
            >>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
            SELECT x FROM t WHERE false
        .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
           better integrated behavior within conjunctions and on dialects
           that don't support true/false constants.
        .. seealso::
            :func:`.true`
        """
        return False_()
    def compare(self, other):
        # all False_ constructs are interchangeable.
        return isinstance(other, False_)
class True_(ColumnElement):
    """Represent the ``true`` keyword, or equivalent, in a SQL statement.
    :class:`.True_` is accessed as a constant via the
    :func:`.true` function.
    """
    __visit_name__ = 'true'
    @util.memoized_property
    def type(self):
        return type_api.BOOLEANTYPE
    def _negate(self):
        # NOT true == false
        return False_()
    @classmethod
    def _ifnone(cls, other):
        # substitute a constant "true" for a missing (None) expression.
        if other is None:
            return cls._instance()
        else:
            return other
    @classmethod
    def _instance(cls):
        """Return a constant :class:`.True_` construct.
        E.g.::
            >>> from sqlalchemy import true
            >>> print select([t.c.x]).where(true())
            SELECT x FROM t WHERE true
        A backend which does not support true/false constants will render as
        an expression against 1 or 0::
            >>> print select([t.c.x]).where(true())
            SELECT x FROM t WHERE 1 = 1
        The :func:`.true` and :func:`.false` constants also feature
        "short circuit" operation within an :func:`.and_` or :func:`.or_`
        conjunction::
            >>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
            SELECT x FROM t WHERE true
            >>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
            SELECT x FROM t WHERE false
        .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
           better integrated behavior within conjunctions and on dialects
           that don't support true/false constants.
        .. seealso::
            :func:`.false`
        """
        return True_()
    def compare(self, other):
        # all True_ constructs are interchangeable.
        return isinstance(other, True_)
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.
    By default, is comma-separated, such as a column listing.
    """
    __visit_name__ = 'clauselist'
    def __init__(self, *clauses, **kwargs):
        # operator joining the child clauses; comma by default.
        self.operator = kwargs.pop('operator', operators.comma_op)
        # whether the list as a whole may be parenthesized by self_group().
        self.group = kwargs.pop('group', True)
        # whether each child is individually grouped against the operator.
        self.group_contents = kwargs.pop('group_contents', True)
        text_converter = kwargs.pop(
            '_literal_as_text',
            _expression_literal_as_text)
        if self.group_contents:
            self.clauses = [
                text_converter(clause).self_group(against=self.operator)
                for clause in clauses]
        else:
            self.clauses = [
                text_converter(clause)
                for clause in clauses]
    def __iter__(self):
        return iter(self.clauses)
    def __len__(self):
        return len(self.clauses)
    @property
    def _select_iterable(self):
        return iter(self)
    def append(self, clause):
        # NOTE(review): append() always coerces with _literal_as_text, even
        # when a custom ``_literal_as_text`` converter was supplied to the
        # constructor -- confirm this asymmetry is intentional.
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).
                                self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))
    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(clause, **kw) for clause in self.clauses]
    def get_children(self, **kwargs):
        return self.clauses
    @property
    def _from_objects(self):
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))
    def self_group(self, against=None):
        # parenthesize only when our operator binds less tightly than the
        # enclosing one.
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
        including a comparison of all the clause items.
        """
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            # a single-element list compares like its sole element.
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses) and \
                self.operator is other.operator:
            if self.operator in (operators.and_, operators.or_):
                # AND/OR are commutative: match clauses pairwise in any
                # order, consuming each match from ``other`` at most once.
                completed = set()
                for clause in self.clauses:
                    for other_clause in set(other.clauses).difference(completed):
                        if clause.compare(other_clause, **kw):
                            completed.add(other_clause)
                            break
                return len(completed) == len(other.clauses)
            else:
                # order matters for every other operator: positional compare.
                for i in range(0, len(self.clauses)):
                    if not self.clauses[i].compare(other.clauses[i], **kw):
                        return False
                else:
                    return True
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    __visit_name__ = 'clauselist'
    def __init__(self, *arg, **kw):
        # instances are built only via _construct(); see and_() / or_().
        raise NotImplementedError(
            "BooleanClauseList has a private constructor")
    @classmethod
    def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
        """Build an AND/OR clause list with short-circuit simplification.

        ``continue_on`` is the conjunction's neutral constant class (e.g.
        ``True_`` for AND) whose instances are simply dropped; ``skip_on``
        is the absorbing constant class (e.g. ``False_`` for AND) which
        collapses the whole expression to that single clause.
        """
        convert_clauses = []
        clauses = [
            _expression_literal_as_text(clause)
            for clause in
            util.coerce_generator_arg(clauses)
        ]
        for clause in clauses:
            if isinstance(clause, continue_on):
                continue
            elif isinstance(clause, skip_on):
                return clause.self_group(against=operators._asbool)
            convert_clauses.append(clause)
        if len(convert_clauses) == 1:
            # a single remaining clause needs no list wrapper.
            return convert_clauses[0].self_group(against=operators._asbool)
        elif not convert_clauses and clauses:
            # everything was a neutral constant; keep one representative.
            return clauses[0].self_group(against=operators._asbool)
        convert_clauses = [c.self_group(against=operator)
                           for c in convert_clauses]
        # bypass __init__ (which raises) and assemble the state directly.
        self = cls.__new__(cls)
        self.clauses = convert_clauses
        self.group = True
        self.operator = operator
        self.group_contents = True
        self.type = type_api.BOOLEANTYPE
        return self
    @classmethod
    def and_(cls, *clauses):
        """Produce a conjunction of expressions joined by ``AND``.
        E.g.::
            from sqlalchemy import and_
            stmt = select([users_table]).where(
                            and_(
                                users_table.c.name == 'wendy',
                                users_table.c.enrolled == True
                            )
                        )
        The :func:`.and_` conjunction is also available using the
        Python ``&`` operator (though note that compound expressions
        need to be parenthesized in order to function with Python
        operator precedence behavior)::
            stmt = select([users_table]).where(
                            (users_table.c.name == 'wendy') &
                            (users_table.c.enrolled == True)
                        )
        The :func:`.and_` operation is also implicit in some cases;
        the :meth:`.Select.where` method for example can be invoked multiple
        times against a statement, which will have the effect of each
        clause being combined using :func:`.and_`::
            stmt = select([users_table]).\\
                        where(users_table.c.name == 'wendy').\\
                        where(users_table.c.enrolled == True)
        .. seealso::
            :func:`.or_`
        """
        # True_ instances are dropped; a False_ short-circuits the whole AND.
        return cls._construct(operators.and_, True_, False_, *clauses)
    @classmethod
    def or_(cls, *clauses):
        """Produce a conjunction of expressions joined by ``OR``.
        E.g.::
            from sqlalchemy import or_
            stmt = select([users_table]).where(
                            or_(
                                users_table.c.name == 'wendy',
                                users_table.c.name == 'jack'
                            )
                        )
        The :func:`.or_` conjunction is also available using the
        Python ``|`` operator (though note that compound expressions
        need to be parenthesized in order to function with Python
        operator precedence behavior)::
            stmt = select([users_table]).where(
                            (users_table.c.name == 'wendy') |
                            (users_table.c.name == 'jack')
                        )
        .. seealso::
            :func:`.and_`
        """
        # False_ instances are dropped; a True_ short-circuits the whole OR.
        return cls._construct(operators.or_, False_, True_, *clauses)
    @property
    def _select_iterable(self):
        return (self, )
    def self_group(self, against=None):
        # an empty conjunction never needs parentheses.
        if not self.clauses:
            return self
        else:
            return super(BooleanClauseList, self).self_group(against=against)
    def _negate(self):
        return ClauseList._negate(self)
# Public module-level aliases for the conjunction constructors.
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
    """Represent a SQL tuple."""
    def __init__(self, *clauses, **kw):
        """Return a :class:`.Tuple`.
        Main usage is to produce a composite IN construct::
            from sqlalchemy import tuple_
            tuple_(table.c.col1, table.c.col2).in_(
                [(1, 2), (5, 12), (10, 19)]
            )
        .. warning::
            The composite IN construct is not supported by all backends,
            and is currently known to work on Postgresql and MySQL,
            but not SQLite.   Unsupported backends will raise
            a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
            an expression is invoked.
        """
        clauses = [_literal_as_binds(c) for c in clauses]
        # remember each element's type so composite comparison values can
        # be coerced element-wise (see _bind_param below).
        self._type_tuple = [arg.type for arg in clauses]
        # the tuple's own type defaults to its first element's type.
        self.type = kw.pop('type_', self._type_tuple[0]
                           if self._type_tuple else type_api.NULLTYPE)
        super(Tuple, self).__init__(*clauses, **kw)
    @property
    def _select_iterable(self):
        return (self, )
    def _bind_param(self, operator, obj, type_=None):
        # expand a composite comparison value into one bind per element,
        # pairing each element with the corresponding stored type.
        return Tuple(*[
            BindParameter(None, o, _compared_to_operator=operator,
                          _compared_to_type=compared_to_type, unique=True,
                          type_=type_)
            for o, compared_to_type in zip(obj, self._type_tuple)
        ]).self_group()
class Case(ColumnElement):
    """Represent a ``CASE`` expression.
    :class:`.Case` is produced using the :func:`.case` factory function,
    as in::
        from sqlalchemy import case
        stmt = select([users_table]).\\
                    where(
                        case(
                            [
                                (users_table.c.name == 'wendy', 'W'),
                                (users_table.c.name == 'jack', 'J')
                            ],
                            else_='E'
                        )
                    )
    Details on :class:`.Case` usage is at :func:`.case`.
    .. seealso::
        :func:`.case`
    """
    __visit_name__ = 'case'
    def __init__(self, whens, value=None, else_=None):
        """Produce a ``CASE`` expression.
        The ``CASE`` construct in SQL is a conditional object that
        acts somewhat analogously to an "if/then" construct in other
        languages.  It returns an instance of :class:`.Case`.
        :func:`.case` in its usual form is passed a list of "when"
        constructs, that is, a list of conditions and results as tuples::
            from sqlalchemy import case
            stmt = select([users_table]).\\
                        where(
                            case(
                                [
                                    (users_table.c.name == 'wendy', 'W'),
                                    (users_table.c.name == 'jack', 'J')
                                ],
                                else_='E'
                            )
                        )
        The above statement will produce SQL resembling::
            SELECT id, name FROM user
            WHERE CASE
                WHEN (name = :name_1) THEN :param_1
                WHEN (name = :name_2) THEN :param_2
                ELSE :param_3
            END
        When simple equality expressions of several values against a single
        parent column are needed, :func:`.case` also has a "shorthand" format
        used via the
        :paramref:`.case.value` parameter, which is passed a column
        expression to be compared.  In this form, the :paramref:`.case.whens`
        parameter is passed as a dictionary containing expressions to be
        compared against keyed to result expressions.  The statement below is
        equivalent to the preceding statement::
            stmt = select([users_table]).\\
                        where(
                            case(
                                {"wendy": "W", "jack": "J"},
                                value=users_table.c.name,
                                else_='E'
                            )
                        )
        The values which are accepted as result values in
        :paramref:`.case.whens` as well as with :paramref:`.case.else_` are
        coerced from Python literals into :func:`.bindparam` constructs.
        SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted
        as well.  To coerce a literal string expression into a constant
        expression rendered inline, use the :func:`.literal_column` construct,
        as in::
            from sqlalchemy import case, literal_column
            case(
                [
                    (
                        orderline.c.qty > 100,
                        literal_column("'greaterthan100'")
                    ),
                    (
                        orderline.c.qty > 10,
                        literal_column("'greaterthan10'")
                    )
                ],
                else_=literal_column("'lessthan10'")
            )
        The above will render the given constants without using bound
        parameters for the result values (but still for the comparison
        values), as in::
            CASE
                WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
                WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
                ELSE 'lessthan10'
            END
        :param whens: The criteria to be compared against,
         :paramref:`.case.whens` accepts two different forms, based on
         whether or not :paramref:`.case.value` is used.
         In the first form, it accepts a list of 2-tuples; each 2-tuple
         consists of ``(<sql expression>, <value>)``, where the SQL
         expression is a boolean expression and "value" is a resulting value,
         e.g.::
            case([
                (users_table.c.name == 'wendy', 'W'),
                (users_table.c.name == 'jack', 'J')
            ])
         In the second form, it accepts a Python dictionary of comparison
         values mapped to a resulting value; this form requires
         :paramref:`.case.value` to be present, and values will be compared
         using the ``==`` operator, e.g.::
            case(
                {"wendy": "W", "jack": "J"},
                value=users_table.c.name
            )
        :param value: An optional SQL expression which will be used as a
          fixed "comparison point" for candidate values within a dictionary
          passed to :paramref:`.case.whens`.
        :param else\_: An optional SQL expression which will be the evaluated
          result of the ``CASE`` construct if all expressions within
          :paramref:`.case.whens` evaluate to false.  When omitted, most
          databases will produce a result of NULL if none of the "when"
          expressions evaluate to true.
        """
        try:
            # accept the {"comparison": result} dictionary shorthand; a
            # TypeError means ``whens`` was already a sequence of pairs.
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass
        if value is not None:
            # shorthand form: comparison keys are coerced to binds, since
            # they are compared against ``value``.
            whenlist = [
                (_literal_as_binds(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # full form: conditions must already be SQL expressions.
            whenlist = [
                (_no_literals(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        if whenlist:
            # the CASE expression takes its type from the last WHEN result.
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None
        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)
        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None
    def _copy_internals(self, clone=_clone, **kw):
        if self.value is not None:
            self.value = clone(self.value, **kw)
        self.whens = [(clone(x, **kw), clone(y, **kw))
                      for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)
    def get_children(self, **kwargs):
        # yield children in rendering order: value, each (when, then)
        # pair, then the ELSE expression.
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_
    @property
    def _from_objects(self):
        return list(itertools.chain(*[x._from_objects for x in
                                      self.get_children()]))
def literal_column(text, type_=None):
    """Produce a :class:`.ColumnClause` object that has the
    :paramref:`.column.is_literal` flag set to True.
    :func:`.literal_column` is similar to :func:`.column`, except that
    it is more often used as a "standalone" column expression that renders
    exactly as stated; while :func:`.column` stores a string name that
    will be assumed to be part of a table and may be quoted as such,
    :func:`.literal_column` can be that, or any other arbitrary column-oriented
    expression.
    :param text: the text of the expression; can be any SQL expression.
      Quoting rules will not be applied. To specify a column-name expression
      which should be subject to quoting rules, use the :func:`column`
      function.
    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      object which will
      provide result-set translation and additional expression semantics for
      this column. If left as None the type will be NullType.
    .. seealso::
        :func:`.column`
        :func:`.text`
        :ref:`sqlexpression_literal_column`
    """
    # is_literal=True suppresses quoting: the text renders exactly as given.
    return ColumnClause(text, type_=type_, is_literal=True)
class Cast(ColumnElement):
    """Represent a ``CAST`` expression.
    :class:`.Cast` is produced using the :func:`.cast` factory function,
    as in::
        from sqlalchemy import cast, Numeric
        stmt = select([
                    cast(product_table.c.unit_price, Numeric(10, 4))
                ])
    Details on :class:`.Cast` usage is at :func:`.cast`.
    .. seealso::
        :func:`.cast`
    """
    __visit_name__ = 'cast'
    def __init__(self, expression, type_):
        """Produce a ``CAST`` expression.
        :func:`.cast` returns an instance of :class:`.Cast`.
        E.g.::
            from sqlalchemy import cast, Numeric
            stmt = select([
                        cast(product_table.c.unit_price, Numeric(10, 4))
                    ])
        The above statement will produce SQL resembling::
            SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
        The :func:`.cast` function performs two distinct functions when
        used.  The first is that it renders the ``CAST`` expression within
        the resulting SQL string.  The second is that it associates the given
        type (e.g. :class:`.TypeEngine` class or instance) with the column
        expression on the Python side, which means the expression will take
        on the expression operator behavior associated with that type,
        as well as the bound-value handling and result-row-handling behavior
        of the type.
        .. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
           to the expression such that it takes effect on the bound-value,
           e.g. the Python-to-database direction, in addition to the
           result handling, e.g. database-to-Python, direction.
        An alternative to :func:`.cast` is the :func:`.type_coerce` function.
        This function performs the second task of associating an expression
        with a specific type, but does not render the ``CAST`` expression
        in SQL.
        :param expression: A SQL expression, such as a :class:`.ColumnElement`
         expression or a Python string which will be coerced into a bound
         literal value.
        :param type_: A :class:`.TypeEngine` class or instance indicating
         the type to which the ``CAST`` should apply.
        .. seealso::
            :func:`.type_coerce` - Python-side type coercion without emitting
            CAST.
        """
        self.type = type_api.to_instance(type_)
        # the target type is applied to the bound expression as well, so
        # bound-value handling matches the CAST's type.
        self.clause = _literal_as_binds(expression, type_=self.type)
        self.typeclause = TypeClause(self.type)
    def _copy_internals(self, clone=_clone, **kw):
        self.clause = clone(self.clause, **kw)
        self.typeclause = clone(self.typeclause, **kw)
    def get_children(self, **kwargs):
        return self.clause, self.typeclause
    @property
    def _from_objects(self):
        # the CAST contributes whatever FROM objects its inner clause does.
        return self.clause._from_objects
class TypeCoerce(ColumnElement):
    """Represent a Python-side type-coercion wrapper.

    :class:`.TypeCoerce` supplies the :func:`.expression.type_coerce`
    function; see that function for usage details.

    .. versionchanged:: 1.1 The :func:`.type_coerce` function now produces
       a persistent :class:`.TypeCoerce` wrapper object rather than
       translating the given object in place.

    .. seealso::

        :func:`.expression.type_coerce`

    """

    __visit_name__ = 'type_coerce'

    def __init__(self, expression, type_):
        """Associate a SQL expression with a particular type, without rendering
        ``CAST``.

        E.g.::

            from sqlalchemy import type_coerce

            stmt = select([
                type_coerce(log_table.date_string, StringDateTime())
            ])

        The above construct will produce a :class:`.TypeCoerce` object, which
        renders SQL that labels the expression, but otherwise does not
        modify its value on the SQL side::

            SELECT date_string AS anon_1 FROM log

        When result rows are fetched, the ``StringDateTime`` type
        will be applied to result rows on behalf of the ``date_string`` column.
        The rationale for the "anon_1" label is so that the type-coerced
        column remains separate in the list of result columns vs. other
        type-coerced or direct values of the target column.  In order to
        provide a named label for the expression, use
        :meth:`.ColumnElement.label`::

            stmt = select([
                type_coerce(
                    log_table.date_string, StringDateTime()).label('date')
            ])

        A type that features bound-value handling will also have that behavior
        take effect when literal values or :func:`.bindparam` constructs are
        passed to :func:`.type_coerce` as targets.
        For example, if a type implements the
        :meth:`.TypeEngine.bind_expression`
        method or :meth:`.TypeEngine.bind_processor` method or equivalent,
        these functions will take effect at statement compilation/execution
        time when a literal value is passed, as in::

            # bound-value handling of MyStringType will be applied to the
            # literal value "some string"
            stmt = select([type_coerce("some string", MyStringType)])

        :func:`.type_coerce` is similar to the :func:`.cast` function,
        except that it does not render the ``CAST`` expression in the
        resulting statement.

        :param expression: A SQL expression, such as a :class:`.ColumnElement`
         expression or a Python string which will be coerced into a bound
         literal value.

        :param type_: A :class:`.TypeEngine` class or instance indicating
         the type to which the expression is coerced.

        .. seealso::

            :func:`.cast`

        """
        # Normalize the type argument to a TypeEngine instance, then
        # coerce plain Python values into bound parameters carrying it.
        self.type = type_api.to_instance(type_)
        self.clause = _literal_as_binds(expression, type_=self.type)

    def _copy_internals(self, clone=_clone, **kw):
        # Clone the wrapped clause and discard the memoized
        # ``typed_expression`` so it is recomputed against the new clause.
        self.clause = clone(self.clause, **kw)
        self.__dict__.pop('typed_expression', None)

    def get_children(self, **kwargs):
        return self.clause,

    @property
    def _from_objects(self):
        return self.clause._from_objects

    @util.memoized_property
    def typed_expression(self):
        # For a bound parameter, hand back a copy stamped with the
        # coerced type so its bind-value handling applies; any other
        # clause passes through unchanged.
        if isinstance(self.clause, BindParameter):
            bp = self.clause._clone()
            bp.type = self.type
            return bp
        else:
            return self.clause
class Extract(ColumnElement):
    """SQL EXTRACT clause, rendering ``extract(field FROM expr)``."""

    __visit_name__ = 'extract'

    def __init__(self, field, expr, **kwargs):
        """Return a :class:`.Extract` construct.

        This is typically available as :func:`.extract`
        as well as ``func.extract`` from the
        :data:`.func` namespace.

        """
        self.field = field
        self.expr = _literal_as_binds(expr, None)
        # EXTRACT produces an integer result regardless of the field.
        self.type = type_api.INTEGERTYPE

    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)

    def get_children(self, **kwargs):
        return (self.expr,)

    @property
    def _from_objects(self):
        return self.expr._from_objects
class _label_reference(ColumnElement):
    """Wrap a column expression as it appears in a 'reference' context.

    This expression is any that includes an _order_by_label_element,
    which is a Label, or a DESC / ASC construct wrapping a Label.

    The production of _label_reference() should occur when an expression
    is added to this context; this includes the ORDER BY or GROUP BY of a
    SELECT statement, as well as a few other places, such as the ORDER BY
    within an OVER clause.

    """

    __visit_name__ = 'label_reference'

    def __init__(self, element):
        # The wrapped (label-bearing) column expression.
        self.element = element

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        # A label reference contributes nothing to the FROM list.
        return ()
class _textual_label_reference(ColumnElement):
    """Wrap a plain string used as a label reference in an ORDER BY /
    GROUP BY style context.

    """

    __visit_name__ = 'textual_label_reference'

    def __init__(self, element):
        # ``element`` is presumably a string label name — it is handed
        # to TextClause._create_text below; TODO confirm at call sites.
        self.element = element

    @util.memoized_property
    def _text_clause(self):
        # Lazily build (and cache) the TextClause form of the string.
        return TextClause._create_text(self.element)
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.

    A unary expression has a single column expression
    and an operator.  The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.

    :class:`.UnaryExpression` is the basis for several unary operators
    including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
    :func:`.nullsfirst` and :func:`.nullslast`.

    """

    __visit_name__ = 'unary'

    def __init__(self, element, operator=None, modifier=None,
                 type_=None, negate=None, wraps_column_expression=False):
        self.operator = operator
        self.modifier = modifier
        # Parenthesize the element as needed against whichever of
        # operator / modifier is in use.
        self.element = element.self_group(
            against=self.operator or self.modifier)
        self.type = type_api.to_instance(type_)
        # ``negate`` is the operator produced when this expression is
        # inverted (see _negate()).
        self.negate = negate
        self.wraps_column_expression = wraps_column_expression

    @classmethod
    def _create_nullsfirst(cls, column):
        """Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.

        :func:`.nullsfirst` is intended to modify the expression produced
        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
        should be handled when they are encountered during ordering::

            from sqlalchemy import desc, nullsfirst

            stmt = select([users_table]).\\
                order_by(nullsfirst(desc(users_table.c.name)))

        The SQL expression from the above would resemble::

            SELECT id, name FROM user ORDER BY name DESC NULLS FIRST

        Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
        invoked from the column expression itself using
        :meth:`.ColumnElement.nullsfirst`, rather than as its standalone
        function version, as in::

            stmt = (select([users_table]).
                    order_by(users_table.c.name.desc().nullsfirst())
                    )

        .. seealso::

            :func:`.asc`

            :func:`.desc`

            :func:`.nullslast`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_label_reference(column),
            modifier=operators.nullsfirst_op,
            wraps_column_expression=False)

    @classmethod
    def _create_nullslast(cls, column):
        """Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.

        :func:`.nullslast` is intended to modify the expression produced
        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
        should be handled when they are encountered during ordering::

            from sqlalchemy import desc, nullslast

            stmt = select([users_table]).\\
                order_by(nullslast(desc(users_table.c.name)))

        The SQL expression from the above would resemble::

            SELECT id, name FROM user ORDER BY name DESC NULLS LAST

        Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
        invoked from the column expression itself using
        :meth:`.ColumnElement.nullslast`, rather than as its standalone
        function version, as in::

            stmt = select([users_table]).\\
                order_by(users_table.c.name.desc().nullslast())

        .. seealso::

            :func:`.asc`

            :func:`.desc`

            :func:`.nullsfirst`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_label_reference(column),
            modifier=operators.nullslast_op,
            wraps_column_expression=False)

    @classmethod
    def _create_desc(cls, column):
        """Produce a descending ``ORDER BY`` clause element.

        e.g.::

            from sqlalchemy import desc

            stmt = select([users_table]).order_by(desc(users_table.c.name))

        will produce SQL as::

            SELECT id, name FROM user ORDER BY name DESC

        The :func:`.desc` function is a standalone version of the
        :meth:`.ColumnElement.desc` method available on all SQL expressions,
        e.g.::

            stmt = select([users_table]).order_by(users_table.c.name.desc())

        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
         with which to apply the :func:`.desc` operation.

        .. seealso::

            :func:`.asc`

            :func:`.nullsfirst`

            :func:`.nullslast`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_label_reference(column),
            modifier=operators.desc_op,
            wraps_column_expression=False)

    @classmethod
    def _create_asc(cls, column):
        """Produce an ascending ``ORDER BY`` clause element.

        e.g.::

            from sqlalchemy import asc

            stmt = select([users_table]).order_by(asc(users_table.c.name))

        will produce SQL as::

            SELECT id, name FROM user ORDER BY name ASC

        The :func:`.asc` function is a standalone version of the
        :meth:`.ColumnElement.asc` method available on all SQL expressions,
        e.g.::

            stmt = select([users_table]).order_by(users_table.c.name.asc())

        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
         with which to apply the :func:`.asc` operation.

        .. seealso::

            :func:`.desc`

            :func:`.nullsfirst`

            :func:`.nullslast`

            :meth:`.Select.order_by`

        """
        return UnaryExpression(
            _literal_as_label_reference(column),
            modifier=operators.asc_op,
            wraps_column_expression=False)

    @classmethod
    def _create_distinct(cls, expr):
        """Produce an column-expression-level unary ``DISTINCT`` clause.

        This applies the ``DISTINCT`` keyword to an individual column
        expression, and is typically contained within an aggregate function,
        as in::

            from sqlalchemy import distinct, func

            stmt = select([func.count(distinct(users_table.c.name))])

        The above would produce an expression resembling::

            SELECT COUNT(DISTINCT name) FROM user

        The :func:`.distinct` function is also available as a column-level
        method, e.g. :meth:`.ColumnElement.distinct`, as in::

            stmt = select([func.count(users_table.c.name.distinct())])

        The :func:`.distinct` operator is different from the
        :meth:`.Select.distinct` method of :class:`.Select`,
        which produces a ``SELECT`` statement
        with ``DISTINCT`` applied to the result set as a whole,
        e.g. a ``SELECT DISTINCT`` expression.  See that method for further
        information.

        .. seealso::

            :meth:`.ColumnElement.distinct`

            :meth:`.Select.distinct`

            :data:`.func`

        """
        expr = _literal_as_binds(expr)
        # DISTINCT preserves the type of the underlying expression.
        return UnaryExpression(
            expr, operator=operators.distinct_op,
            type_=expr.type, wraps_column_expression=False)

    @property
    def _order_by_label_element(self):
        # Only ASC / DESC wrappers propagate the label element of the
        # inner expression for ORDER BY label resolution.
        if self.modifier in (operators.desc_op, operators.asc_op):
            return self.element._order_by_label_element
        else:
            return None

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return self.element,

    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""

        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )

    def _negate(self):
        if self.negate is not None:
            # An explicit negation operator was supplied; swap
            # operator/negate to produce the inverted expression.
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type,
                wraps_column_expression=self.wraps_column_expression)
        elif self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
            # Boolean-typed expression with no explicit negation:
            # wrap in NOT.
            return UnaryExpression(
                self.self_group(against=operators.inv),
                operator=operators.inv,
                type_=type_api.BOOLEANTYPE,
                wraps_column_expression=self.wraps_column_expression,
                negate=None)
        else:
            return ClauseElement._negate(self)

    def self_group(self, against=None):
        # Parenthesize only when the surrounding operator takes
        # precedence over ours.
        if self.operator and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
class CollectionAggregate(UnaryExpression):
    """Forms the basis for right-hand collection operator modifiers
    ANY and ALL.

    The ANY and ALL keywords are available in different ways on different
    backends.  On Postgresql, they only work for an ARRAY type.  On
    MySQL, they only work for subqueries.

    """

    @classmethod
    def _create_any(cls, expr):
        """Produce an ANY expression.

        This may apply to an array type for some dialects (e.g. postgresql),
        or to a subquery for others (e.g. mysql).  e.g.::

            # postgresql '5 = ANY (somearray)'
            expr = 5 == any_(mytable.c.somearray)

            # mysql '5 = ANY (SELECT value FROM table)'
            expr = 5 == any_(select([table.c.value]))

        .. versionadded:: 1.1

        .. seealso::

            :func:`.expression.all_`

        """
        expr = _literal_as_binds(expr)
        # A selectable (e.g. a SELECT) must be coerced to a scalar
        # subquery before being used as the ANY operand.
        if expr.is_selectable and hasattr(expr, 'as_scalar'):
            expr = expr.as_scalar()
        expr = expr.self_group()
        return CollectionAggregate(
            expr, operator=operators.any_op,
            type_=type_api.NULLTYPE, wraps_column_expression=False)

    @classmethod
    def _create_all(cls, expr):
        """Produce an ALL expression.

        This may apply to an array type for some dialects (e.g. postgresql),
        or to a subquery for others (e.g. mysql).  e.g.::

            # postgresql '5 = ALL (somearray)'
            expr = 5 == all_(mytable.c.somearray)

            # mysql '5 = ALL (SELECT value FROM table)'
            expr = 5 == all_(select([table.c.value]))

        .. versionadded:: 1.1

        .. seealso::

            :func:`.expression.any_`

        """
        expr = _literal_as_binds(expr)
        # As with ANY, coerce a selectable to a scalar subquery first.
        if expr.is_selectable and hasattr(expr, 'as_scalar'):
            expr = expr.as_scalar()
        expr = expr.self_group()
        return CollectionAggregate(
            expr, operator=operators.all_op,
            type_=type_api.NULLTYPE, wraps_column_expression=False)

    # operate and reverse_operate are hardwired to
    # dispatch onto the type comparator directly, so that we can
    # ensure "reversed" behavior.
    def operate(self, op, *other, **kwargs):
        if not operators.is_comparison(op):
            raise exc.ArgumentError(
                "Only comparison operators may be used with ANY/ALL")
        # ANY/ALL always appears on the right side of the comparison,
        # so mirror the operator and flag the operation as reversed.
        kwargs['reverse'] = True
        return self.comparator.operate(operators.mirror(op), *other, **kwargs)

    def reverse_operate(self, op, other, **kwargs):
        # comparison operators should never call reverse_operate
        assert not operators.is_comparison(op)
        raise exc.ArgumentError(
            "Only comparison operators may be used with ANY/ALL")
class AsBoolean(UnaryExpression):
    # Wraps an expression so it renders as a boolean test; ``operator``
    # is applied to ``element`` and ``negate`` is its inverse.

    def __init__(self, element, operator, negate):
        self.element = element
        self.type = type_api.BOOLEANTYPE
        self.operator = operator
        self.negate = negate
        self.modifier = None
        self.wraps_column_expression = True

    def self_group(self, against=None):
        # Never needs parenthesization.
        return self

    def _negate(self):
        # TODO: this assumes the element is the True_() or False_()
        # object, but this assumption isn't enforced and
        # ColumnElement._negate() can send any number of expressions here
        return self.element._negate()
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.

    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expression::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b

    """

    __visit_name__ = 'binary'

    def __init__(self, left, right, operator, type_=None,
                 negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, util.string_types):
            operator = operators.custom_op(operator)
        # Retain the ungrouped operands for __bool__() below.
        self._orig = (left, right)
        self.left = left.self_group(against=operator)
        self.right = right.self_group(against=operator)
        self.operator = operator
        self.type = type_api.to_instance(type_)
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers

    def __bool__(self):
        # Truthiness is only defined for == / != expressions, where it
        # compares the hashes of the two original operands; anything
        # else has no Python boolean value.
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")

    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)

    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects

    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)

    def get_children(self, **kwargs):
        return self.left, self.right

    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""

        # For commutative operators (e.g. +, ==), the operands may also
        # match in swapped order.
        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )

    def self_group(self, against=None):
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self

    def _negate(self):
        if self.negate is not None:
            # Swap operator/negate to produce the inverse expression.
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=self.type,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Slice(ColumnElement):
    """Represent SQL for a Python array-slice object.

    This is not a specific SQL construct at this level, but
    may be interpreted by specific dialects, e.g. Postgresql.

    """

    __visit_name__ = 'slice'

    def __init__(self, start, stop, step):
        # Bounds are stored as given; no coercion is applied here.
        self.start = start
        self.stop = stop
        self.step = step
        self.type = type_api.NULLTYPE

    def self_group(self, against=None):
        # A Slice is only valid as the right side of an indexing
        # ([]) operation.
        assert against is operator.getitem
        return self
class IndexExpression(BinaryExpression):
    """Represent the class of expressions that are like an "index" operation.
    """

    # Marker subclass: adds no behavior beyond BinaryExpression.
    pass
class Grouping(ColumnElement):
    """Represent a grouping within a column expression"""

    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        # Inherit the inner element's type; fall back to NULLTYPE for
        # elements that carry no type attribute.
        self.type = getattr(element, 'type', type_api.NULLTYPE)

    def self_group(self, against=None):
        # Already parenthesized; no further grouping required.
        return self

    @property
    def _key_label(self):
        return self._label

    @property
    def _label(self):
        return getattr(self.element, '_label', None) or self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return self.element,

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # Proxy unknown attribute access through to the wrapped element.
        return getattr(self.element, attr)

    def __getstate__(self):
        # Explicit pickle state; required because __getattr__ proxying
        # would otherwise interfere with default pickling.
        return {'element': self.element, 'type': self.type}

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        return isinstance(other, Grouping) and \
            self.element.compare(other.element)
# Sentinels used by Over._interpret_range to represent the UNBOUNDED
# PRECEDING / FOLLOWING and CURRENT ROW window-frame bounds.
RANGE_UNBOUNDED = util.symbol("RANGE_UNBOUNDED")
RANGE_CURRENT = util.symbol("RANGE_CURRENT")
class Over(ColumnElement):
    """Represent an OVER clause.

    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.

    """

    __visit_name__ = 'over'

    # Default class-level values; overridden per-instance in __init__
    # when the corresponding argument is supplied.
    order_by = None
    partition_by = None

    def __init__(
            self, element, partition_by=None,
            order_by=None, range_=None, rows=None):
        """Produce an :class:`.Over` object against a function.

        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.

        :func:`~.expression.over` is usually called using
        the :meth:`.FunctionElement.over` method, e.g.::

            func.row_number().over(order_by=mytable.c.some_column)

        Would produce::

            ROW_NUMBER() OVER(ORDER BY some_column)

        Ranges are also possible using the :paramref:`.expression.over.range_`
        and :paramref:`.expression.over.rows` parameters.  These
        mutually-exclusive parameters each accept a 2-tuple, which contains
        a combination of integers and None::

            func.row_number().over(
                order_by=my_table.c.some_column, range_=(None, 0))

        The above would produce::

            ROW_NUMBER() OVER(ORDER BY some_column
            RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)

        A value of None indicates "unbounded", a
        value of zero indicates "current row", and negative / positive
        integers indicate "preceding" and "following":

        * RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING::

            func.row_number().over(order_by='x', range_=(-5, 10))

        * ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW::

            func.row_number().over(order_by='x', rows=(None, 0))

        * RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING::

            func.row_number().over(order_by='x', range_=(-2, None))

        .. versionadded:: 1.1 support for RANGE / ROWS within a window

        :param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
         or other compatible construct.
        :param partition_by: a column element or string, or a list
         of such, that will be used as the PARTITION BY clause
         of the OVER construct.
        :param order_by: a column element or string, or a list
         of such, that will be used as the ORDER BY clause
         of the OVER construct.
        :param range_: optional range clause for the window.  This is a
         tuple value which can contain integer values or None, and will
         render a RANGE BETWEEN PRECEDING / FOLLOWING clause

         .. versionadded:: 1.1

        :param rows: optional rows clause for the window.  This is a tuple
         value which can contain integer values or None, and will render
         a ROWS BETWEEN PRECEDING / FOLLOWING clause.

         .. versionadded:: 1.1

        This function is also available from the :data:`~.expression.func`
        construct itself via the :meth:`.FunctionElement.over` method.

        .. seealso::

            :data:`.expression.func`

            :func:`.expression.within_group`

        """
        self.element = element
        if order_by is not None:
            self.order_by = ClauseList(
                *util.to_list(order_by),
                _literal_as_text=_literal_as_label_reference)
        if partition_by is not None:
            self.partition_by = ClauseList(
                *util.to_list(partition_by),
                _literal_as_text=_literal_as_label_reference)

        # range_ and rows are mutually exclusive window-frame specs.
        if range_:
            self.range_ = self._interpret_range(range_)
            if rows:
                raise exc.ArgumentError(
                    "'range_' and 'rows' are mutually exclusive")
            else:
                self.rows = None
        elif rows:
            self.rows = self._interpret_range(rows)
            self.range_ = None
        else:
            self.rows = self.range_ = None

    def _interpret_range(self, range_):
        # Translate a 2-tuple of (preceding, following) bounds into
        # RANGE_UNBOUNDED / RANGE_CURRENT sentinels or literal()
        # integer magnitudes, validating the sign of each bound.
        if not isinstance(range_, tuple) or len(range_) != 2:
            raise exc.ArgumentError("2-tuple expected for range/rows")

        if range_[0] is None:
            preceding = RANGE_UNBOUNDED
        else:
            try:
                # NOTE(review): only ValueError is caught here; a
                # non-numeric, non-string value (e.g. a list) would
                # raise TypeError from int() instead — confirm intended.
                preceding = int(range_[0])
            except ValueError:
                raise exc.ArgumentError(
                    "Integer or None expected for preceding value")
            else:
                if preceding > 0:
                    raise exc.ArgumentError(
                        "Preceding value must be a "
                        "negative integer, zero, or None")
                elif preceding < 0:
                    # Rendered as "<n> PRECEDING"; store the magnitude.
                    preceding = literal(abs(preceding))
                else:
                    preceding = RANGE_CURRENT

        if range_[1] is None:
            following = RANGE_UNBOUNDED
        else:
            try:
                following = int(range_[1])
            except ValueError:
                raise exc.ArgumentError(
                    "Integer or None expected for following value")
            else:
                if following < 0:
                    raise exc.ArgumentError(
                        "Following value must be a positive "
                        "integer, zero, or None")
                elif following > 0:
                    following = literal(following)
                else:
                    following = RANGE_CURRENT

        return preceding, following

    @property
    def func(self):
        """the element referred to by this :class:`.Over`
        clause.

        .. deprecated:: 1.1 the ``func`` element has been renamed to
           ``.element``.  The two attributes are synonymous though
           ``.func`` is read-only.

        """
        return self.element

    @util.memoized_property
    def type(self):
        # The OVER expression takes on the type of the wrapped function.
        return self.element.type

    def get_children(self, **kwargs):
        return [c for c in
                (self.element, self.partition_by, self.order_by)
                if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
        if self.partition_by is not None:
            self.partition_by = clone(self.partition_by, **kw)
        if self.order_by is not None:
            self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        return list(itertools.chain(
            *[c._from_objects for c in
                (self.element, self.partition_by, self.order_by)
              if c is not None]
        ))
class WithinGroup(ColumnElement):
    """Represent a WITHIN GROUP (ORDER BY) clause.

    This is a special operator against so-called "ordered set
    aggregate" and "hypothetical set aggregate" functions,
    including ``percentile_cont()``, ``rank()``, ``dense_rank()``, etc.

    It's supported only by certain database backends, such as PostgreSQL,
    Oracle and MS SQL Server.

    The :class:`.WithinGroup` construct extracts its type from the
    method :meth:`.FunctionElement.within_group_type`.  If this returns
    ``None``, the function's ``.type`` is used.

    """

    __visit_name__ = 'withingroup'

    # Default class-level value; set per-instance in __init__.
    order_by = None

    def __init__(self, element, *order_by):
        """Produce a :class:`.WithinGroup` object against a function.

        Used against so-called "ordered set aggregate" and "hypothetical
        set aggregate" functions, including :class:`.percentile_cont`,
        :class:`.rank`, :class:`.dense_rank`, etc.

        :func:`~.expression.within_group` is usually called using
        the :meth:`.FunctionElement.within_group` method, e.g.::

            from sqlalchemy import within_group

            stmt = select([
                department.c.id,
                func.percentile_cont(0.5).within_group(
                    department.c.salary.desc()
                )
            ])

        The above statement would produce SQL similar to
        ``SELECT department.id, percentile_cont(0.5)
        WITHIN GROUP (ORDER BY department.salary DESC)``.

        :param element: a :class:`.FunctionElement` construct, typically
         generated by :data:`~.expression.func`.
        :param \*order_by: one or more column elements that will be used
         as the ORDER BY clause of the WITHIN GROUP construct.

        .. versionadded:: 1.1

        .. seealso::

            :data:`.expression.func`

            :func:`.expression.over`

        """
        self.element = element
        if order_by is not None:
            self.order_by = ClauseList(
                *util.to_list(order_by),
                _literal_as_text=_literal_as_label_reference)

    def over(self, partition_by=None, order_by=None):
        """Produce an OVER clause against this :class:`.WithinGroup`
        construct.

        This function has the same signature as that of
        :meth:`.FunctionElement.over`.

        """
        return Over(self, partition_by=partition_by, order_by=order_by)

    @util.memoized_property
    def type(self):
        # Prefer the type reported by the function for a WITHIN GROUP
        # context; fall back to the function's plain type.
        wgt = self.element.within_group_type(self)
        if wgt is not None:
            return wgt
        else:
            return self.element.type

    def get_children(self, **kwargs):
        # Bug fix: previously referenced ``self.func``, an attribute
        # that WithinGroup never defines (the wrapped function is
        # stored as ``self.element``), raising AttributeError during
        # clause traversal.
        return [c for c in
                (self.element, self.order_by)
                if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
        if self.order_by is not None:
            self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        return list(itertools.chain(
            *[c._from_objects for c in
                (self.element, self.order_by)
              if c is not None]
        ))
class FunctionFilter(ColumnElement):
    """Represent a function FILTER clause.

    This is a special operator against aggregate and window functions,
    which controls which rows are passed to it.
    It's supported only by certain database backends.

    Invocation of :class:`.FunctionFilter` is via
    :meth:`.FunctionElement.filter`::

        func.count(1).filter(True)

    .. versionadded:: 1.0.0

    .. seealso::

        :meth:`.FunctionElement.filter`

    """

    __visit_name__ = 'funcfilter'

    # Default class-level value; accumulated via filter() below.
    criterion = None

    def __init__(self, func, *criterion):
        """Produce a :class:`.FunctionFilter` object against a function.

        Used against aggregate and window functions,
        for database backends that support the "FILTER" clause.

        E.g.::

            from sqlalchemy import funcfilter
            funcfilter(func.count(1), MyClass.name == 'some name')

        Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".

        This function is also available from the :data:`~.expression.func`
        construct itself via the :meth:`.FunctionElement.filter` method.

        .. versionadded:: 1.0.0

        .. seealso::

            :meth:`.FunctionElement.filter`

        """
        self.func = func
        self.filter(*criterion)

    def filter(self, *criterion):
        """Produce an additional FILTER against the function.

        This method adds additional criteria to the initial criteria
        set up by :meth:`.FunctionElement.filter`.

        Multiple criteria are joined together at SQL render time
        via ``AND``.

        """
        # Iterate the argument tuple directly; the previous version
        # shadowed the ``criterion`` parameter with the loop variable
        # and made a needless list() copy.
        for crit in criterion:
            crit = _expression_literal_as_text(crit)
            if self.criterion is not None:
                # AND each new criterion onto the accumulated one.
                self.criterion = self.criterion & crit
            else:
                self.criterion = crit
        return self

    def over(self, partition_by=None, order_by=None):
        """Produce an OVER clause against this filtered function.

        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.

        The expression::

            func.rank().filter(MyClass.y > 5).over(order_by='x')

        is shorthand for::

            from sqlalchemy import over, funcfilter
            over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')

        See :func:`~.expression.over` for a full description.

        """
        return Over(self, partition_by=partition_by, order_by=order_by)

    @util.memoized_property
    def type(self):
        # The filtered expression takes on the type of the function.
        return self.func.type

    def get_children(self, **kwargs):
        return [c for c in
                (self.func, self.criterion)
                if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        if self.criterion is not None:
            self.criterion = clone(self.criterion, **kw)

    @property
    def _from_objects(self):
        return list(itertools.chain(
            *[c._from_objects for c in (self.func, self.criterion)
              if c is not None]
        ))
class Label(ColumnElement):
    """Represents a column label (AS).

    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.

    """

    __visit_name__ = 'label'

    def __init__(self, name, element, type_=None):
        """Return a :class:`Label` object for the
        given :class:`.ColumnElement`.

        A label changes the name of an element in the columns clause of a
        ``SELECT`` statement, typically via the ``AS`` SQL keyword.

        This functionality is more conveniently available via the
        :meth:`.ColumnElement.label` method on :class:`.ColumnElement`.

        :param name: label name

        :param element: a :class:`.ColumnElement`.

        """
        # Unwrap nested Labels, remembering the innermost label name
        # for ORDER BY / GROUP BY label resolution.
        if isinstance(element, Label):
            self._resolve_label = element._label
        while isinstance(element, Label):
            element = element.element

        if name:
            self.name = name
            self._resolve_label = self.name
        else:
            # No name given: generate an anonymous label unique to
            # this object.
            self.name = _anonymous_label(
                '%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))
            )

        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self._proxies = [element]

    def __reduce__(self):
        # Pickle support: reconstruct from (name, element, type_).
        return self.__class__, (self.name, self._element, self._type)

    @util.memoized_property
    def _allow_label_resolve(self):
        return self.element._allow_label_resolve

    @property
    def _order_by_label_element(self):
        return self

    @util.memoized_property
    def type(self):
        # Explicit type wins; otherwise defer to the labeled element.
        return type_api.to_instance(
            self._type or getattr(self._element, 'type', None)
        )

    @util.memoized_property
    def element(self):
        return self._element.self_group(against=operators.as_)

    def self_group(self, against=None):
        # Re-label the grouped form of the element if grouping changed it.
        sub_element = self._element.self_group(against=against)
        if sub_element is not self._element:
            return Label(self.name,
                         sub_element,
                         type_=self._type)
        else:
            return self

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        return self.element.foreign_keys

    def get_children(self, **kwargs):
        return self.element,

    def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
        self._element = clone(self._element, **kw)
        # Invalidate memoized attributes derived from the old element.
        self.__dict__.pop('element', None)
        self.__dict__.pop('_allow_label_resolve', None)
        if anonymize_labels:
            # Replace the label name with a fresh anonymous one.
            self.name = self._resolve_label = _anonymous_label(
                '%%(%d %s)s' % (
                    id(self), getattr(self.element, 'name', 'anon'))
            )
            self.key = self._label = self._key_label = self.name

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _make_proxy(self, selectable, name=None, **kw):
        # Proxy the inner element onto the selectable, then record this
        # Label in the proxy chain and carry over any explicit type.
        e = self.element._make_proxy(selectable,
                                     name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a column expression from any textual string.

    The :class:`.ColumnClause` is a lightweight analogue to the
    :class:`.Column` class, typically invoked using the
    :func:`.column` function, as in::

        from sqlalchemy import column

        id, name = column("id"), column("name")
        stmt = select([id, name]).select_from("user")

    The above statement would produce SQL like::

        SELECT id, name FROM user

    :class:`.ColumnClause` is the immediate superclass of the
    schema-specific :class:`.Column` object, but has none of the
    associations with schema-level metadata or with execution-time
    behavior that :class:`.Column` does, so in that sense is a
    "lightweight" version of :class:`.Column`.

    Full details on :class:`.ColumnClause` usage is at :func:`.column`.

    .. seealso::

        :func:`.column`

        :class:`.Column`

    """
    __visit_name__ = 'column'

    # DDL/default machinery attributes; always None on ColumnClause but
    # present so code handling Column objects can probe them uniformly.
    onupdate = default = server_default = server_onupdate = None

    # memoization group: all members decorated with @_memoized_property
    # are expired together whenever .table is reassigned (see _set_table).
    _memoized_property = util.group_expirable_memoized_property()

    def __init__(self, text, type_=None, is_literal=False, _selectable=None):
        """Produce a :class:`.ColumnClause` object.

        :param text: the text of the element.  Handled like the name of
         a database column; if the string contains mixed case, special
         characters, or matches a known reserved word on the target
         backend, it renders using the backend's quoting behavior.  To
         render the text exactly without quoting, use
         :func:`.literal_column` instead, or pass ``True`` as the value
         of :paramref:`.column.is_literal`.  Full SQL statements are
         best handled using the :func:`.text` construct.

        :param type_: :class:`.types.TypeEngine` object which can
         associate this :class:`.ColumnClause` with a type.

        :param is_literal: if True, the :class:`.ColumnClause` is
         assumed to be an exact expression that will be delivered to
         the output with no quoting rules applied regardless of case
         sensitive settings. The :func:`.literal_column()` function
         essentially invokes :func:`.column` while passing
         ``is_literal=True``.

        .. seealso::

            :class:`.Column`

            :func:`.literal_column`

            :func:`.table`

            :func:`.text`

            :ref:`sqlexpression_literal_column`

        """
        self.key = self.name = text
        self.table = _selectable
        self.type = type_api.to_instance(type_)
        self.is_literal = is_literal

    def _compare_name_for_result(self, other):
        # When either side is textual / table-less, fall back to a
        # name- and label-based comparison; otherwise rely on proxy-set
        # intersection for an exact lineage match.
        if self.is_literal or \
                self.table is None or self.table._textual or \
                not hasattr(other, 'proxy_set') or (
                    isinstance(other, ColumnClause) and
                    (other.is_literal or
                     other.table is None or
                     other.table._textual)
                ):
            return (hasattr(other, 'name') and self.name == other.name) or \
                (hasattr(other, '_label') and self._label == other._label)
        else:
            return other.proxy_set.intersection(self.proxy_set)

    def _get_table(self):
        return self.__dict__['table']

    def _set_table(self, table):
        # reassigning the table invalidates all label-related memoizations
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)

    @_memoized_property
    def _from_objects(self):
        # the FROM list contributed by this column is its table, if any
        t = self.table
        if t is not None:
            return [t]
        else:
            return []

    @util.memoized_property
    def description(self):
        # plain-string form of the name, used in error messages
        if util.py3k:
            return self.name
        else:
            return self.name.encode('ascii', 'backslashreplace')

    @_memoized_property
    def _key_label(self):
        # label based on .key when it differs from .name
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label

    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)

    @_memoized_property
    def _render_label_in_columns_clause(self):
        # only render "tablename_colname AS label" when the column
        # actually belongs to a table
        return self.table is not None

    def _gen_label(self, name):
        """Generate a "<table>_<name>" label for this column,
        propagating quoting preferences and disambiguating against
        existing columns of the table.  Returns None for literal
        columns and the bare name for table-less columns."""
        t = self.table

        if self.is_literal:
            return None

        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                    t.name + "_" + name
            else:
                label = t.name + "_" + name

            # propagate name quoting rules for labels.
            if getattr(name, "quote", None) is not None:
                if isinstance(label, quoted_name):
                    label.quote = name.quote
                else:
                    label = quoted_name(label, name.quote)
            elif getattr(t.name, "quote", None) is not None:
                # can't get this situation to occur, so let's
                # assert false on it for now
                assert not isinstance(label, quoted_name)
                label = quoted_name(label, t.name.quote)

            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label

            return _as_truncated(label)

        else:
            return name

    def _bind_param(self, operator, obj, type_=None):
        # bound parameters compared against this column inherit its type
        return BindParameter(self.key, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             type_=type_,
                             unique=True)

    def _make_proxy(self, selectable, name=None, attach=True,
                    name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
            _as_truncated(name or self.name) if
            name_is_truncatable else
            (name or self.name),
            type_=self.type,
            _selectable=selectable,
            is_literal=is_literal
        )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)

        if attach:
            selectable._columns[c.key] = c
        return c
class _IdentifiedClause(Executable, ClauseElement):
    """Base for transaction-demarcation clauses that carry a name
    identifier, such as savepoint statements."""

    __visit_name__ = 'identified'
    # these statements must never trigger autocommit
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})

    def __init__(self, ident):
        # ident: the savepoint name rendered by the compiler
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement."""
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement."""
    __visit_name__ = 'release_savepoint'
class quoted_name(util.MemoizedSlots, util.text_type):
    """Represent a SQL identifier combined with quoting preferences.

    :class:`.quoted_name` is a Python unicode/str subclass which
    represents a particular identifier name along with a
    ``quote`` flag.  This ``quote`` flag, when set to
    ``True`` or ``False``, overrides automatic quoting behavior
    for this identifier in order to either unconditionally quote
    or to not quote the name.  If left at its default of ``None``,
    quoting behavior is applied to the identifier on a per-backend basis
    based on an examination of the token itself.

    A :class:`.quoted_name` object with ``quote=True`` is also
    prevented from being modified in the case of a so-called
    "name normalize" option.  Certain database backends, such as
    Oracle, Firebird, and DB2 "normalize" case-insensitive names
    as uppercase.  The SQLAlchemy dialects for these backends
    convert from SQLAlchemy's lower-case-means-insensitive convention
    to the upper-case-means-insensitive conventions of those backends.
    The ``quote=True`` flag here will prevent this conversion from
    occurring to support an identifier that's quoted as all lower case
    against such a backend.

    The :class:`.quoted_name` object is normally created automatically
    when specifying the name for key schema constructs such as
    :class:`.Table`, :class:`.Column`, and others.  The class can also
    be passed explicitly as the name to any function that receives a
    name which can be quoted.  Such as to use the
    :meth:`.Engine.has_table` method with an unconditionally quoted
    name::

        from sqlalchemy import create_engine
        from sqlalchemy.sql.elements import quoted_name

        engine = create_engine("oracle+cx_oracle://some_dsn")
        engine.has_table(quoted_name("some_table", True))

    The above logic will run the "has table" logic against the Oracle
    backend, passing the name exactly as ``"some_table"`` without
    converting to upper case.

    .. versionadded:: 0.9.0

    """

    # 'lower' and 'upper' are filled lazily by util.MemoizedSlots via
    # the _memoized_method_* hooks below.
    __slots__ = 'quote', 'lower', 'upper'

    def __new__(cls, value, quote):
        # None stays None so callers need not special-case missing names
        if value is None:
            return None
        # experimental - don't bother with quoted_name
        # if quote flag is None.  doesn't seem to make any dent
        # in performance however
        # elif not sprcls and quote is None:
        #   return value
        elif isinstance(value, cls) and (
            quote is None or value.quote == quote
        ):
            # reuse an existing instance when the quote flag agrees
            return value
        self = super(quoted_name, cls).__new__(cls, value)

        self.quote = quote
        return self

    def __reduce__(self):
        # pickle as (plain string, quote flag)
        return quoted_name, (util.text_type(self), self.quote)

    def _memoized_method_lower(self):
        # an explicitly quoted name is case-significant; don't fold it
        if self.quote:
            return self
        else:
            return util.text_type(self).lower()

    def _memoized_method_upper(self):
        # an explicitly quoted name is case-significant; don't fold it
        if self.quote:
            return self
        else:
            return util.text_type(self).upper()

    def __repr__(self):
        backslashed = self.encode('ascii', 'backslashreplace')
        if not util.py2k:
            backslashed = backslashed.decode('ascii')
        return "'%s'" % backslashed
class _truncated_label(quoted_name):
    """A unicode subclass used to identify symbolic names that may
    require truncation."""

    __slots__ = ()

    def __new__(cls, value, quote=None):
        # inherit an existing quote flag carried on the incoming value
        quote = getattr(value, "quote", quote)
        # return super(_truncated_label, cls).__new__(cls, value, quote, True)
        return super(_truncated_label, cls).__new__(cls, value, quote)

    def __reduce__(self):
        return self.__class__, (util.text_type(self), self.quote)

    def apply_map(self, map_):
        # symbolic (non-anonymous) names ignore the anonymizing map
        return self
class conv(_truncated_label):
    """Mark a string indicating that a name has already been converted
    by a naming convention.

    This is a string subclass that indicates a name that should not be
    subject to any further naming conventions.

    E.g. when we create a :class:`.Constraint` using a naming convention
    as follows::

        m = MetaData(naming_convention={
            "ck": "ck_%(table_name)s_%(constraint_name)s"
        })
        t = Table('t', m, Column('x', Integer),
                  CheckConstraint('x > 5', name='x5'))

    The name of the above constraint will be rendered as ``"ck_t_x5"``.
    That is, the existing name ``x5`` is used in the naming convention
    as the ``constraint_name`` token.

    In some situations, such as in migration scripts, we may be
    rendering the above :class:`.CheckConstraint` with a name that's
    already been converted.  In order to make sure the name isn't
    double-modified, the new name is applied using the
    :func:`.schema.conv` marker.  We can use this explicitly as
    follows::

        m = MetaData(naming_convention={
            "ck": "ck_%(table_name)s_%(constraint_name)s"
        })
        t = Table('t', m, Column('x', Integer),
                  CheckConstraint('x > 5', name=conv('ck_t_x5')))

    Where above, the :func:`.schema.conv` marker indicates that the
    constraint name here is final, and the name will render as
    ``"ck_t_x5"`` and not ``"ck_t_ck_t_x5"``

    .. versionadded:: 0.9.4

    .. seealso::

        :ref:`constraint_naming_conventions`

    """
    __slots__ = ()
class _defer_name(_truncated_label):
    """mark a name as 'deferred' for the purposes of automated name
    generation.

    """
    __slots__ = ()

    def __new__(cls, value):
        # None becomes the shared "no name" sentinel; names already
        # converted by a naming convention pass through untouched.
        if value is None:
            return _NONE_NAME
        if isinstance(value, conv):
            return value
        return super(_defer_name, cls).__new__(cls, value)

    def __reduce__(self):
        return self.__class__, (util.text_type(self), )
class _defer_none_name(_defer_name):
    """indicate a 'deferred' name that was ultimately the value None."""
    __slots__ = ()

# shared sentinel returned by _defer_name for names given as None
_NONE_NAME = _defer_none_name("_unnamed_")

# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""

    __slots__ = ()

    def __add__(self, other):
        # concatenation keeps the anonymous-label type and quote flag
        return _anonymous_label(
            quoted_name(
                util.text_type.__add__(self, util.text_type(other)),
                self.quote)
        )

    def __radd__(self, other):
        return _anonymous_label(
            quoted_name(
                util.text_type.__add__(util.text_type(other), self),
                self.quote)
        )

    def apply_map(self, map_):
        """Resolve the anonymous token against ``map_`` via the ``%``
        operator, retaining the quote preference when one is set."""
        if self.quote is not None:
            # preserve quoting only if necessary
            return quoted_name(self % map_, self.quote)
        else:
            # else skip the constructor call
            return self % map_
def _as_truncated(value):
    """coerce the given value to :class:`._truncated_label`.

    Existing :class:`._truncated_label` and
    :class:`._anonymous_label` objects are passed
    unchanged.

    """
    if isinstance(value, _truncated_label):
        return value
    return _truncated_label(value)
def _string_or_unprintable(element):
    """Return ``element`` if it is already a string, otherwise its
    ``str()`` form, falling back to a placeholder for objects whose
    ``__str__`` raises."""
    if isinstance(element, util.string_types):
        return element
    try:
        return str(element)
    except Exception:
        return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """return the intersection of sets a and b, counting
    any overlap between 'cloned' predecessors.

    The returned set is in terms of the entities present within 'a'.

    """
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(
        elem for elem in a
        if overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
    """return the elements of set a that have no 'cloned' overlap with
    any element of set b.

    The returned set is in terms of the entities present within 'a'.

    """
    all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(elem for elem in a
               if not all_overlap.intersection(elem._cloned_set))
@util.dependencies("sqlalchemy.sql.functions")
def _labeled(functions, element):
    """Ensure ``element`` carries a name, applying an anonymous label
    to nameless elements and to function expressions.

    The ``functions`` argument is injected by the ``util.dependencies``
    decorator (lazy import to avoid a module cycle).
    """
    if not hasattr(element, 'name') or \
            isinstance(element, functions.FunctionElement):
        return element.label(None)
    else:
        return element
def _is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`."""

    return isinstance(col, ColumnElement)
def _find_columns(clause):
    """locate Column objects within the given expression."""

    cols = util.column_set()
    # visitor pattern: collect every node dispatched as 'column'
    traverse(clause, {}, {'column': cols.add})
    return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Extract a dictionary key from a column-oriented argument.

    Strings pass through unchanged; objects exposing
    ``__clause_element__`` are unwrapped first.  Returns the element's
    ``.key``, or None when no key is available.
    """
    if isinstance(element, util.string_types):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_label_reference(element):
    """Coerce an ORDER BY / GROUP BY argument to a clause element,
    turning plain strings into textual label references."""
    if isinstance(element, util.string_types):
        return _textual_label_reference(element)

    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()

    return _literal_as_text(element)
def _literal_and_labels_as_label_reference(element):
    """Coerce an ORDER BY / GROUP BY argument to a clause element;
    additionally, column elements that resolve to a label in the
    columns clause are wrapped in a _label_reference marker so that
    the compiler can render the label name instead."""
    if isinstance(element, util.string_types):
        return _textual_label_reference(element)

    elif hasattr(element, '__clause_element__'):
        element = element.__clause_element__()

    if isinstance(element, ColumnElement) and \
            element._order_by_label_element is not None:
        return _label_reference(element)
    else:
        return _literal_as_text(element)
def _expression_literal_as_text(element):
    """Variant of _literal_as_text that warns when a plain string is
    implicitly coerced to text()."""
    return _literal_as_text(element, warn=True)
def _literal_as_text(element, warn=False):
    """Coerce ``element`` to a ClauseElement: clause elements and
    producers pass through, plain strings become :class:`.TextClause`
    (optionally with a deprecation-style warning), and None/booleans
    become their SQL constant expressions.  Anything else raises
    :class:`.ArgumentError`."""
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif isinstance(element, util.string_types):
        if warn:
            util.warn_limited(
                "Textual SQL expression %(expr)r should be "
                "explicitly declared as text(%(expr)r)",
                {"expr": util.ellipses_string(element)})

        return TextClause(util.text_type(element))
    elif isinstance(element, (util.NoneType, bool)):
        return _const_expr(element)
    else:
        raise exc.ArgumentError(
            "SQL expression object or string expected, got object of type %r "
            "instead" % type(element)
        )
def _no_literals(element):
    """Reject plain Python literals where an explicit SQL expression
    is required; objects exposing ``__clause_element__`` are
    unwrapped."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
                                "function to indicate a SQL expression "
                                "literal, or 'literal()' to indicate a "
                                "bound value." % element)
    else:
        return element
def _is_literal(element):
    """True when ``element`` is neither a clause element nor a producer
    of one, i.e. it should be treated as a literal value."""
    return not (
        isinstance(element, Visitable) or
        hasattr(element, '__clause_element__')
    )
def _only_column_elements_or_none(element, name):
    """Like :func:`._only_column_elements`, but passes None through."""
    if element is None:
        return None
    return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Coerce ``element`` to a :class:`.ColumnElement`, raising
    :class:`.ArgumentError` (naming the offending argument ``name``)
    for anything else."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if not isinstance(element, ColumnElement):
        raise exc.ArgumentError(
            "Column-based expression object expected for argument "
            "'%s'; got: '%s', type %s" % (name, element, type(element)))
    return element
def _literal_as_binds(element, name=None, type_=None):
    """Coerce ``element`` to a bound parameter (or :class:`.Null` for
    None); clause elements and producers pass through unchanged."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        if element is None:
            return Null()
        else:
            return BindParameter(name, element, type_=type_, unique=True)
    else:
        return element
# heuristic used below: a token that begins with a word character and
# contains no whitespace is probably a plain column name rather than a
# SQL fragment
_guess_straight_column = re.compile(r'^\w\S*$', re.I)
def _interpret_as_column_or_from(element):
    """Coerce ``element`` to a column expression or FROM clause.

    Clause elements and ``__clause_element__`` producers pass through;
    inspectable objects contribute their ``.selectable``.  Remaining
    plain values are coerced to :class:`.ColumnClause`, with a
    heuristic (plus a warning) deciding whether a string is a simple
    column name or a literal SQL fragment.
    """
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()

    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        if isinstance(element, (util.NoneType, bool)):
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        return insp.selectable

    # be forgiving as this is an extremely common
    # and known expression
    if element == "*":
        guess_is_literal = True
    elif isinstance(element, (numbers.Number)):
        return ColumnClause(str(element), is_literal=True)
    else:
        element = str(element)
        # give into temptation, as this fact we are guessing about
        # is not one we've previously ever needed our users tell us;
        # but let them know we are not happy about it
        guess_is_literal = not _guess_straight_column.match(element)
        util.warn_limited(
            "Textual column expression %(column)r should be "
            "explicitly declared with text(%(column)r), "
            "or use %(literal_column)s(%(column)r) "
            "for more specificity",
            {
                "column": util.ellipses_string(element),
                "literal_column": "literal_column"
                if guess_is_literal else "column"
            })
    return ColumnClause(
        element,
        is_literal=guess_is_literal)
def _const_expr(element):
    """Coerce None/False/True (or an already-constructed constant
    element) to its SQL constant expression object."""
    if isinstance(element, (Null, False_, True_)):
        return element

    if element is None:
        return Null()
    if element is False:
        return False_()
    if element is True:
        return True_()

    raise exc.ArgumentError(
        "Expected None, False, or True"
    )
def _type_from_args(args):
    """Return the type of the first argument whose type is not NULL,
    else NULLTYPE."""
    for a in args:
        if not a.type._isnull:
            return a.type
    else:
        # for/else: the loop finished without finding a typed argument
        return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
                                   require_embedded=False):
    """Return the column of ``fromclause`` corresponding to ``column``,
    raising :class:`.InvalidRequestError` when no correspondence
    exists."""
    c = fromclause.corresponding_column(column,
                                        require_embedded=require_embedded)
    if c is None:
        raise exc.InvalidRequestError(
            "Given column '%s', attached to table '%s', "
            "failed to locate a corresponding column from table '%s'"
            %
            (column,
             getattr(column, 'table', None),
             fromclause.description)
        )
    return c
class AnnotatedColumnElement(Annotated):
    """Annotated variant of a :class:`.ColumnElement`.

    Proxies 'name', 'key', 'table', 'info' and 'anon_label' from the
    wrapped element via memoized properties, and resets the comparator
    so operators act against the annotated wrapper.
    """

    def __init__(self, element, values):
        Annotated.__init__(self, element, values)
        ColumnElement.comparator._reset(self)
        # discard inherited None placeholders so the memoized
        # properties below can supply the parent element's values
        for attr in ('name', 'key', 'table'):
            if self.__dict__.get(attr, False) is None:
                self.__dict__.pop(attr)

    def _with_annotations(self, values):
        clone = super(AnnotatedColumnElement, self)._with_annotations(values)
        # the clone needs its own comparator bound to itself
        ColumnElement.comparator._reset(clone)
        return clone

    @util.memoized_property
    def name(self):
        """pull 'name' from parent, if not present"""
        return self._Annotated__element.name

    @util.memoized_property
    def table(self):
        """pull 'table' from parent, if not present"""
        return self._Annotated__element.table

    @util.memoized_property
    def key(self):
        """pull 'key' from parent, if not present"""
        return self._Annotated__element.key

    @util.memoized_property
    def info(self):
        # proxy the info dictionary of the wrapped element
        return self._Annotated__element.info

    @util.memoized_property
    def anon_label(self):
        # proxy the anonymous label of the wrapped element
        return self._Annotated__element.anon_label
| MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/sqlalchemy/sql/elements.py | Python | mit | 148,818 | [
"VisIt"
] | 28a993ef9fabc1b694b0dff93b6c6131b8db253186a43589e3899d3aa19091e7 |
from command import Command
import control
class Dots(Command):
    """Command that writes a small-dots picture to the sign.

    ``dots`` is either a newline-separated string or a sequence of row
    strings.  Each character is mapped through :attr:`colors`
    (space = off, ``r``/``g`` = red/green, ``a``/``o`` = amber); any
    character not in the map is emitted as-is.  Rows shorter than the
    picture width are padded with OFF dots.
    """

    OFF = '0'
    RED = '1'
    GREEN = '2'
    AMBER = '3'

    colors = {' ': OFF, 'r': RED, 'g': GREEN, 'a': AMBER, 'o': AMBER}

    def __init__(self, dots, label='A', locked=True, width=None, height=None):
        Command.__init__(self)
        self.dots = dots
        self.label = label
        self.locked = control.LOCKED if locked else control.UNLOCKED
        if isinstance(dots, str):  # was: type(dots) == str
            dots = dots.split('\n')
        # height/width default to the picture's own dimensions
        self.height = height if height else len(dots)
        if width:
            self.width = width
        else:
            self.width = 0
            for row in dots:
                self.width = max(self.width, len(row))
        data = []
        for row in dots:
            row_data = []
            for dot in row:
                if dot in Dots.colors:
                    dot = Dots.colors[dot]
                row_data.append(str(dot))
            # BUG FIX: pad each *row* to the picture width.  The old
            # ``while len(data) < self.width`` test measured the
            # cumulative buffer, so only the first row was ever padded.
            while len(row_data) < self.width:
                row_data.append(Dots.OFF)
            data.extend(row_data)
            data.append(control.NEW_LINE)
        data = ''.join(data)
        self._command = "%s%s%s%s%s" % (control.WRITE_SMALL_DOTS, label,
                                        "%02X" % self.height,
                                        "%02X" % self.width, data)

    @staticmethod
    def call(label):
        """Return the escape sequence that displays picture ``label``."""
        return control.CALL_SMALL_DOTS + label
| ctmyers/sign | protocol/dots.py | Python | mit | 1,374 | [
"Amber"
] | e12a546c35af3f0ebd6bc9b5e10055bb0c336c34f88906649207e6411d31af84 |
# Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
import pytest
from scipy.stats import normaltest
from astropy.table import Table, QTable
from astropy.coordinates import SkyCoord
import astropy.units as u
from ...source import (SymbolFSource, Source, poisson_process,
PointSource,
DiskSource, SphericalDiskSource, GaussSource)
from ...optics import RectangleAperture
from ..source import SourceSpecificationError
def test_photons_header():
    '''All generated photons should have some common keywords.

    Just test that some of the keywords are there. It's unlikely
    that I need a full list here.
    '''
    s = SymbolFSource(coords=SkyCoord(-123., -43., unit=u.deg), size=1.*u.deg)
    photons = s.generate_photons(5. * u.s)
    # spot-check a representative subset of the expected meta keywords
    for n in ['EXPOSURE', 'CREATOR', 'MARXSVER', 'SIMTIME', 'SIMUSER']:
        assert n in photons.meta
def test_energy_input_default():
    '''For convenience and testing, defaults for time, energy and pol are set.'''
    s = Source()
    photons = s.generate_photons(5. * u.s)
    assert len(photons) == 5
    # default energy is a constant 1 keV
    assert np.all(photons['energy'] == 1.)
    assert len(set(photons['polangle'])) == 5  # all pol angles are different
def test_flux_input():
    '''Options: constant rate or function'''
    # 1. constant rate
    s = Source(flux=5 / u.hour / u.cm**2)
    photons = s.generate_photons(5. * u.h)
    assert len(photons) == 25
    delta_t = np.diff(photons['time'])
    assert np.allclose(delta_t, delta_t[0])  # constant rate

    # 2. function: flux callable receives elapsed time and geometric area
    def f(t, geomarea):
        return np.logspace(1, np.log10(t.to(u.s).value)) * u.s

    s = Source(flux=f)
    photons = s.generate_photons(100 * u.s)
    assert np.all(photons['time'] == np.logspace(1, 2))

    # 3. anything else must raise a clear error
    s = Source(flux=ValueError())
    with pytest.raises(SourceSpecificationError) as e:
        photons = s.generate_photons(5 * u.s)
    assert '`flux` must be' in str(e.value)
def test_energy_input():
    '''Many different options ...'''
    # 1. constant energy
    s = PointSource(coords=SkyCoord("1h12m43.2s +1d12m43s"), energy=2. * u.keV)
    photons = s.generate_photons(5 * u.minute)
    assert np.all(photons['energy'] == 2.)

    # 2. function: energy callable receives the photon arrival times
    def f(t):
        return (t / u.s * u.keV).decompose()

    s = Source(energy=f)
    photons = s.generate_photons(5 * u.s)
    assert np.all(photons['energy'] == photons['time'])

    # bad function: returns an array of the wrong length
    s = Source(energy=lambda x: np.array([np.sum(x.value)]) * u.erg)
    with pytest.raises(SourceSpecificationError) as e:
        photons = s.generate_photons(5 * u.s)
    assert 'an array of same size as' in str(e.value)

    # 3. array, table, recarray, dict, ... just try some of the options
    # with keys; spectrum with distinct lines
    engrid = [0.5, 1., 2., 3.] * u.keV
    # first entry (456) will be ignored
    fluxgrid = [456., 1., 0., 2.] / u.s / u.cm**2 / u.keV
    s = Source(energy=QTable({'energy': engrid, 'fluxdensity': fluxgrid}))
    photons = s.generate_photons(1000 * u.s)
    en = photons['energy']
    ind0510 = (en >= 0.5) & (en <= 1.0)
    ind2030 = (en >= 2.) & (en <= 3.0)
    # all photons fall in the two bands with nonzero flux density,
    # and the band with higher flux density receives more photons
    assert (ind0510.sum() + ind2030.sum()) == len(photons)
    assert ind0510.sum() < ind2030.sum()

    # 4. anything else must raise a clear error
    s = Source(energy=object())
    with pytest.raises(SourceSpecificationError) as e:
        photons = s.generate_photons(5 * u.s)
    assert '`energy` must be' in str(e.value)
def test_polarization_input():
    '''Many different options ...'''
    # 1. 100 % polarized flux at a fixed angle
    s = Source(polarization=90 * u.deg)
    photons = s.generate_photons(5 * u.s)
    assert np.all(photons['polangle'] == np.pi / 2)

    # 2. function: callable receives photon times and energies
    def f(t, en):
        return (t / u.s * en / u.keV * u.rad).decompose()

    s = Source(polarization=f, energy=2. * u.keV)
    photons = s.generate_photons(5 * u.s)
    assert np.all(photons['polangle'] == photons['time'] * photons['energy'])

    # bad function: returns an array of the wrong length
    s = Source(polarization=lambda x, y: [np.dot(x.value, y.value)] * u.deg)
    with pytest.raises(SourceSpecificationError) as e:
        photons = s.generate_photons(5 * u.s)
    assert 'an array of same size as' in str(e.value)

    # 3. table: probability density with distinct angle bands
    polgrid = [0.5, 1., 2., 3.] * u.rad
    probgrid = [456., 1., 0., 2.] / u.rad  # first entry (456) will be ignored
    s = Source(polarization=QTable({'angle': polgrid,
                                    'probabilitydensity': probgrid}))
    photons = s.generate_photons(1000 * u.s)
    pol = photons['polangle']
    ind0510 = (pol >= 0.5) & (pol <= 1.0)
    ind2030 = (pol >= 2.) & (pol <= 3.0)
    assert (ind0510.sum() + ind2030.sum()) == len(photons)
    assert ind0510.sum() < ind2030.sum()

    # 4. None (unpolarized source)
    s = Source(polarization=None)
    photons = s.generate_photons(5 * u.s)
    assert len(set(photons['polangle'])) == len(photons)  # all different

    # 5. anything else must raise a clear error
    s = Source(polarization=object())
    with pytest.raises(SourceSpecificationError) as e:
        photons = s.generate_photons(5 * u.s)
    assert '`polarization` must be' in str(e.value)
def test_poisson_process():
    '''Do some consistency checks for the Poisson process.

    It turns out that this is hard to test properly, without
    reimplementing the scipy version.
    But at least we can test consistency of the unit handling.
    '''
    p = poisson_process(20. / (u.s * u.cm**2))
    # expected count: rate * time * area = 2000; allow Poisson scatter
    times = p(100. * u.s, 1. * u.cm**2).value
    assert (len(times) > 1500) and (len(times) < 2500)
    assert (times[-1] > 99.) and (times[-1] < 100.)

    # same rate, different units for time (ks) and area (mm^2)
    times = p(.1 * u.ks, 1000. * u.mm**2).value
    assert (len(times) > 18000) and (len(times) < 22000)
    assert (times[-1] > 99.) and (times[-1] < 100.)
def test_poisson_input():
    '''Input must be a scalar.'''
    with pytest.raises(ValueError) as e:
        p = poisson_process(np.arange(5) / u.s / u.cm**2)
    assert 'must be scalar' in str(e.value)
def test_Aeff():
    '''Check that a higher effective area leads to more counts.'''
    # zoom=2 makes a 4 cm x 4 cm aperture, i.e. 16 cm^2
    a = RectangleAperture(zoom=2)
    s = Source(flux=50 / u.s / u.cm**2, geomarea=a.area)
    photons = s.generate_photons(5. * u.s)
    # 50 / s / cm^2 * 16 cm^2 * 5 s would be 4000; the source scales
    # counts by geomarea, expected here: 40
    assert len(photons) == 40
def test_disk_radius():
    '''Photons from an annular disk source must all lie between the
    inner and outer radius.

    Note: the comparison must be applied to the reduced scalar.  The
    previous ``np.max(d.arcmin <= 10.)`` took the max of a *boolean*
    array, which only verified that at least one photon was inside the
    outer radius.
    '''
    pos = SkyCoord(12. * u.degree, -23.*u.degree)
    s = DiskSource(coords=pos, a_inner=50.* u.arcsec,
                   a_outer=10. * u.arcmin)
    photons = s.generate_photons(1e4 * u.s)
    d = pos.separation(SkyCoord(photons['ra'], photons['dec'], unit='deg'))
    # BUG FIX: compare the extreme separations themselves
    assert np.max(d.arcmin) <= 10.
    assert np.min(d.arcmin) >= 0.8
def test_sphericaldisk_radius():
    '''Photons from a spherical annular disk source must all lie
    between the inner and outer radius.

    Note: the comparison must be applied to the reduced scalar.  The
    previous ``np.max(d.arcmin <= 10.)`` took the max of a *boolean*
    array, which only verified that at least one photon was inside the
    outer radius.
    '''
    pos = SkyCoord(12. * u.degree, -23.*u.degree)
    s = SphericalDiskSource(coords=pos, a_inner=50.* u.arcsec,
                            a_outer=10. * u.arcmin)
    photons = s.generate_photons(1e4 * u.s)
    d = pos.separation(SkyCoord(photons['ra'], photons['dec'], unit='deg'))
    # BUG FIX: compare the extreme separations themselves
    assert np.max(d.arcmin) <= 10.
    assert np.min(d.arcmin) >= 0.8
@pytest.mark.parametrize("diskclass,diskpar,n_expected",
                         [(DiskSource, {'a_outer': 30. * u.arcmin}, 2777),
                          (SphericalDiskSource,
                           {'a_outer': 30. * u.arcmin}, 2777),
                          (GaussSource, {'sigma': 1.5 * u.deg}, 150)])
def test_disk_distribution(diskclass, diskpar, n_expected):
    '''This is a separate test from test_disk_radius, because it's
    simpler to write if we don't have to worry about the inner hole.

    For the test itself: The results should be poisson distributed
    (or, for large numbers this will be almost normal).
    That makes testing it a little awkward in a short run time, thus
    the limits are fairly loose.

    This test is run for several extended sources, incl Gaussian.
    Strictly speaking it should fail for a Gaussian distribution, but
    if the sigma is large enough it will pass a loose test (and still
    fail if things go catastrophically wrong, e.g. some test circles
    are outside the source).
    '''
    s = diskclass(coords=SkyCoord(213., -10., unit=u.deg), **diskpar)
    photons = s.generate_photons(1e5 * u.s)

    # count photons inside 50 randomly-placed 5-arcmin test circles
    n = np.empty(50)
    for i in range(len(n)):
        circ = SkyCoord((213. + np.random.uniform(-0.1, .1)) * u.degree,
                        (- 10. + np.random.uniform(-0.1, .1)) * u.degree)
        d = circ.separation(SkyCoord(photons['ra'], photons['dec'],
                                     unit='deg'))
        n[i] = (d < 5. * u.arcmin).sum()

    # s, p = normaltest(n)
    # assert a p value here that is soo small that it's never going to be hit
    # by chance.
    # assert p > .05
    # better: Test number of expected photons matches
    # Allow large variation so that this is not triggered by chance
    assert np.isclose(n.mean(), n_expected, rtol=.2)
def test_spectral_density():
    '''Two tests that check that spectral density units are treated
    correctly.

    Each pair of spectra describes the same flux density on coarse and
    fine energy grids; the resulting photon energy distributions must
    agree.
    '''
    spec1 = QTable({'energy': [1, 2, 3] * u.keV,
                    'fluxdensity': [1, 1, 1.] / u.s / u.cm**2 / u.keV})
    spec2 = QTable({'energy': [1, 1.5, 2, 2.5, 3] * u.keV,
                    'fluxdensity': [1, 1, 1, 1., 1.] / u.s / u.cm**2 / u.keV})
    spec3 = QTable({'energy': [1, 2, 3] * u.keV,
                    'fluxdensity': [1, 1, 2.] / u.s / u.cm**2 / u.keV})
    spec4 = QTable({'energy': [1, 1.5, 2, 2.5, 3] * u.keV,
                    'fluxdensity': [1, 1, 1, 2., 2.] / u.s / u.cm**2 / u.keV})

    for sp1, sp2 in zip([spec1, spec3], [spec2, spec4]):
        s1 = PointSource(coords=SkyCoord(-123., -43., unit=u.deg),
                         energy=sp1, flux=1/u.s/u.cm**2)
        s2 = PointSource(coords=SkyCoord(-123., -43., unit=u.deg),
                         energy=sp2, flux=1/u.s/u.cm**2)
        photons = [s.generate_photons(1e5 * u.s) for s in [s1, s2]]
        # compare binned energy histograms; loose tolerance because
        # the draws are random
        hists = [np.histogram(p['energy'], bins=np.arange(1, 3, .1))
                 for p in photons]
        assert np.allclose(hists[0][0], hists[1][0], rtol=0.14)
| Chandra-MARX/marxs | marxs/source/tests/test_source.py | Python | gpl-3.0 | 9,880 | [
"Gaussian"
] | 906fa88199a8c0f95fa3a4787f4ee24cc2c6618cc005c8e2b6956dfeb29ba659 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
import numpy as np
import pandas as pd
import numpy.testing as npt
from unittest import TestCase, main
from skbio import Protein, DNA, RNA, Sequence
from skbio.util import get_data_path
from skbio.io import GenBankFormatError
from skbio.io.format.genbank import (
_genbank_sniffer,
_genbank_to_generator, _genbank_to_sequence,
_genbank_to_dna, _genbank_to_rna, _genbank_to_protein,
_parse_locus, _parse_reference,
_parse_loc_str, _parse_section_default,
_generator_to_genbank, _sequence_to_genbank,
_protein_to_genbank, _rna_to_genbank, _dna_to_genbank,
_serialize_locus)
class SnifferTests(TestCase):
    """Check that the GenBank sniffer accepts valid files and rejects
    empty or malformed ones."""

    def setUp(self):
        # files the sniffer should recognize as GenBank
        self.positive_fps = list(map(get_data_path, [
            'genbank_5_blanks_start_of_file',
            'genbank_single_record_upper',
            'genbank_single_record_lower',
            'genbank_multi_records']))
        # files it should reject (empty, stray whitespace, bad LOCUS line)
        self.negative_fps = list(map(get_data_path, [
            'empty',
            'whitespace_only',
            'genbank_6_blanks_start_of_file',
            'genbank_w_beginning_whitespace',
            'genbank_missing_locus_name']))

    def test_positives(self):
        for fp in self.positive_fps:
            self.assertEqual(_genbank_sniffer(fp), (True, {}))

    def test_negatives(self):
        for fp in self.negative_fps:
            self.assertEqual(_genbank_sniffer(fp), (False, {}))
class GenBankIOTests(TestCase):
    # parent class to set up test data for the child class
    def setUp(self):
        """Build fixtures shared by the reader/writer/roundtrip test classes."""
        # test locus line
        # Each entry pairs a raw LOCUS line (wrapped in a one-element list,
        # the way the parser receives it) with the dict it should parse to.
        # Optional fields (shape, mol_type) may be absent -> None.
        self.locus = (
            (['LOCUS NC_005816 9609 bp '
              'DNA circular CON 07-FEB-2015'],
             {'division': 'CON', 'mol_type': 'DNA', 'shape': 'circular',
              'locus_name': 'NC_005816', 'date': '07-FEB-2015',
              'unit': 'bp', 'size': 9609}),
            (['LOCUS SCU49845 5028 bp '
              'DNA PLN 21-JUN-1999'],
             {'division': 'PLN', 'mol_type': 'DNA', 'shape': None,
              'locus_name': 'SCU49845', 'date': '21-JUN-1999',
              'unit': 'bp', 'size': 5028}),
            (['LOCUS NP_001832 360 aa '
              'linear PRI 18-DEC-2001'],
             {'division': 'PRI', 'mol_type': None, 'shape': 'linear',
              'locus_name': 'NP_001832', 'date': '18-DEC-2001',
              'unit': 'aa', 'size': 360}))

        # test single record and read uppercase sequence
        self.single_upper_fp = get_data_path('genbank_single_record_upper')
        self.single_lower_fp = get_data_path('genbank_single_record_lower')
        # (sequence, metadata, positional_metadata, constructor) tuple for
        # the minimal single-record protein file.
        self.single = (
            'GSREILDFK',
            {'LOCUS': {'date': '23-SEP-1994',
                       'division': 'BCT',
                       'locus_name': 'AAB29917',
                       'mol_type': None,
                       'shape': 'linear',
                       'size': 9,
                       'unit': 'aa'}},
            None,
            Protein)

        # Single-record RNA file with a full complement of sections
        # (ACCESSION, COMMENT, FEATURES with interval metadata, ...).
        self.single_rna_fp = get_data_path('genbank_single_record')
        self.single_rna = (
            'gugaaacaaagcacuauugcacuggcugucuuaccguuacuguuuaccccugugacaaaagcc',
            {'ACCESSION': 'M14399',
             'COMMENT': 'Original source text: E.coli, cDNA to mRNA.',
             'DEFINITION': "alkaline phosphatase signal mRNA, 5' end.",
             'FEATURES': [{'db_xref': '"taxon:562"',
                           'index_': 0,
                           'left_partial_': False,
                           'location': '1..63',
                           'mol_type': '"mRNA"',
                           'organism': '"Escherichia coli"',
                           'rc_': False,
                           'right_partial_': False,
                           'type_': 'source'},
                          {'codon_start': '1',
                           'db_xref': [
                               '"GI:145230"', '"taxon:562"', '"taxon:561"'],
                           'index_': 1,
                           'left_partial_': False,
                           'location': '1..>63',
                           'note': '"alkaline phosphatase signal peptide"',
                           'protein_id': '"AAA23431.1"',
                           'rc_': False,
                           'right_partial_': True,
                           'transl_table': '11',
                           'translation': '"MKQSTIALAVLPLLFTPVTKA"',
                           'type_': 'CDS'}],
             'KEYWORDS': 'alkaline phosphatase; signal peptide.',
             'LOCUS': {'date': '26-APR-1993',
                       'division': 'BCT',
                       'locus_name': 'ECOALKP',
                       'mol_type': 'mRNA',
                       'shape': 'linear',
                       'size': 63,
                       'unit': 'bp'},
             'SOURCE': {'ORGANISM': 'Escherichia coli',
                        'taxonomy': 'Bacteria; Proteobacteria; '
                        'Gammaproteobacteria; Enterobacteriales; '
                        'Enterobacteriaceae; Escherichia.'},
             'VERSION': 'M14399.1 GI:145229'},
            # One boolean column per FEATURE, marking the positions each
            # feature covers.
            pd.DataFrame({0: np.ones(63, dtype=bool),
                          1: np.ones(63, dtype=bool)}),
            RNA)

        # test:
        # 1. multiple records in one file
        # 2. lowercase sequence
        # 3. DNA, RNA, Protein type
        # 4. variation of formats
        self.multi_fp = get_data_path('genbank_multi_records')
        self.multi = (
            ('gsreildfk',
             {'ACCESSION': 'AAB29917',
              'COMMENT': 'Method: direct peptide sequencing.',
              'DBSOURCE': 'accession AAB29917.1',
              'DEFINITION': 'L-carnitine amidase {N-terminal}',
              'FEATURES': [{'index_': 0,
                            'left_partial_': False,
                            'location': '1..9',
                            'organism': '"Bacteria"',
                            'rc_': False,
                            'right_partial_': False,
                            'type_': 'source'},
                           {'index_': 1,
                            'left_partial_': False,
                            'location': '1..>9',
                            'product': '"L-carnitine amidase"',
                            'rc_': False,
                            'right_partial_': True,
                            'type_': 'Protein'}],
              'KEYWORDS': '.',
              'LOCUS': {'date': '23-SEP-1994',
                        'division': 'BCT',
                        'locus_name': 'AAB29917',
                        'mol_type': None,
                        'shape': 'linear',
                        'size': 9,
                        'unit': 'aa'},
              'REFERENCE': [{'AUTHORS': 'Joeres,U. and Kula,M.R.',
                             'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
                             'PUBMED': '7764422',
                             'REFERENCE': '1 (residues 1 to 9)',
                             'REMARK': 'from the original journal article.',
                             'TITLE': 'a microbial L-carnitine amidase'},
                            {'AUTHORS': 'Joeres,U. and Kula,M.R.',
                             'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
                             'PUBMED': '7764422',
                             'REFERENCE': '1 (residues 1 to 9)',
                             'TITLE': 'a microbial L-carnitine amidase'}],
              'SOURCE': {'ORGANISM': 'Bacteria',
                         'taxonomy': 'Unclassified.'},
              'VERSION': 'AAB29917.1 GI:545426'},
             pd.DataFrame({0: np.ones(9, dtype=bool),
                           1: np.ones(9, dtype=bool)}),
             Protein),

            ('catgcaggc',
             {'ACCESSION': 'HQ018078',
              'DEFINITION': 'Uncultured Xylanimonas sp.16S, partial',
              'FEATURES': [{'country': '"Brazil: Parana, Paranavai"',
                            'environmental_sample': '',
                            'index_': 0,
                            'left_partial_': False,
                            'location': '1..9',
                            'rc_': False,
                            'right_partial_': False,
                            'type_': 'source'},
                           {'index_': 1,
                            'left_partial_': True,
                            'location': 'complement(<2..>8)',
                            'product': '"16S ribosomal RNA"',
                            'rc_': True,
                            'right_partial_': True,
                            'type_': 'rRNA'}],
              'KEYWORDS': 'ENV.',
              'LOCUS': {'date': '29-AUG-2010',
                        'division': 'ENV',
                        'locus_name': 'HQ018078',
                        'mol_type': 'DNA',
                        'shape': 'linear',
                        'size': 9,
                        'unit': 'bp'},
              'SOURCE': {'ORGANISM': 'uncultured Xylanimonas sp.',
                         'taxonomy': 'Bacteria; Actinobacteria; '
                         'Micrococcales; Promicromonosporaceae; '
                         'Xylanimonas; environmental samples.'},
              'VERSION': 'HQ018078.1 GI:304421728'},
             # Second feature is a reverse-complement partial interval, so
             # its boolean column excludes the first and last positions.
             pd.DataFrame({0: [True] * 9,
                           1: [False] + [True] * 7 + [False]}),
             DNA))
class ReaderTests(GenBankIOTests):
    """Tests for the GenBank parsing helpers and reader entry points."""

    def test_parse_reference(self):
        # A REFERENCE section with a continuation line: the two TITLE lines
        # should be joined with a single space.
        lines = '''
REFERENCE 1 (bases 1 to 154478)
AUTHORS Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.
TITLE Complete structure of the chloroplast genome of
Arabidopsis thaliana
JOURNAL DNA Res. 6 (5), 283-290 (1999)
PUBMED 10574454'''.split('\n')

        exp = {'AUTHORS': 'Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.',
               'JOURNAL': 'DNA Res. 6 (5), 283-290 (1999)',
               'PUBMED': '10574454',
               'REFERENCE': '1 (bases 1 to 154478)',
               'TITLE': ('Complete structure of the chloroplast genome of'
                         ' Arabidopsis thaliana')}
        self.assertEqual(_parse_reference(lines), exp)

    def test_parse_locus(self):
        # Round-trip check against the shared LOCUS fixtures.
        for serialized, parsed in self.locus:
            self.assertEqual(_parse_locus(serialized), parsed)

    def test_parse_locus_invalid(self):
        lines = [
            # missing unit
            ['LOCUS NC_005816 9609 '
             ' DNA circular CON 07-FEB-2015'],
            # missing division
            ['LOCUS SCU49845 5028 bp'
             ' DNA 21-JUN-1999'],
            # wrong date format
            ['LOCUS NP_001832 360 aa'
             ' linear PRI 2001-12-18']]
        for line in lines:
            with self.assertRaisesRegex(GenBankFormatError,
                                        'Could not parse the LOCUS line:.*'):
                _parse_locus(line)

    def test_parse_section_default(self):
        # Three shapes of generic section, paired element-wise with the
        # kwargs to use and the expected result.
        lines = [
            ['FOO blah blah',
             ' blah'],
            ['FOO=blah',
             ' blah'],
            ['FOO']]
        kwargs = [{'join_delimitor': '=', 'return_label': False},
                  {'label_delimitor': '=', 'join_delimitor': '',
                   'return_label': True},
                  {'label_delimitor': '=', 'join_delimitor': '=',
                   'return_label': True}]
        expects = ['blah blah=blah',
                   ('FOO', 'blahblah'),
                   ('FOO', '')]
        for i, j, k in zip(lines, kwargs, expects):
            self.assertEqual(k, _parse_section_default(i, **j))

    def test_parse_loc_str(self):
        # Representative GenBank location strings; each maps to a
        # (boundary-flags dict, per-position boolean mask) pair for a
        # sequence of this length.
        length = 12
        examples = [
            '',
            '9',  # a single base in the presented sequence
            '3..8',
            '<3..8',
            '1..>8',
            'complement(3..8)',
            'complement(join(3..5,7..9))',
            'join(3..5,7..9)',
            'J00194.1:1..9',
            '1.9',
            '1^9']
        expects = [
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
            ({'right_partial_': False, 'left_partial_': True, 'rc_': False},
             np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
            ({'right_partial_': True, 'left_partial_': False, 'rc_': False},
             np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': True},
             np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': True},
             np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0], dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0], dtype=bool)),
            # Remote/unsupported location forms parse to an all-False mask.
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.zeros(length, dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.zeros(length, dtype=bool)),
            ({'right_partial_': False, 'left_partial_': False, 'rc_': False},
             np.zeros(length, dtype=bool))]
        for example, expect in zip(examples, expects):
            parsed = _parse_loc_str(example, length)
            self.assertDictEqual(parsed[0], expect[0])
            npt.assert_equal(parsed[1], expect[1])

    def test_parse_loc_str_invalid(self):
        length = 12
        examples = [
            'abc',
            '3-8']
        for example in examples:
            with self.assertRaisesRegex(GenBankFormatError,
                                        'Could not parse location string: '
                                        '"%s"' % example):
                _parse_loc_str(example, length)

    def test_genbank_to_generator_single(self):
        # test single record and uppercase sequence
        for c in [Sequence, Protein]:
            obs = next(_genbank_to_generator(
                self.single_upper_fp, constructor=c))
            exp = c(self.single[0], metadata=self.single[1],
                    positional_metadata=self.single[2])
            self.assertEqual(exp, obs)

    def test_genbank_to_generator(self):
        for i, obs in enumerate(_genbank_to_generator(self.multi_fp)):
            seq, md, pmd, constructor = self.multi[i]
            exp = constructor(seq, metadata=md, lowercase=True,
                              positional_metadata=pmd)
            self.assertEqual(exp, obs)

    def test_genbank_to_sequence(self):
        # seq_num is 1-based.
        for i, exp in enumerate(self.multi):
            obs = _genbank_to_sequence(self.multi_fp, seq_num=i+1)
            exp = Sequence(exp[0], metadata=exp[1], lowercase=True,
                           positional_metadata=exp[2])
            self.assertEqual(exp, obs)

    def test_genbank_to_rna(self):
        seq, md, pmd, constructor = self.single_rna
        obs = _genbank_to_rna(self.single_rna_fp)
        exp = constructor(seq, metadata=md,
                          lowercase=True, positional_metadata=pmd)
        self.assertEqual(exp, obs)

    def test_genbank_to_dna(self):
        # Second record of the multi-record file is the DNA one.
        i = 1
        exp = self.multi[i]
        obs = _genbank_to_dna(self.multi_fp, seq_num=i+1)
        exp = DNA(exp[0], metadata=exp[1], lowercase=True,
                  positional_metadata=exp[2])
        self.assertEqual(exp, obs)

    def test_genbank_to_protein(self):
        # First record of the multi-record file is the protein one.
        i = 0
        exp = self.multi[i]
        obs = _genbank_to_protein(self.multi_fp, seq_num=i+1)
        exp = Protein(exp[0], metadata=exp[1],
                      lowercase=True, positional_metadata=exp[2])
        self.assertEqual(exp, obs)
class WriterTests(GenBankIOTests):
    """Tests for serializing skbio sequence objects into GenBank format."""

    def test_serialize_locus(self):
        for raw_line, parsed in self.locus:
            obs = _serialize_locus('LOCUS', parsed)
            self.assertEqual(obs, raw_line[0] + '\n')

    def test_generator_to_genbank(self):
        seq, md, pmd, constructor = self.single
        record = constructor(seq, md, pmd)
        out = io.StringIO()
        _generator_to_genbank([record], out)
        obs = out.getvalue()
        out.close()
        with io.open(self.single_lower_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)

    def test_sequence_to_genbank(self):
        out = io.StringIO()
        # Serialize every record into the same handle; the concatenation
        # must reproduce the multi-record file.
        for seq, md, pmd, constructor in self.multi:
            _sequence_to_genbank(Sequence(seq, md, pmd, lowercase=True), out)
        obs = out.getvalue()
        out.close()
        with io.open(self.multi_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)

    def test_dna_protein_to_genbank(self):
        # Writer order matches the record order in self.multi:
        # protein first, then DNA.
        writers = [_protein_to_genbank,
                   _dna_to_genbank]
        out = io.StringIO()
        for writer, (seq, md, pmd, constructor) in zip(writers, self.multi):
            writer(constructor(seq, md, pmd, lowercase=True), out)
        obs = out.getvalue()
        out.close()
        with io.open(self.multi_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)

    def test_rna_to_genbank(self):
        seq, md, pmd, constructor = self.single_rna
        out = io.StringIO()
        _rna_to_genbank(constructor(seq, md, pmd, lowercase=True), out)
        obs = out.getvalue()
        out.close()
        with io.open(self.single_rna_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)
class RoundtripTests(GenBankIOTests):
    """Read a GenBank file and write it back; output must equal the input."""

    def _assert_roundtrip(self, fp, reader, writer):
        # Serialize whatever the reader produced and compare the result
        # byte-for-byte with the original file contents.
        out = io.StringIO()
        writer(reader(fp), out)
        obs = out.getvalue()
        out.close()
        with io.open(fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)

    def test_roundtrip_generator(self):
        self._assert_roundtrip(self.multi_fp,
                               _genbank_to_generator, _generator_to_genbank)

    def test_roundtrip_rna(self):
        self._assert_roundtrip(self.single_rna_fp,
                               _genbank_to_rna, _rna_to_genbank)

    def test_roundtrip_dna(self):
        self._assert_roundtrip(self.single_rna_fp,
                               _genbank_to_dna, _dna_to_genbank)

    def test_roundtrip_protein(self):
        self._assert_roundtrip(self.single_lower_fp,
                               _genbank_to_protein, _protein_to_genbank)

    def test_roundtrip_sequence(self):
        self._assert_roundtrip(self.single_rna_fp,
                               _genbank_to_sequence, _sequence_to_genbank)
if __name__ == '__main__':
main()
| anderspitman/scikit-bio | skbio/io/format/tests/test_genbank.py | Python | bsd-3-clause | 19,935 | [
"scikit-bio"
] | 193226a8d7eba4ffe6cf0a0a2f326a3129727b9c11d7769d7ad7784ec63ca151 |
import numpy as np
from math import pi, sqrt

from ase import Atoms, io, optimize
from ase.calculators.lj import LennardJones
from ase.optimize.basin import BasinHopping
from ase.io import Trajectory, read
from ase.units import kB

# Global minima from
# Wales and Doye, J. Phys. Chem. A, vol 101 (1997) 5111-5116
E_global = {
    4: -6.000000,
    5: -9.103852,
    6: -12.712062,
    7: -16.505384,
}
N = 7
R = N**(1. / 3.)
np.random.seed(42)
# Random starting geometry inside a cube scaled with cluster size.
pos = np.random.uniform(-R, R, (N, 3))
s = Atoms('He' + str(N),
          positions=pos)
s.set_calculator(LennardJones())
original_positions = 1. * s.get_positions()

ftraj = 'lowest.traj'
for GlobalOptimizer in [BasinHopping(s,
                                     temperature=100 * kB,
                                     dr=0.5,
                                     trajectory=ftraj,
                                     optimizer_logfile=None),
                        ]:

    if isinstance(GlobalOptimizer, BasinHopping):
        GlobalOptimizer.run(10)
        Emin, smin = GlobalOptimizer.get_minimum()
    else:
        GlobalOptimizer(totalsteps=10)
        Emin = s.get_potential_energy()
        smin = s
    print("N=", N, 'minimal energy found', Emin,
          ' global minimum:', E_global[N])

    # recalc energy
    smin.set_calculator(LennardJones())
    E = smin.get_potential_energy()
    assert abs(E - Emin) < 1e-15

    # BUG FIX: the original wrote `smim = read(ftraj)` (typo), discarded
    # the structure read back from the trajectory, and re-checked the
    # unchanged `smin`.  Check the energy of the structure actually read
    # from the trajectory file (the last, i.e. lowest, minimum written).
    smin = read(ftraj)
    E = smin.get_potential_energy()
    assert abs(E - Emin) < 1e-15

    # check that only minima were written
    last_energy = None
    for im in io.read(ftraj + '@:'):
        energy = im.get_potential_energy()
        if last_energy is not None:
            assert energy < last_energy
        last_energy = energy

    # reset positions
    s.set_positions(original_positions)
| suttond/MODOI | ase/test/basin.py | Python | lgpl-3.0 | 1,794 | [
"ASE"
] | 7b905a90311bc483f40acafd6f9ad7b1af314aec43f365e75e49499d07171a45 |
"""
A collection of Naive Bayes type classifier models.
.. autoclass:: revscoring.scoring.models.GaussianNB
:members:
:member-order:
.. autoclass:: revscoring.scoring.models.MultinomialNB
:members:
:member-order:
.. autoclass:: revscoring.scoring.models.BernoulliNB
:members:
:member-order:
"""
import logging
from sklearn import naive_bayes
from .sklearn import ProbabilityClassifier
logger = logging.getLogger(__name__)
class NaiveBayes(ProbabilityClassifier):
    # Marker base class shared by the Naive Bayes variants below; all
    # behavior is inherited from ProbabilityClassifier.
    pass
class GaussianNB(NaiveBayes):
    """
    Implements a Gaussian Naive Bayes model
    """
    # Underlying scikit-learn estimator instantiated by the sklearn wrapper.
    Estimator = naive_bayes.GaussianNB
class MultinomialNB(NaiveBayes):
    """
    Implements a Multinomial Naive Bayes model
    """
    # Underlying scikit-learn estimator instantiated by the sklearn wrapper.
    Estimator = naive_bayes.MultinomialNB
class BernoulliNB(NaiveBayes):
    """
    Implements a Bernoulli Naive Bayes model
    """
    # Underlying scikit-learn estimator instantiated by the sklearn wrapper.
    Estimator = naive_bayes.BernoulliNB
| wiki-ai/revscoring | revscoring/scoring/models/naive_bayes.py | Python | mit | 909 | [
"Gaussian"
] | a2541ac6713b59f5428c83c5f8c352117eaffbd6aa2f4658de38d7bd32a6cca4 |
import argparse
import gzip
import sys
import array
import pysam
class SNP:
    """SNP objects hold data for a single SNP"""
    def __init__(self, snp_line):
        """
        Initialize SNP object.

        Parameters:
        -----------
        snp_line : str
            Line from SNP file ("<1-based position> <allele1> <allele2>").

        Attributes:
        -----------
        pos : int
            Genomic position of SNP (0-based, to match pysam).
        alleles : list
            List of two alleles.
        ptype : str
            Type of polymorphism (snp or indel). If there are multiple alleles
            and one is an indel, ptype will be "indel". If the alleles are all
            single nucleotide variants, ptype will be "snp".
        max_len : int
            Maximum allele length. If greater than one, implies an insertion.
        """
        snp_split = snp_line.strip().split()
        # SNP files are 1-based; convert to 0-based.
        self.pos = int(snp_split[0]) - 1
        self.alleles = [snp_split[1], snp_split[2]]
        self.ptype = "snp"
        self.max_len = 0
        for i in range(len(self.alleles)):
            if self.alleles[i] == "-":
                # "-" denotes a deletion allele; store it as the empty string.
                self.alleles[i] = ""
                self.ptype = "indel"
            elif len(self.alleles[i]) > self.max_len:
                self.max_len = len(self.alleles[i])
        # An allele longer than one base implies an insertion.
        if self.max_len > 1:
            self.ptype = "indel"

    def add_allele(self, new_alleles):
        """
        Add new alleles for a snp or indel.

        Parameters:
        -----------
        new_alleles : list
            List of alleles (each allele is a string like "A").
        """
        # If a string is passed, each element of the string will be added as an
        # allele.
        assert type(new_alleles) is list
        for new_allele in new_alleles:
            if new_allele == "-":
                self.ptype = "indel"
                new_allele = ""
            # Only add the new allele if it doesn't already exist.
            if not (new_allele in self.alleles):
                self.alleles.append(new_allele)
            if len(new_allele) > self.max_len:
                self.max_len = len(new_allele)
        if self.max_len > 1:
            self.ptype = "indel"

    def shift_indel(self):
        """
        Currently not used anywhere.
        """
        # Shift the indel start one base right, trimming the first base of
        # each allele.
        # NOTE(review): `len(self.alleles)` below looks like it may have been
        # intended as `len(self.alleles[i])` -- as written, alleles are only
        # popped once a single allele remains.  Confirm before reusing.
        self.pos += 1
        self.max_len -= 1
        i = 0
        while i < len(self.alleles):
            if len(self.alleles) <= 1:
                self.alleles.pop(i)
            else:
                self.alleles[i] = self.alleles[i][1:]
                i += 1
        self.alleles.append("")
class BamScanner:
"""
Class to keep track of all the information read in from the bamfile/snpfile.
"""
    def __init__(self, is_paired_end, max_window, file_name, keep_file_name,
                 remap_name, remap_num_name, fastq_names, snp_dir):
        """
        Constructor: opens files, creates initial table.

        Attributes:
        -----------
        is_paired_end : boolean
            Boolean indicating whether input data are paired end.
        snp_dir : str
            Path to directory that contains gzipped SNP files (one per
            chromosome).
        bamfile : pysam.Samfile
            Input bam file that we are reading.
        keep_bam : pysam.Samfile
            Output bam file of reads that do not overlap SNPs.
        remap_bam : pysam.Samfile
            Output bam file of reads that do overlap SNPs and need to be
            remapped.
        remap_num_file : gzip.GzipFile
            File to write the number of alternate sequences per remapped
            read (pair) to.
        fastqs : list
            List of gzip.GzipFile objects for the different fastqs that will
            contain the reads to remap.
        read_table : list
            List of lists. Sublist i contains the reads whose positions are
            [real read position] % max_window.
        cur_read : pysam.AlignedRead
            Current read from the bam file.
        end_of_file : boolean
            Boolean indicating whether we've reached the end of the bam file.
        remap_num : int
            A counter for the number of reads to be remapped. This starts at
            one and is incremented when a read (pair) is written to the fastq
            file(s).
        ref_match : int
            This is incremented everytime a read sequence matches a SNP
            genotype. Note that a particular read sequence can be looked at
            multiple times if it has multiple SNPs, so this is somewhat hard
            to interpret.
        alt_match : int
            This is initialized but not used anywhere.
        no_match : int
            This is incremented everytime a read sequence doesn't match a SNP
            genotype. Note that a particular read sequence can be looked at
            multiple times if it has multiple SNPs, so this is somewhat hard
            to interpret.
        toss : int
            Number of reads tossed.
        nosnp : int
            Number of reads with no SNPs. If one read in a read pair has a SNP
            and the other doesn't, both "nosnp" and "remap" (below) will be
            incremented by one.
        remap : int
            Number of reads to remap. If one read in a read pair has a SNP and
            the other doesn't, both "nosnp" and "remap" will be
            incremented by one.
        tot : int
            Total number of reads seen by empty_slot_single (only incremented
            there, so not fully implemented for paired-end data).
        printstats : boolean
            Boolean for some print statements, currently not used.
        num_reads : int
            Number of reads for a given window that we have read but not yet
            written. This number is incremented when we read in a read and
            decremented when we pop a read out of the read table.
        window_too_small : int
            The number of reads thrown out because their CIGAR contained a run
            of N's longer than max_window.
        cur_snp : SNP
            The current SNP to be or being parsed.
        pos : int
            The current genomic position we are analyzing.
        chr_num : int
            Bam file ID number of the current chromosome we are analyzing.
        chr_name : str
            Name of the current chromosome we are analyzing.
        max_window : int
            Size of the window in base pairs to process reads. All of the
            reads and SNPs within max_window base pairs are processed at once.
            Any junction-spanning reads (i.e. with N in the cigar) that extend
            outside of the window are thrown out.
        """
        self.is_paired_end = is_paired_end

        # Read in all input files and create output files
        self.snp_dir = snp_dir
        self.bamfile = pysam.Samfile(file_name, "rb")
        self.keep_bam = pysam.Samfile(keep_file_name, "wb",
                                      template=self.bamfile)
        self.remap_bam = pysam.Samfile(remap_name, "wb", template=self.bamfile)
        self.remap_num_file = gzip.open(remap_num_name, "w")
        self.fastqs = [gzip.open(fqn, "w") for fqn in fastq_names]

        # Prime the scanner with the first read; bail out if the bam is empty.
        try:
            self.cur_read = self.bamfile.next()
        except:
            sys.stderr.write("No lines available for input")
            return()
        self.end_of_file = False

        # Bookkeeping counters (see docstring above).
        self.remap_num = 1
        self.ref_match = 0
        self.alt_match = 0
        self.no_match = 0
        self.toss = 0
        self.nosnp = 0
        self.remap = 0
        self.tot = 0
        self.window_too_small = 0
        self.printstats = True

        self.num_reads = 0
        self.cur_snp = None

        self.pos = self.cur_read.pos
        self.chr_num = self.cur_read.tid
        self.chr_name = self.bamfile.getrname(self.cur_read.tid)

        self.max_window = max_window

        # Initialize the read tracking tables.
        self.read_table = [[] for x in range(self.max_window)]

        # Initialize the SNP and indel tracking tables.
        self.switch_chr()

        # Fill all tables.
        self.fill_table()
    def fill_table(self):
        """
        Fills the table of reads starting from the current position and
        extending for the next <max_window> base pairs. The read table is a
        list of lists of length max_window. If the position of the current
        read is 100, the first sublist contains all of the reads at position
        100, the next sublist contains all of the reads at position 101, etc.
        """
        if self.end_of_file:
            return()

        # For first read we need to set self.pos and initialize the SNP table.
        if self.num_reads == 0:
            self.pos = self.cur_read.pos
            self.init_snp_table()

        # Buffer reads until we leave the current chromosome or the window.
        while (self.cur_read.tid == self.chr_num) and \
              (self.cur_read.pos < (self.pos + self.max_window)):
            self.num_reads += 1
            self.read_table[self.cur_read.pos % self.max_window].append(self.cur_read)

            # Get a new read and check for the end of the file.
            try:
                self.cur_read = self.bamfile.next()
            except:
                # No more reads: flush everything buffered and stop.
                self.empty_table()
                self.end_of_file = True
                return()

        # Check to see if we've come across a new chromosome.
        if self.cur_read.tid != self.chr_num:
            # Flush all buffered reads before changing chromosome state.
            self.empty_table()
            self.chr_num = self.cur_read.tid
            try:
                self.chr_name = self.bamfile.getrname(self.chr_num)
            except:
                sys.stderr.write("Problem with tid: " + str(self.chr_num) + "\n")
                self.skip_chr()
            self.pos = self.cur_read.pos
            self.switch_chr()
            # Recurse to buffer the first window of the new chromosome.
            self.fill_table()
    def switch_chr(self):
        """Switches to looking for SNPs on next chromosome."""
        # Try to open the per-chromosome SNP file; if it is missing, skip all
        # reads on this chromosome (skip_chr advances chr_name) and retry.
        chr_match = False
        while not chr_match and not self.end_of_file:
            try:
                self.snpfile = gzip.open("%s/%s.snps.txt.gz"
                                         % (self.snp_dir, self.chr_name))
                sys.stderr.write("Starting on chromosome " + self.chr_name + "\n")
                chr_match = True
            except:
                sys.stderr.write("SNP file for chromosome " +
                                 self.chr_name + " is not found. Skipping these reads.\n")
                self.skip_chr()

        self.end_of_snp_file = False
        # Load the first SNP of the new chromosome.
        self.get_next_snp()
    def init_snp_table(self):
        """
        Creates an empty SNP table starting from the position of the current
        read and extending max_window base pairs. The SNP table is max_window
        long and has a zero if there are no variants overlapping a position or
        contains a SNP object if there is a variant that overlaps a given
        position.

        Also creates an indel table which is a list of lists of length
        max_window.

        Also creates an indel dict which is a dict whose keys are genomic
        positions and whose values are SNP objects whose ptype is indel.
        """
        # Number of distinct occupied slots in the SNP table.
        self.num_snps = 0
        self.indel_dict = {}
        self.snp_table = [0 for x in range(self.max_window)]
        self.indel_table = [[] for x in range(self.max_window)]

        # Get SNPs in this window but skip SNPs that are upstream of the
        # current read.
        while not self.end_of_snp_file and self.cur_snp.pos < self.pos:
            self.get_next_snp()

        # Add SNPs downstream of the current read and within the current
        # window.
        while not self.end_of_snp_file and (self.cur_snp.pos < self.pos + self.max_window):
            if self.cur_snp.ptype == "snp":
                self.add_snp()
            else:
                self.add_indel()
            self.get_next_snp()
def add_snp(self):
"""
Add a SNP to the SNP table. If the SNP table has a zero at this
position, the SNP object will replace the zero. If the SNP table
already has a SNP object at this position, the SNP will be added as new
alleles.
"""
cur_pos = self.cur_snp.pos % self.max_window
if self.snp_table[cur_pos] == 0:
self.num_snps += 1
self.snp_table[cur_pos] = self.cur_snp
elif isinstance(self.snp_table[cur_pos], SNP):
self.snp_table[cur_pos].add_allele(self.cur_snp.alleles)
def add_indel(self):
"""
Add an indel to the indel table and indel dict. If there is already an
indel in the indel dict at this position, add the alleles from cur_snp.
"""
position = self.cur_snp.pos
if self.indel_dict.has_key(position):
start = self.indel_dict[position].max_len
self.indel_dict[position].add_allele(self.cur_snp.alleles)
else:
self.indel_dict[position] = self.cur_snp
start = 0
end = self.indel_dict[position].max_len
# max_len is the length of the longest allele for an indel and
# "position" is the genomic position of this indel. If the indel_dict
# already has an indel at this genomic position, we will append
# "position" to all of the positions/sublists in indel_table beyond the
# lenght of the indel that already exists. If there isn't already an
# indel in indel_table at this "position", we'll append "position" to
# all of the sublists in indel_table that are spanned by the indel.
i = start
while (i < end) and ((self.cur_snp.pos + i) < (self.pos + self.max_window)):
self.indel_table[(self.cur_snp.pos + i) % self.max_window].append(position)
i += 1
def get_next_snp(self):
"""Read in next SNP (and set self.cur_snp) or signal end of file."""
snp_line = self.snpfile.readline()
if snp_line:
self.cur_snp = SNP(snp_line)
else:
self.end_of_snp_file = True
    def skip_chr(self):
        """Skips all of the reads from the chromosome of the current read and
        moves on to the next chromosome. Used if the SNP file can't be
        located."""
        # Discard reads until the chromosome id changes or the bam ends.
        while self.cur_read.tid == self.chr_num:
            try:
                self.cur_read = self.bamfile.next()
            except:
                self.empty_table()
                self.end_of_file = True
                return
        self.chr_num = self.cur_read.tid
        try:
            self.chr_name = self.bamfile.getrname(self.chr_num)
        except:
            # Unknown tid: report it and recurse to skip this chromosome too.
            sys.stderr.write("Problem with tid: " + str(self.chr_num) + "\n")
            self.skip_chr()
    def empty_slot_single(self):
        """Processes all reads that map to the current position and
        removes them from the read table. Treats reads as single-end."""
        cur_slot = self.pos % self.max_window
        while len(self.read_table[cur_slot]) > 0:
            self.tot += 1
            read = self.read_table[cur_slot].pop()
            self.num_reads -= 1
            seqs = self.check_for_snps(read, 0)
            # num_seqs is the number of different sequences for this read,
            # which includes the original sequence as well as the different
            # sequences with alternate alleles swapped in.
            num_seqs = len(seqs)
            if (num_seqs == 0) or (num_seqs > 10):
                # 0 => tossed by check_for_snps; >10 => too many allele
                # combinations, drop the read.
                continue
            if (num_seqs == 1):
                # No overlapping SNPs: keep the read as-is.
                self.keep_bam.write(read)
            else:
                # Read overlaps SNP(s): record it for remapping and write
                # one fastq entry per alternate-allele sequence.
                self.remap_num_file.write("%i\n" % (num_seqs - 1))
                self.remap_num_file.flush()
                self.remap_bam.write(read)
                for seq in seqs[1:]:
                    # Fastq name encodes id:chrom:pos:number-of-alternates so
                    # the remapped read can be matched back later.
                    loc_line = "%i:%s:%i:%i" % (
                        self.remap_num,
                        self.chr_name,
                        read.pos,
                        num_seqs - 1)
                    self.fastqs[0].write("@%s\n%s\n+%s\n%s\n" % (
                        loc_line,
                        seq,
                        loc_line,
                        read.qual))
                self.remap_num += 1
        # Advance the window by one position.
        self.shift_SNP_table()
    def empty_slot_paired(self):
        """Processes all reads that map to the current position and
        removes them from the read table. Treats reads as paired-end."""
        cur_slot = self.pos % self.max_window
        # While there are reads in this slot...
        while len(self.read_table[cur_slot]) > 0:
            # Pop the first read in the slot
            read = self.read_table[self.pos % self.max_window].pop()
            self.num_reads -= 1

            # Figure out the matching read position
            pair_chr_num = read.rnext
            pair_pos = read.mpos
            # NOTE(review): pairs on different chromosomes or whose mate lies
            # beyond the window are silently dropped here.
            if (pair_chr_num != self.chr_num) or \
               ((pair_pos - self.pos) > self.max_window):
                continue

            # Find the slot the matching read is in
            pair_slot = pair_pos % self.max_window
            for indx in range(len(self.read_table[pair_slot])):
                # Match mates on the trailing field of the query name.
                if self.read_table[pair_slot][indx].qname.split(":")[-1] == read.qname.split(":")[-1]:
                    pair_read = self.read_table[pair_slot].pop(indx)
                    self.num_reads -= 1
                    seq1s = self.check_for_snps(read, 0)
                    seq2s = self.check_for_snps(pair_read, read.mpos - read.pos)
                    # One remap candidate per combination of alternate
                    # sequences across both mates.
                    num_seqs = len(seq1s) * len(seq2s)
                    if (num_seqs == 0) or (num_seqs > 32):
                        # Either mate tossed, or too many combinations.
                        break
                    if (num_seqs == 1):
                        # Neither mate overlaps a SNP: keep the pair.
                        self.keep_bam.write(read)
                        self.keep_bam.write(pair_read)
                    else:
                        self.remap_bam.write(read)
                        self.remap_bam.write(pair_read)
                        self.remap_num_file.write("%i\n" % (2 * (num_seqs - 1)))
                        # Skip the first combination (both original
                        # sequences); only alternates need remapping.
                        first = True
                        for seq1 in seq1s:
                            for seq2 in seq2s:
                                if not first:
                                    left_pos = min(read.pos, pair_read.pos)
                                    right_pos = max(read.pos, pair_read.pos)
                                    # Fastq name encodes
                                    # id:chrom:left:right:number-of-alternates.
                                    loc_line = "%i:%s:%i:%i:%i" % (
                                        self.remap_num,
                                        self.chr_name,
                                        left_pos,
                                        right_pos,
                                        num_seqs - 1)
                                    self.fastqs[0].write("@%s\n%s\n+%s\n%s\n" % (
                                        loc_line,
                                        seq1,
                                        loc_line,
                                        read.qual))
                                    # Mate 2 is written reverse-complemented.
                                    self.fastqs[1].write("@%s\n%s\n+%s\n%s\n" % (
                                        loc_line,
                                        self.reverse_complement(seq2),
                                        loc_line,
                                        pair_read.qual))
                                first = False
                        self.remap_num += 1
                    # Stop searching for the pair since it was found.
                    break
        # Advance the window by one position.
        self.shift_SNP_table()
    def check_for_snps(self, read, start_dist):
        """
        Checks a single aligned read for overlapping SNPs and creates
        alternative sequences for remapping.

        Parameters
        ----------
        read : pysam.AlignedRead
            Read to check for SNPs in.
        start_dist : int
            Distance from the current position of the BamScanner to the
            start of the read (nonzero for the second mate of a pair).

        Returns
        -------
        seqs : list
            List of read sequences. The first entry is the read sequence from
            the bam file. Any subsequent sequences are the read sequence from
            the bam file except one base that overlapped a SNP is switched to
            the other allele. If the list is empty, the read overlaps an
            indel, has too many SNPs, or has a CIGAR character besides N or M,
            so we throw it out.
        """
        indx = read.pos % self.max_window
        # p keeps track of the number of read bases we've already analyzed.
        # When p = length of the read, we are done with this read.
        p = 0
        # num_snps is the number of SNPs in this read.
        num_snps = 0
        # seg_len is the distance from the current position of the
        # BamScanner to where we are in the read's alignment.
        seg_len = start_dist
        seqs = [read.seq]
        if start_dist > 0:
            # has_junc indicates whether the read has an N in the CIGAR,
            # although this doesn't seem to be used anywhere.
            has_junc = False
        # read.cigar is a list of tuples. Each tuple has two entries. The
        # first entry specifies the character in the cigar and the second
        # entry specifies the length of that character. The values are
        # M       BAM_CMATCH      0
        # I       BAM_CINS        1
        # D       BAM_CDEL        2
        # N       BAM_CREF_SKIP   3
        # S       BAM_CSOFT_CLIP  4
        # H       BAM_CHARD_CLIP  5
        # P       BAM_CPAD        6
        # =       BAM_CEQUAL      7
        # X       BAM_CDIFF       8
        # So a tuple (0, 5) means five matches and (4, 2) means a soft clip
        # of two.
        # We'll go through each cigar tuple one at a time.
        for cigar in read.cigar:
            seg_len += cigar[1]
            # Check whether this cigar segment is longer than the max window.
            # This generally happens if there is a junction read longer than
            # the max window.
            if seg_len > self.max_window:
                self.window_too_small += 1
                return([])
            if cigar[0] == 4:
                # CIGAR indicates a soft-clipping: consume read bases but do
                # not advance along the reference.
                p = p + cigar[1]
            elif cigar[0] == 0:
                # CIGAR indicates a match alignment to the reference genome.
                # Since there is a match, let's go through each matched base
                # and see whether it contains a SNP.
                for i in range(cigar[1]):
                    if len(self.indel_table[indx]) == 0:
                        snp = self.snp_table[indx]
                        if snp != 0:
                            num_snps += 1
                            if num_snps > 10:
                                # If there are more than 10 snps overlapping,
                                # throw out the read to prevent memory
                                # blow-up.
                                return([])
                            # Iterate over a copy, since alternate sequences
                            # are appended to seqs inside the loop.
                            for seq in list(seqs):
                                matches = 0
                                for geno in snp.alleles:
                                    if seq[p] == geno:
                                        matches += 1
                                        # Generate one alternate sequence per
                                        # other allele at this SNP.
                                        for alt_geno in snp.alleles:
                                            if not alt_geno == geno:
                                                new_seq = (seq[:p] + alt_geno +
                                                           seq[p+1:])
                                                seqs.append(new_seq)
                                if matches == 0:
                                    self.no_match += 1
                                else:
                                    self.ref_match += 1
                    else:
                        # It's an indel, throw it out.
                        self.toss += 1
                        return([])
                    indx = (indx + 1) % self.max_window
                    p += 1
            elif cigar[0] == 3:
                # Skipped in the reference genome (splice junction): advance
                # along the reference without consuming read bases.
                indx = (indx + cigar[1]) % self.max_window
                has_junc = True
            else:
                # There is a non-N/M in the read CIGAR--throw out the read.
                self.toss += 1
                return([])
        if len(seqs) == 1:
            self.nosnp += 1
        else:
            self.remap += 1
        return seqs
def shift_SNP_table(self):
"""Shifts the SNP table over one position and makes sure that
indels are not lost."""
self.pos += 1
# Current slot to fill is the position + max_window - 1
cur_slot=(self.pos-1) % self.max_window
# Delete indels that are no longer used (if they ended at the previous position)
for indel_pos in self.indel_table[cur_slot]:
if (indel_pos + self.indel_dict[indel_pos].max_len-1) == (self.pos-1):
del self.indel_dict[indel_pos]
self.indel_table[cur_slot]=[]
# Carry over indels from the previous slot
for indel_pos in self.indel_table[cur_slot-1]:
if (indel_pos + self.indel_dict[indel_pos].max_len-1) >= (self.pos+self.max_window-1):
self.indel_table[cur_slot].append(indel_pos)
if self.snp_table[cur_slot] != 0:
self.num_snps -= 1
self.snp_table[cur_slot] = 0
# See if there is a SNP overlapping the current spot.
while not self.end_of_snp_file and self.pos + self.max_window-1 > self.cur_snp.pos:
sys.stderr.write(str(self.num_snps) + " " + str(self.pos) + " " +
str(self.cur_snp.pos)+" !!!\n")
sys.stderr.write("SNP out of order has been skipped\n")
self.get_next_snp()
while not self.end_of_snp_file and (self.cur_snp.pos == (self.pos + self.max_window - 1)):
if self.cur_snp.ptype == "snp":
self.add_snp()
else:
self.add_indel()
if not self.cur_snp.pos in self.indel_table[cur_slot]:
self.indel_table[cur_slot].append(cur_snp.pos)
self.get_next_snp()
def empty_table(self):
"""Completely empties the read_table by repeatedly calling
empty_slot function"""
end_pos = self.pos + self.max_window
while self.pos < end_pos:
if self.is_paired_end:
self.empty_slot_paired()
else:
self.empty_slot_single()
def complement(self, letter):
if letter == 'A':
return('T')
elif letter == 'T':
return('A')
elif letter == 'C':
return('G')
elif letter == 'G':
return('C')
else:
return(letter)
def reverse_complement(self, read):
# reverse = ""
# for letter in read:
# reverse = self.complement(letter) + reverse
# return reverse
reverse = [self.complement(letter) for letter in list(read)]
reverse.reverse()
return ''.join(reverse)
def run(self):
"""Iterate through bam and SNP files and write output files."""
self.fill_table()
while not self.end_of_file:
if self.is_paired_end:
self.empty_slot_paired()
else:
self.empty_slot_single()
self.fill_table()
if self.window_too_small > 0:
sys.stderr.write(
'Segment distance (from read pair and junction separation) was '
'too large for {:,} reads so those reads have been thrown out. '
'Consider increasing the max window '
'size.\n'.format(self.window_too_small)
)
sys.stderr.write("Finished!\n")
self.keep_bam.close()
self.remap_bam.close()
self.remap_num_file.close()
[x.close() for x in self.fastqs]
def main():
    """Parse command line options and run the BAM/SNP intersection scan."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", action='store_true', dest='is_paired_end',
                        default=False, help=('Indicates that reads are '
                                             'paired-end (default is single).'))
    parser.add_argument("-s", action='store_true', dest='is_sorted',
                        default=False, help=('Indicates that the input bam file'
                                             ' is coordinate sorted (default '
                                             'is False).'))
    mdefault = 100000
    mhelp = ('Changes the maximum window to search for SNPs. The default is '
             '{:,} base pairs. Reads or read pairs that span more than this '
             'distance (usually due to splice junctions) will be thrown out. '
             'Increasing this window allows for longer junctions, but may '
             'increase run time and memory requirements.'.format(mdefault))
    parser.add_argument("-m", action='store', dest='max_window', type=int,
                        default=mdefault, help=mhelp)
    parser.add_argument("infile", action='store', help=("Coordinate sorted bam "
                                                        "file."))
    snp_dir_help = ('Directory containing the SNPs segregating within the '
                    'sample in question (which need to be checked for '
                    'mappability issues). This directory should contain '
                    'sorted files of SNPs separated by chromosome and named: '
                    'chr<#>.snps.txt.gz. These files should contain 3 columns: '
                    'position RefAllele AltAllele')
    parser.add_argument("snp_dir", action='store', help=snp_dir_help)
    options = parser.parse_args()
    infile = options.infile
    # Strip the final extension (if any) to build the output file names;
    # rsplit leaves an extensionless name untouched.
    prefix = infile.rsplit(".", 1)[0]
    if options.is_sorted:
        sort_file_name = infile
    else:
        # Coordinate-sort the input BAM first.
        pysam.sort(infile, prefix + ".sort")
        infile = prefix + ".sort"
        sort_file_name = prefix + ".sort.bam"
    keep_file_name = prefix + ".keep.bam"
    remap_name = prefix + ".to.remap.bam"
    remap_num_name = prefix + ".to.remap.num.gz"
    if options.is_paired_end:
        fastq_names = [prefix + ".remap.fq1.gz",
                       prefix + ".remap.fq2.gz"]
    else:
        fastq_names = [prefix + ".remap.fq.gz"]
    scanner = BamScanner(options.is_paired_end, options.max_window,
                         sort_file_name, keep_file_name, remap_name,
                         remap_num_name, fastq_names, options.snp_dir)
    scanner.run()


# Call the main() function to begin the program if not
# loaded as a module.
if __name__ == '__main__':
    main()
| cdeboever3/WASP | mapping/find_intersecting_snps.py | Python | apache-2.0 | 31,164 | [
"pysam"
] | 29c6a5c6a41f8fcf187502a86ad197f1bfb75b760904a81ea8fca32cc8948c94 |
#!/usr/bin/env python
from __future__ import print_function
import os
import string
import sys
from pulsar.main import (
ArgumentParser,
DEFAULT_APP_YAML,
DEFAULT_INI
)
try:
import pip
except ImportError:
pip = None # type: ignore
try:
import virtualenv
except ImportError:
virtualenv = None
# Treat the platform as Windows either for real or when mocked via the
# MOCK_WINDOWS environment variable (used for testing).
IS_WINDOWS = os.environ.get("MOCK_WINDOWS", None) or sys.platform.startswith('win')
CONFIGURE_URL = "https://pulsar.readthedocs.org/en/latest/configure.html"
# Command description and per-option help strings for the argument parser.
DESCRIPTION = "Initialize a directory with a minimal pulsar config."
HELP_DIRECTORY = "Directory containing the configuration files for Pulsar."
HELP_MQ = ("Write configuration files for message queue server deployment "
           "instead of more traditional RESTful web based pulsar.")
HELP_AUTO_CONDA = ("Auto initialize Conda for tool resolution and auto install "
                   "dependencies on demand.")
HELP_NO_LOGGING = ("Do not write Pulsar's default logging configuration to server.ini "
                   "and if uwsgi is configured do not configure its logging either.")
HELP_SUPERVISOR = ("Write a supervisord configuration file for "
                   "managing pulsar out as well.")
HELP_FORCE = "Overwrite existing files if they already exist."
HELP_WSGI_SERVER = ("Web server stack used to host Pulsar wsgi application.")
HELP_LIBDRMAA = ("Configure Pulsar to submit jobs to a cluster via DRMAA by "
                 "supplying the path to a libdrmaa .so file using this argument.")
HELP_INSTALL = ("Install optional dependencies required by specified configuration "
                "(e.g. drmaa, supervisor, uwsgi, etc...).")
HELP_HOST = ("Host to bind Pulsar to - defaults to localhost. Specify 0.0.0.0 "
             "to listen on all interfaces.")
HELP_TOKEN = ("Private token used to authorize clients. If Pulsar is not protected "
              "via firewall, this should be specified and SSL should be enabled. See "
              "%s for more information on security.") % CONFIGURE_URL
HELP_PORT = ("Port to bind Pulsar to (ignored if --mq is specified).")
HELP_PIP_INSTALL_ARGS_HELP = ("Arguments to pip install (defaults to pulsar-app) - unimplemented")
DEFAULT_HOST = "localhost"
# Python logging configuration appended verbatim to server.ini unless
# --no_logging was given.
LOGGING_CONFIG_SECTIONS = """## Configure Python loggers.
[loggers]
keys = root,pulsar
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = INFO
handlers = console
[logger_pulsar]
level = DEBUG
handlers = console
qualname = pulsar
propagate = 0
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = DEBUG
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s
"""
# Template for the optional supervisord program stanza (--supervisor).
SUPERVISOR_CONFIG_TEMPLATE = string.Template("""[program:pulsar]
user = ${user}
directory = ${directory}
command = pulsar --mode '${mode}' --config '${directory}'
redirect_stderr = true
autorestart = true
""")
# Template for server.ini covering the Paste, uWSGI and circus stacks.
SERVER_CONFIG_TEMPLATE = string.Template("""[server:main]
use = egg:Paste#http
port = ${port}
host = ${host}
## pem file to use to enable SSL.
# ssl_pem = host.pem
[app:main]
paste.app_factory = pulsar.web.wsgi:app_factory
app_config = %(here)s/app.yml
## Configure uWSGI (if used).
[uwsgi]
master = True
paste-logger = ${use_logging}
socket = ${host}:3031
processes = 1
enable-threads = True
## Configure circus and chaussette (if used).
[circus]
endpoint = tcp://127.0.0.1:5555
pubsub_endpoint = tcp://127.0.0.1:5556
#stats_endpoint = tcp://127.0.0.1:5557
[watcher:web]
cmd = chaussette --fd $(circus.sockets.web) paste:server.ini
use_sockets = True
# Pulsar must be single-process for now...
numprocesses = 1
[socket:web]
host = ${host}
port = ${port}
${logging_sections}
""")
# Template for the local_env.sh sourced by run.sh on non-Windows hosts.
LOCAL_ENV_TEMPLATE = string.Template("""## Place local configuration variables used by Pulsar and run.sh in here. For example
## If using the drmaa queue manager, you will need to set the DRMAA_LIBRARY_PATH variable,
## you may also need to update LD_LIBRARY_PATH for underlying library as well.
$libdrmaa_line
## If you wish to use a variety of Galaxy tools that depend on galaxy.eggs being defined,
## set GALAXY_HOME to point to a copy of Galaxy.
#export GALAXY_HOME=/path/to/galaxy-dist
""")
def main(argv=None):
    """Entry point: parse arguments and write a minimal Pulsar config.

    Writes app.yml, (optionally) server.ini, local_env.sh and a
    supervisor config into the target directory, then optionally pip
    installs any extra dependencies implied by the chosen options.
    """
    dependencies = []
    arg_parser = PlatformArgumentParser(description=DESCRIPTION)
    # (flags, keywords) pairs; entries marked skip_on_windows are dropped
    # by PlatformArgumentParser when running on Windows.
    argument_specs = [
        (("--directory",), dict(default=".", help=HELP_DIRECTORY)),
        (("--auto_conda",), dict(action="store_true", default=False,
                                 help=HELP_AUTO_CONDA)),
        (("--mq",), dict(action="store_true", default=False, help=HELP_MQ)),
        (("--no_logging",), dict(dest="logging", action="store_false",
                                 default=True, help=HELP_NO_LOGGING)),
        (("--supervisor",), dict(action="store_true", default=False,
                                 help=HELP_SUPERVISOR, skip_on_windows=True)),
        (("--wsgi_server",), dict(choices=["paster", "uwsgi"], default=None,
                                  help=HELP_WSGI_SERVER, skip_on_windows=True)),
        (("--libdrmaa_path",), dict(help=HELP_LIBDRMAA, skip_on_windows=True)),
        (("--host",), dict(default=DEFAULT_HOST, help=HELP_HOST)),
        (("--private_token",), dict(default=None, help=HELP_TOKEN)),
        (("--port",), dict(default="8913", help=HELP_PORT)),
        (("--install",), dict(action="store_true", help=HELP_INSTALL)),
        (("--force",), dict(action="store_true", default=False,
                            help=HELP_FORCE)),
    ]
    for flags, kwds in argument_specs:
        arg_parser.add_argument(*flags, **kwds)
    args = arg_parser.parse_args(argv)
    relative_directory = args.directory
    directory = os.path.abspath(relative_directory)
    mode = _determine_mode(args)
    if mode == "uwsgi":
        dependencies.append("uwsgi")
    # Ensure the target directory and its dependencies subdirectory exist.
    if not os.path.exists(directory):
        os.makedirs(directory)
    default_dependencies_dir = os.path.join(directory, "dependencies")
    if not os.path.exists(default_dependencies_dir):
        os.makedirs(default_dependencies_dir)
    print("Bootstrapping pulsar configuration into directory %s" % relative_directory)
    _handle_app_yaml(args, directory)
    _handle_server_ini(args, directory)
    if not IS_WINDOWS:
        _handle_local_env(args, directory, dependencies)
        _handle_supervisor(args, mode, directory, dependencies)
    _handle_install(args, dependencies)
    _print_config_summary(args, mode, relative_directory)
def _print_config_summary(args, mode, relative_directory):
    """Print a human readable summary of the files just written."""
    print(" - app.yml created, update to configure Pulsar application.")
    _print_server_ini_info(args, mode)
    if not IS_WINDOWS:
        print(" - local_env.sh created, update to configure environment.")
    print("\n")
    launch_message = "Start pulsar by running the command from directory [%s]:" % relative_directory
    print(launch_message)
    _print_pulsar_run(mode)
    _print_pulsar_check(args, mode)
def _print_server_ini_info(args, mode):
    """Summarize the generated server.ini and binding configuration.

    Bug fixes vs. the original: ``args.host`` is now %-interpolated into
    the message (it was previously passed as a second positional argument
    to print()), and the "Target web server" line is printed once instead
    of twice in the non-MQ case.
    """
    if not args.mq:
        print(" - server.ini created, update to configure web server.")
        if args.host == DEFAULT_HOST:
            print("   * Binding to host localhost, remote clients will not be able to connect.")
        elif args.private_token:
            print("   * Binding to host [%s] with a private token, please configure SSL if network is not firewalled off." % args.host)
        else:
            print("   * Binding to host [%s], configure a private token and SSL if network is not firewalled off." % args.host)
    print("   * Target web server %s" % mode)
def _print_pulsar_run(mode):
    """Print the shell command used to launch Pulsar for *mode*."""
    if IS_WINDOWS or mode == "paster":
        # The default mode needs no --mode flag (and Windows supports
        # only the plain invocation).
        print("    pulsar")
        return
    print("    pulsar --mode %s" % mode)
    if mode == "uwsgi":
        print("Any extra commands passed to pulsar will be forwarded along to uwsgi.")
def _print_pulsar_check(args, mode):
    """Print a pulsar-check command line for verifying the new server.

    Skipped for webless (message queue) deployments, which expose no
    HTTP endpoint for pulsar-check to probe.

    Bug fixes vs. the original: the guard was inverted (it printed the
    HTTP check only for webless servers and skipped every web mode), and
    the --private_token flag was concatenated directly onto the URL with
    no separating space.
    """
    if mode == "webless":
        # TODO: Implement pulsar-check for mq
        return
    print("Run a test job against your Pulsar server using the command:")
    command = "pulsar-check --url http://%s:%s" % (args.host, args.port)
    if args.private_token:
        # Leading space separates the flag from the URL.
        command += ' --private_token %s' % args.private_token
    print("    %s" % command)
    print("If it reports no problems, your pulsar server is running properly.")
def _determine_mode(args):
    """Pick the serving mode implied by the parsed arguments.

    An explicit --wsgi_server wins (honored on non-Windows only); --mq
    implies the webless mode; otherwise fall back to paster.
    """
    if not IS_WINDOWS and args.wsgi_server:
        return args.wsgi_server
    if args.mq:
        return "webless"
    return "paster"
def _handle_server_ini(args, directory):
    """Write server.ini into *directory* (skipped for --mq deployments).

    Substitutes host/port and (optionally) the logging sections into
    SERVER_CONFIG_TEMPLATE.  Refuses to overwrite an existing file
    unless --force was given (via _check_file).
    """
    if args.mq:
        # Message-queue deployments need no web server config.
        return
    ini_file = os.path.join(directory, DEFAULT_INI)
    _check_file(ini_file, args.force)
    config_dict = dict(
        port=args.port,
        host=args.host,
    )
    if args.logging:
        config_dict["logging_sections"] = LOGGING_CONFIG_SECTIONS
        config_dict["use_logging"] = "true"
    else:
        config_dict["logging_sections"] = ""
        config_dict["use_logging"] = "false"
    server_config = SERVER_CONFIG_TEMPLATE.safe_substitute(
        **config_dict
    )
    # Context manager closes the handle deterministically (the original
    # leaked it via open(...).write(...)).
    with open(ini_file, "w") as ini_fh:
        ini_fh.write(server_config)
def _handle_app_yaml(args, directory):
    """Write the app.yml application config into *directory*.

    Emits the private token, message queue URL, Conda toggles and DRMAA
    manager stanza as dictated by the parsed arguments.  Refuses to
    overwrite an existing file unless --force was given.
    """
    yaml_file = os.path.join(directory, DEFAULT_APP_YAML)
    _check_file(yaml_file, args.force)
    contents = "---\n"
    if args.private_token:
        contents += 'private_token: %s\n' % args.private_token
    if args.mq:
        contents += 'message_queue_url: "amqp://guest:guest@localhost:5672//"\n'
    # Both Conda toggles mirror the single --auto_conda flag.
    auto_conda = 'true' if args.auto_conda else 'false'
    contents += 'conda_auto_init: {}\n'.format(auto_conda)
    contents += 'conda_auto_install: {}\n'.format(auto_conda)
    if not IS_WINDOWS and args.libdrmaa_path:
        contents += 'manager:\n  type: queued_drmaa\n'
    # Context manager closes the handle deterministically (the original
    # leaked it via open(...).write(...)).
    with open(yaml_file, "w") as yaml_fh:
        yaml_fh.write(contents)
def _handle_local_env(args, directory, dependencies):
    """Write local_env.sh and register the drmaa dependency if needed.

    If --libdrmaa_path was supplied, DRMAA_LIBRARY_PATH is exported in
    the generated script and in this process's environment (so a
    subsequent pip install of drmaa can locate the library).
    """
    local_env_file = os.path.join(directory, "local_env.sh")
    if args.libdrmaa_path:
        libdrmaa_line = 'export DRMAA_LIBRARY_PATH=%s' % args.libdrmaa_path
        os.environ["DRMAA_LIBRARY_PATH"] = args.libdrmaa_path
        dependencies.append("drmaa")
    else:
        libdrmaa_line = '#export DRMAA_LIBRARY_PATH=/path/to/libdrmaa.so'
    local_env_contents = LOCAL_ENV_TEMPLATE.safe_substitute(
        libdrmaa_line=libdrmaa_line,
    )
    # Context manager closes the handle deterministically (the original
    # leaked it via open(...).write(...)).
    with open(local_env_file, "w") as env_fh:
        env_fh.write(local_env_contents)
def _handle_supervisor(args, mode, directory, dependencies):
    """Write supervisor.conf (when --supervisor) and record the dependency.

    NOTE(review): uses os.environ["USER"], which may be unset in some
    environments (e.g. cron); the option is already skipped on Windows.
    """
    if not args.supervisor:
        return
    config = SUPERVISOR_CONFIG_TEMPLATE.safe_substitute(
        user=os.environ["USER"],
        directory=directory,
        mode=mode,
    )
    conf_path = os.path.join(directory, "supervisor.conf")
    # Context manager closes the handle deterministically (the original
    # leaked it via open(...).write(...)).
    with open(conf_path, "w") as conf_fh:
        conf_fh.write(config)
    dependencies.append("supervisor")
def _handle_install(args, dependencies):
    """pip install the extra packages implied by the chosen options."""
    if not (args.install and dependencies):
        return
    if pip is None:
        raise ImportError("Bootstrapping Pulsar dependencies requires pip library.")
    pip.main(["install"] + dependencies)
# def _install_pulsar_in_virtualenv(venv):
# if virtualenv is None:
# raise ImportError("Bootstrapping Pulsar into a virtual environment, requires virtualenv.")
# if IS_WINDOWS:
# bin_dir = "Scripts"
# else:
# bin_dir = "bin"
# virtualenv.create_environment(venv)
# # TODO: Remove --pre on release.
# subprocess.call([os.path.join(venv, bin_dir, 'pip'), 'install', "--pre", "pulsar-app"])
def _check_file(path, force):
    """Exit with an error if *path* already exists and force is False."""
    if force or not os.path.exists(path):
        return
    print("File %s exists, exiting. Run with --force to replace configuration." % path, file=sys.stderr)
    sys.exit(1)
class PlatformArgumentParser(ArgumentParser):
    """ArgumentParser that can drop options not applicable on Windows.

    Pass skip_on_windows=True to add_argument to omit that option
    entirely when running on Windows.
    """

    def add_argument(self, *args, **kwds):
        skip = kwds.pop("skip_on_windows", False)
        if skip and IS_WINDOWS:
            return
        return ArgumentParser.add_argument(self, *args, **kwds)
| natefoo/pulsar | pulsar/scripts/config.py | Python | apache-2.0 | 13,153 | [
"Galaxy"
] | 887e6b8207092a0d770322aa4e2597d9d0a294790f90d4b39926d7dd97207f8e |
# System-preparation script: builds an OpenMM System/Integrator pair for
# the NTL9 protein from GROMACS topology files and serializes them (plus
# a PDB of the starting coordinates) into files/ for later simulations.
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as u
from parmed import gromacs
# user parameters
# NOTE(review): several of these "user parameters" are never referenced
# below -- the integrator hard-codes a 0.002 ps timestep and 300 K, and
# the save/restart intervals are unused here. Confirm intended values.
integrator_timestep_ps = 0.005 # picoseconds
save_traj_ps = 10 # picoseconds
save_full_traj_ps = 100 # picoseconds
save_restart_file_ns = 100 # nanoseconds
# physical values:
temperature = 300 * u.kelvin
pressure = 1 * u.bar # NOTE(review): defined but unused; no barostat is added.
# load pdb, force field and create system
gromacs.GROMACS_TOPDIR = 'top'
top = gromacs.GromacsTopologyFile('files/topol-NTL9.top')
gro = gromacs.GromacsGroFile.parse('files/start-NTL9.gro')
# Copy the periodic box from the coordinate file onto the topology.
top.box = gro.box
# system
system = top.createSystem(
    nonbondedMethod=app.PME,
    nonbondedCutoff=1 * u.nanometer,
    constraints=app.HBonds)
# integrator
integrator = mm.LangevinIntegrator(
    300 * u.kelvin,
    1 / u.picosecond,
    0.002 * u.picoseconds)
# Write the starting structure and the serialized system/integrator.
gro.write_pdb('files/input.pdb')
with open('files/system.xml', 'w') as f:
    system_xml = mm.XmlSerializer.serialize(system)
    f.write(system_xml)
with open('files/integrator.xml', 'w') as f:
    integrator_xml = mm.XmlSerializer.serialize(integrator)
    f.write(integrator_xml)
| markovmodel/adaptivemd | examples/files/ntl9/openmmsetup.py | Python | lgpl-2.1 | 1,102 | [
"Gromacs",
"OpenMM"
] | 858015cc989f027886a718879500b4eb7c0ffb937e0405aca1285ad035aaa7a7 |
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Brian Torres-Gil <btorres-gil@paloaltonetworks.com>
"""
upgrade.py
==========
This script upgrades a Palo Alto Networks firewall or Panorama to the
specified version. It takes care of all intermediate upgrades and reboots.
**Usage**::
upgrade.py [-h] [-v] [-q] [-n] hostname username password version
**Examples**:
Upgrade a firewall at 10.0.0.1 to PAN-OS 7.0.0::
$ python upgrade.py 10.0.0.1 admin password 7.0.0
Upgrade a Panorama at 172.16.4.4 to the latest Panorama version::
$ python upgrade.py 172.16.4.4 admin password latest
"""
__author__ = 'btorres-gil'
import sys
import os
import argparse
import logging
# Make the parent directory importable so the bundled pandevice package
# is found when running this example directly from the source tree.
curpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(curpath, os.pardir)]
from pandevice.base import PanDevice
def _build_parser():
    """Assemble the command line parser for the upgrade script."""
    parser = argparse.ArgumentParser(
        description="Upgrade a Palo Alto Networks Firewall or Panorama to the specified version")
    parser.add_argument('-v', '--verbose', action='count', help="Verbose (-vv for extra verbose)")
    parser.add_argument('-q', '--quiet', action='store_true', help="No output")
    parser.add_argument('-n', '--dryrun', action='store_true', help="Print what would happen, but don't perform upgrades")
    device_group = parser.add_argument_group('Palo Alto Networks Device')
    device_group.add_argument('hostname', help="Hostname of Firewall or Panorama")
    device_group.add_argument('username', help="Username for Firewall or Panorama")
    device_group.add_argument('password', help="Password for Firewall or Panorama")
    device_group.add_argument('version', help="The target PAN-OS/Panorama version (eg. 7.0.0 or latest)")
    return parser


def main():
    """Parse arguments, configure logging, and perform the upgrade."""
    args = _build_parser().parse_args()
    # argparse leaves count-type options at None when absent.
    verbosity = args.verbose or 0
    if not args.quiet:
        # WARNING is 30, INFO is 20, DEBUG is 10; each -v lowers by 10.
        level = 20 - verbosity * 10
        fmt = ('%(levelname)s:%(name)s:%(message)s'
               if level <= logging.DEBUG else '%(message)s')
        logging.basicConfig(format=fmt, level=level)
    # Connect and detect whether this is a Firewall or a Panorama; the
    # device type determines what version to upgrade to next.
    device = PanDevice.create_from_device(args.hostname,
                                          args.username,
                                          args.password,
                                          )
    # Performs all intermediate upgrades and reboots between each one.
    device.software.upgrade_to_version(args.version, args.dryrun)


# Call the main() function to begin the program if not
# loaded as a module.
if __name__ == '__main__':
    main()
| PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | SplunkforPaloAltoNetworks/bin/lib/pandevice/examples/upgrade.py | Python | isc | 3,615 | [
"Brian"
] | 479b590f274a17d25de7cea9f5063f8e157e9c539db83bd23f7672ec3f6b05a4 |
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
from __future__ import unicode_literals
import re
import subprocess
import textwrap
import unittest
from grumpy.compiler import block
from grumpy.compiler import imputil
from grumpy.compiler import shard_test
from grumpy.compiler import stmt
from grumpy.compiler import util
from grumpy import pythonparser
from grumpy.pythonparser import ast
class StatementVisitorTest(unittest.TestCase):
def testAssertNoMsg(self):
self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
try:
assert False
except AssertionError as e:
print repr(e)""")))
def testAssertMsg(self):
want = (0, "AssertionError('foo',)\n")
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
try:
assert False, 'foo'
except AssertionError as e:
print repr(e)""")))
def testBareAssert(self):
# Assertion errors at the top level of a block should raise:
# https://github.com/google/grumpy/issues/18
want = (0, 'ok\n')
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
def foo():
assert False
try:
foo()
except AssertionError:
print 'ok'
else:
print 'bad'""")))
def testAssignAttribute(self):
self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
e = Exception()
e.foo = 123
print e.foo""")))
def testAssignName(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
foo = 'bar'
print foo""")))
def testAssignMultiple(self):
self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
foo = bar = 'baz'
print foo, bar""")))
def testAssignSubscript(self):
self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
foo = {}
foo['bar'] = None
print foo""")))
def testAssignTuple(self):
self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
baz = ('a', 'b')
foo, bar = baz
print foo, bar""")))
def testAugAssign(self):
self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
foo = 41
foo += 1
print foo""")))
def testAugAssignBitAnd(self):
self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
foo = 7
foo &= 3
print foo""")))
def testAugAssignPow(self):
self.assertEqual((0, '64\n'), _GrumpRun(textwrap.dedent("""\
foo = 8
foo **= 2
print foo""")))
def testClassDef(self):
self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
class Foo(object):
pass
print type(Foo)""")))
def testClassDefWithVar(self):
self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'abc'
print Foo.bar""")))
def testDeleteAttribute(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 42
del Foo.bar
print hasattr(Foo, 'bar')""")))
def testDeleteClassLocal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'baz'
del bar
print hasattr(Foo, 'bar')""")))
def testDeleteGlobal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
foo = 42
del foo
print 'foo' in globals()""")))
def testDeleteLocal(self):
self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
def foo():
bar = 123
del bar
try:
print bar
raise AssertionError
except UnboundLocalError:
print 'ok'
foo()""")))
def testDeleteNonexistentLocal(self):
self.assertRaisesRegexp(
util.ParseError, 'cannot delete nonexistent local',
_ParseAndVisit, 'def foo():\n del bar')
def testDeleteSubscript(self):
self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
foo = {'bar': 'baz'}
del foo['bar']
print foo""")))
def testExprCall(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
print 'bar'
foo()""")))
def testExprNameGlobal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
foo""")))
def testExprNameLocal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
def bar():
foo
bar()""")))
def testFor(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i""")))
def testForBreak(self):
self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
break""")))
def testForContinue(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
continue
raise AssertionError""")))
def testForElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
for i in (1,):
print 'foo'
else:
print 'bar'""")))
def testForElseBreakNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testForElseContinueNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testFunctionDecorator(self):
self.assertEqual((0, '<b>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def bold(fn):
return lambda: '<b>' + fn() + '</b>'
@bold
def foo():
return 'foo'
print foo()""")))
def testFunctionDecoratorWithArg(self):
self.assertEqual((0, '<b id=red>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def tag(name):
def bold(fn):
return lambda: '<b id=' + name + '>' + fn() + '</b>'
return bold
@tag('red')
def foo():
return 'foo'
print foo()""")))
def testFunctionDef(self):
self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
def foo(a, b):
print a, b
foo('bar', 'baz')""")))
def testFunctionDefGenerator(self):
self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
def gen():
yield 'foo'
yield 'bar'
print list(gen())""")))
def testFunctionDefGeneratorReturnValue(self):
self.assertRaisesRegexp(
util.ParseError, 'returning a value in a generator function',
_ParseAndVisit, 'def foo():\n yield 1\n return 2')
def testFunctionDefLocal(self):
self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
def foo():
def bar():
print 'baz'
bar()
foo()""")))
def testIf(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
if 123:
print 'foo'
if '':
print 'bar'""")))
def testIfElif(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
elif False:
print 'bar'
if False:
print 'foo'
elif True:
print 'bar'""")))
def testIfElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
else:
print 'bar'
if False:
print 'foo'
else:
print 'bar'""")))
def testImport(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
import sys
print type(sys.modules)""")))
def testImportFutureLateRaises(self):
regexp = 'from __future__ imports must occur at the beginning of the file'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'foo = bar\nfrom __future__ import print_function')
def testFutureUnicodeLiterals(self):
want = "u'foo'\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
print repr('foo')""")))
def testImportMember(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
from sys import modules
print type(modules)""")))
def testImportConflictingPackage(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import time
from "__go__/time" import Now""")))
def testImportNative(self):
self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
from "__go__/time" import Nanosecond, Second
print Nanosecond, Second""")))
def testImportGrumpy(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
from "__go__/grumpy" import Assert
Assert(__frame__(), True, 'bad')""")))
def testImportNativeType(self):
self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
from "__go__/time" import Duration
print Duration""")))
def testImportWildcardMemberRaises(self):
regexp = 'wildcard member import is not implemented'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'from foo import *')
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'from "__go__/foo" import *')
def testPrintStatement(self):
self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
print 'abc',
print '123'
print 'foo', 'bar'""")))
def testPrintFunction(self):
want = "abc\n123\nabc 123\nabcx123\nabc 123 "
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
"module docstring is ok to proceed __future__"
from __future__ import print_function
print('abc')
print(123)
print('abc', 123)
print('abc', 123, sep='x')
print('abc', 123, end=' ')""")))
def testRaiseExitStatus(self):
self.assertEqual(1, _GrumpRun('raise Exception')[0])
def testRaiseInstance(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise RuntimeError('foo')
print 'bad'
except RuntimeError as e:
print e""")))
def testRaiseTypeAndArg(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise KeyError('foo')
print 'bad'
except KeyError as e:
print e""")))
def testRaiseAgain(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
try:
raise AssertionError('foo')
except AssertionError:
raise
except Exception as e:
print e""")))
def testRaiseTraceback(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import sys
try:
try:
raise Exception
except:
e, _, tb = sys.exc_info()
raise e, None, tb
except:
e2, _, tb2 = sys.exc_info()
assert e is e2
assert tb is tb2""")))
def testReturn(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
return 'bar'
print foo()""")))
def testTryBareExcept(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except:
pass""")))
def testTryElse(self):
self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
except:
print 'bar'
else:
print 'baz'""")))
def testTryMultipleExcept(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except RuntimeError:
print 'foo'
except AssertionError:
print 'bar'
except:
print 'baz'""")))
def testTryFinally(self):
result = _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
finally:
print 'bar'
try:
print 'foo',
raise Exception
finally:
print 'bar'"""))
self.assertEqual(1, result[0])
self.assertIn('foo bar\nfoo bar\n', result[1])
self.assertIn('Exception\n', result[1])
def testWhile(self):
self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
i = 2
while i:
print i
i -= 1""")))
def testWhileElse(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
while False:
print 'foo'
else:
print 'bar'""")))
def testWith(self):
self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
print "enter"
def __exit__(self, exc_type, value, traceback):
print "exit"
a = ContextManager()
with a:
print 1
try:
with a:
print 2
raise RuntimeError
except RuntimeError:
print 3
""")))
def testWithAs(self):
  """The `as` target of a with statement supports nested sequence unpacking."""
  self.assertEqual((0, '1 2 3\n'),
                   _GrumpRun(textwrap.dedent("""\
      class ContextManager(object):
        def __enter__(self):
          return (1, (2, 3))
        def __exit__(self, *args):
          pass
      with ContextManager() as [x, (y, z)]:
        print x, y, z
      """)))
def testWriteExceptDispatcherBareExcept(self):
  """A trailing bare except after a typed handler emits both dispatch labels."""
  visitor = stmt.StatementVisitor(_MakeModuleBlock())
  handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
              ast.ExceptHandler(type=None)]
  self.assertEqual(visitor._write_except_dispatcher(  # pylint: disable=protected-access
      'exc', 'tb', handlers), [1, 2])
  # Generated Go code should type-test for foo, then fall through to Label2.
  expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
                        r'goto Label1.*goto Label2', re.DOTALL)
  self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def testWriteExceptDispatcherBareExceptionNotLast(self):
  """A bare except anywhere but last position is rejected with ParseError."""
  visitor = stmt.StatementVisitor(_MakeModuleBlock())
  handlers = [ast.ExceptHandler(type=None),
              ast.ExceptHandler(type=ast.Name(id='foo'))]
  self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
                          visitor._write_except_dispatcher,  # pylint: disable=protected-access
                          'exc', 'tb', handlers)
def testWriteExceptDispatcherMultipleExcept(self):
  """Multiple typed handlers dispatch in order and re-raise when none match."""
  visitor = stmt.StatementVisitor(_MakeModuleBlock())
  handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
              ast.ExceptHandler(type=ast.Name(id='bar'))]
  self.assertEqual(visitor._write_except_dispatcher(  # pylint: disable=protected-access
      'exc', 'tb', handlers), [1, 2])
  # No handler matched -> generated code must re-raise exc with its traceback.
  expected = re.compile(
      r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
      r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
      r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
  self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def _MakeModuleBlock():
  """Return a fresh __main__ ModuleBlock with no importer and empty source."""
  return block.ModuleBlock(None, '__main__', '<test>', '',
                           imputil.FutureFeatures())
def _ParseAndVisit(source):
  """Parse *source* and run a StatementVisitor over it; return the visitor."""
  mod = pythonparser.parse(source)
  # Honour any `from __future__ import ...` flags declared in the source.
  _, future_features = imputil.parse_future_features(mod)
  importer = imputil.Importer(None, 'foo', 'foo.py', False)
  b = block.ModuleBlock(importer, '__main__', '<test>',
                        source, future_features)
  visitor = stmt.StatementVisitor(b)
  visitor.visit(mod)
  return visitor
def _GrumpRun(cmd):
  """Pipe *cmd* (Python source) into the grumprun tool.

  Returns a (returncode, output) pair where output is the combined
  stdout+stderr text produced by the run.
  """
  proc = subprocess.Popen(['grumprun'], stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output, _ = proc.communicate(cmd)
  return proc.returncode, output
# Delegate to the shard-aware test runner when executed directly.
if __name__ == '__main__':
  shard_test.main()
| google/grumpy | compiler/stmt_test.py | Python | apache-2.0 | 16,868 | [
"VisIt"
] | ff3b46467f241915f671d931f7c95a47848eccc52d948f71d29d731340eba077 |
import sys
import cPickle as pickle
import numpy as np
def augmentVisit(visit, code, treeList):
    """Append, in place, the ancestor entries of `code` to `visit`.

    Scans `treeList` in order and, for the first tree containing `code`,
    extends `visit` with everything after the first element of that tree's
    record for `code`. Later trees are ignored. Returns None.
    """
    containing = next((tree for tree in treeList if code in tree), None)
    if containing is not None:
        visit.extend(containing[code][1:])
def countCooccurrenceProduct(visit, coMap):
    """Accumulate occurrence-count products for every ordered pair of
    distinct codes in `visit` into `coMap` (mutated in place).

    NOTE(review): each unordered pair is visited in both orderings and both
    directed keys are bumped each time, so every key ends up with twice the
    raw count product. All entries scale uniformly, so downstream relative
    weights are unaffected -- presumably intentional; confirm with authors.
    """
    counts = {c: visit.count(c) for c in set(visit)}
    for codeA, freqA in counts.items():
        for codeB, freqB in counts.items():
            if codeA == codeB:
                continue
            product = freqA * freqB
            coMap[(codeA, codeB)] = coMap.get((codeA, codeB), 0) + product
            coMap[(codeB, codeA)] = coMap.get((codeB, codeA), 0) + product
if __name__=='__main__':
    # Usage: python create_glove_comap.py <seqFile> <treeFilePrefix>
    # seqFile: pickled list of patients, each a list of visits (code lists).
    # treeFile: prefix of per-level ancestor maps (<prefix>.levelN.pk).
    seqFile = sys.argv[1]
    treeFile = sys.argv[2]
    outFile = 'cooccurrenceMap.pk'
    maxLevel = 5
    seqs = pickle.load(open(seqFile, 'rb'))
    # One ancestor map per hierarchy level, shallowest to deepest.
    treeList = [pickle.load(open(treeFile+'.level'+str(i)+'.pk', 'rb')) for i in range(1,maxLevel+1)]
    coMap = {}
    count = 0
    for patient in seqs:
        if count % 1000 == 0: print count  # progress indicator (Python 2)
        count += 1
        for visit in patient:
            # Expand each visit in place with the ancestors of every code it
            # contains, then accumulate co-occurrence count products.
            for code in visit:
                augmentVisit(visit, code, treeList)
            countCooccurrenceProduct(visit, coMap)
    # Highest pickle protocol for a compact binary dump.
    pickle.dump(coMap, open(outFile, 'wb'), -1)
| mp2893/gram | create_glove_comap.py | Python | bsd-3-clause | 1,120 | [
"VisIt"
] | 814f0c6e3fc780ea4f1d124cb9fdfb07e394ef88b12cad0f27cf349135837d5c |
import sys
# Python 2 installer script: abort with install guidance if setuptools
# is missing rather than failing with a bare ImportError.
try:
    from setuptools import setup
except ImportError as e:
    print 'setuptools package is not installed'
    print 'On linux install with the following command:'
    print 'wget https://bootstrap.pypa.io/ez_setup.py -O - | sudo python '
    print 'For more info please visit: https://pypi.python.org/pypi/setuptools'
    sys.exit(1)
setup(name='pyVEP',
      version='0.0.1',
      description='Python interface to Variant Effect Predictor',
      url='https://github.com/kantale/pyVEP',
      author='Alexandros Kanterakis',
      author_email='alexandros.kanterakis@gmail.com',
      license='MIT',
      # https://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
          'Topic :: Scientific/Engineering :: Bio-Informatics',
      ],
      install_requires=[
          'requests',
      ],
      # http://stackoverflow.com/questions/3472430/how-can-i-make-setuptools-install-a-package-thats-not-on-pypi
      # dependency_links=['https://github.com/counsyl/hgvs/tarball/master#egg=pyhgvs-2.0.0',],
      packages=['pyVEP'],
      )
| kantale/pyVEP | setup.py | Python | mit | 1,061 | [
"VisIt"
] | c830694419f99f87c22cfd67fffa37dcc7e5c406bbc9ceb753e442efe751aa48 |
# Orca
#
# Copyright (C) 2015 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2015 Igalia, S.L."
__license__ = "LGPL"
import orca.braille as braille
import orca.scripts.toolkits.WebKitGtk as WebKitGtk
class BrailleGenerator(WebKitGtk.BrailleGenerator):
    """Braille generator for Evolution.

    Suppresses the displayed text of message-list toggle cells and, when
    possible, points the focused braille region at the region belonging to
    the object being presented.
    """

    def __init__(self, script):
        super().__init__(self, script) if False else super().__init__(script)
        # Per-generation memo keyed by hash(obj); cleared around each
        # generateBraille() call so entries never go stale.
        self._cache = {}

    def _isMessageListToggleCell(self, obj):
        """Memoized check of whether obj is a message-list toggle cell."""
        cached = self._cache.get(hash(obj), {})
        rv = cached.get("isMessageListToggleCell")
        # Fix: compare with `is None` (identity), not `== None` -- rv is a
        # cached boolean, and None marks "not computed yet".
        if rv is None:
            rv = self._script.utilities.isMessageListToggleCell(obj)
            cached["isMessageListToggleCell"] = rv
            self._cache[hash(obj)] = cached
        return rv

    def _generateRealActiveDescendantDisplayedText(self, obj, **args):
        # Toggle cells carry no useful displayed text; omit them entirely.
        if self._isMessageListToggleCell(obj):
            return []
        return super()._generateRealActiveDescendantDisplayedText(obj, **args)

    def generateBraille(self, obj, **args):
        """Generate braille for obj.

        Returns [result, focusedRegion]; when the parent's focused region is
        the first region, re-point it at the first Component/Text region
        whose accessible is obj itself.
        """
        self._cache = {}
        result, focusedRegion = super().generateBraille(obj, **args)
        self._cache = {}
        if not result or focusedRegion != result[0]:
            return [result, focusedRegion]
        hasObj = lambda x: isinstance(x, (braille.Component, braille.Text))
        isObj = lambda x: self._script.utilities.isSameObject(obj, x.accessible)
        matches = [r for r in result if hasObj(r) and isObj(r)]
        if matches:
            focusedRegion = matches[0]
        return [result, focusedRegion]
| pvagner/orca | src/orca/scripts/apps/evolution/braille_generator.py | Python | lgpl-2.1 | 2,352 | [
"ORCA"
] | f05385e654d218eab3b340f414eebbb0afe3aff73a71a772eae4c4d04c44b3b2 |
LOEC_URL = 'https://github.com/KAMI911/loec'
def main():
    """Command-line entry point: refresh an existing LOEC project from the
    bundled template directory, overwriting project files that are older
    than their template counterpart (``%extensionname%.py`` is never
    overwritten). Python 2 script (execfile, file(), raw_input).
    """
    import sys
    import getopt
    import argparse
    parser = argparse.ArgumentParser(description='Update an EuroOffice Extension Creator project. Works by comparing file modification dates and overwrites files in the project that are older than the corresponding template file. The central ``myextension.py'' file is never overwritten.', epilog='To learn more or get in touch, visit the Launchpad page of LibreOffice Extension Creator at ' + LOEC_URL)
    parser.add_argument('--quiet', '-q', action='store_true', help='do not ask permission for overwriting')
    parser.add_argument('project', type=str, metavar='project-name', help='project name')
    args = parser.parse_args()
    quiet = args.quiet
    project = args.project
    import os
    cwd = os.getcwd()
    outdir = os.path.join( cwd, project )
    if not os.path.exists( outdir ):
        print ('%s does not exist!' % outdir)
        sys.exit( 2 )
    # Template lives next to this script.
    home = os.path.split( sys.argv[0] )[0]
    template = os.path.join( home, 'template' )
    # load vendor, prefix and url
    # NOTE(review): executes the project's eoec.config into globals();
    # assumed to define vendor, prefix, url (and optionally dontupdate).
    execfile( os.path.join( outdir, 'eoec.config' ), globals(), globals() )
    import LOECUtil
    replicator = LOECUtil.LOECUtil(project, vendor, prefix, url, LOEC_URL)
    def update( dry ):
        # Walk the template tree, mirroring new directories/files into the
        # project and overwriting files whose template copy is newer.
        # When dry is True, only report what would happen.
        for root, dirs, files in os.walk( template ):
            for d in dirs:
                d_out = os.path.join( replicator.substitute( root ).replace( template, outdir ), replicator.substitute( d ) )
                if not os.path.exists( d_out ):
                    if dry:
                        print ('directory will be created: %s' % d_out)
                    else:
                        print ('Creating directory %s' % d_out)
                        os.mkdir( d_out )
            for f in files:
                # Files listed in the optional dontupdate config are skipped.
                if 'dontupdate' in globals() and f in dontupdate:
                    continue
                f_in = os.path.join( root, f )
                f_out = os.path.join( replicator.substitute( root ).replace( template, outdir ), replicator.substitute( f ) )
                if not os.path.exists( f_out ):
                    if dry:
                        print ('file will be created: %s' % f_out)
                    else:
                        print ('creating file %s' % f_out)
                        contents = file( f_in, 'rb' ).read()
                        file( f_out, 'wb' ).write( replicator.substitute( contents ) )
                elif os.stat( f_in ).st_mtime > os.stat( f_out ).st_mtime:
                    # Never clobber the user's central extension module.
                    if f == '%extensionname%.py':
                        print ('skipping file: %s' % f_out)
                    else:
                        if dry:
                            print ('File will be overwritten: %s' % f_out)
                        else:
                            print ('Overwriting %s' % f_out)
                            contents = file( f_in, 'rb' ).read()
                            file( f_out, 'wb' ).write( replicator.substitute( contents ) )
    if not quiet:
        # Dry-run first, then ask for confirmation before applying.
        update( dry=True )
        if raw_input( 'apply update? [y/N] ' ).lower().startswith( 'y' ):
            update( dry=False )
    else:
        update( dry=False )
if __name__ == '__main__':
    main()
| KAMI911/loec | update.py | Python | gpl-3.0 | 2,666 | [
"VisIt"
] | e12aa2339e6f648ea00abc6b5a23f7ea7895d087fa7851d3653db5fddfdc4971 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.892384
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/ajax/boxinfo.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class boxinfo(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(boxinfo, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<!-- box_info -->
<div id="content_main">
\t<div id="info">
\t\t<h3>''')
_v = VFFSL(SL,"tstrings",True)['box_info'] # u"$tstrings['box_info']" on line 5, col 7
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box_info']")) # from line 5, col 7.
write(u'''</h3>
\t\t<hr />
\t\t<img src="images/boxes/''')
_v = VFFSL(SL,"boximage",True) # u'${boximage}' on line 7, col 26
if _v is not None: write(_filter(_v, rawExpr=u'${boximage}')) # from line 7, col 26.
write(u'''" id="box_image" alt="box_info">
\t\t<hr />
\t\t<br/>
\t\t<table width="100%">
\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['box'] # u"$tstrings['box']" on line 15, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box']")) # from line 15, col 43.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['brand'] # u"$tstrings['brand']" on line 18, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['brand']")) # from line 18, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"brand",True) # u'$brand' on line 19, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$brand')) # from line 19, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['model'] # u"$tstrings['model']" on line 22, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['model']")) # from line 22, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"model",True) # u'$model' on line 23, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$model')) # from line 23, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['chipset'] # u"$tstrings['chipset']" on line 26, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['chipset']")) # from line 26, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"chipset",True) # u'$chipset' on line 27, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$chipset')) # from line 27, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['fp_version'] # u"$tstrings['fp_version']" on line 30, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['fp_version']")) # from line 30, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"fp_version",True)) # u'$str($fp_version)' on line 31, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$str($fp_version)')) # from line 31, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['total_memory'] # u"$tstrings['total_memory']" on line 34, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['total_memory']")) # from line 34, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"mem1",True) # u'$mem1' on line 35, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$mem1')) # from line 35, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['free_memory'] # u"$tstrings['free_memory']" on line 38, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['free_memory']")) # from line 38, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"mem2",True) # u'$mem2' on line 39, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$mem2')) # from line 39, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['box_uptime'] # u"$tstrings['box_uptime']" on line 42, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['box_uptime']")) # from line 42, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"uptime",True) # u'$uptime' on line 43, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$uptime')) # from line 43, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['software'] # u"$tstrings['software']" on line 52, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['software']")) # from line 52, col 43.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['oe_version'] # u"$tstrings['oe_version']" on line 55, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['oe_version']")) # from line 55, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"oever",True) # u'$oever' on line 56, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$oever')) # from line 56, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['distro_version'] # u"$tstrings['distro_version']" on line 59, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['distro_version']")) # from line 59, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"imagedistro",True) # u'$imagedistro' on line 60, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$imagedistro')) # from line 60, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['firmware_version'] # u"$tstrings['firmware_version']" on line 63, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['firmware_version']")) # from line 63, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"imagever",True) # u'$imagever' on line 64, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$imagever')) # from line 64, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['driver_date'] # u"$tstrings['driver_date']" on line 67, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['driver_date']")) # from line 67, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"driverdate",True) # u'$driverdate' on line 68, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$driverdate')) # from line 68, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['kernel_version'] # u"$tstrings['kernel_version']" on line 71, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['kernel_version']")) # from line 71, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"kernelver",True) # u'$kernelver' on line 72, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$kernelver')) # from line 72, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['gui_version'] # u"$tstrings['gui_version']" on line 75, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['gui_version']")) # from line 75, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"enigmaver",True) # u'$enigmaver' on line 76, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$enigmaver')) # from line 76, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['tuners'] # u"$tstrings['tuners']" on line 85, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['tuners']")) # from line 85, col 43.
write(u'''</th>
\t\t\t\t\t\t</tr>
''')
for tuner in VFFSL(SL,"tuners",True): # generated from line 87, col 7
write(u'''\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tuner.name",True) # u'$tuner.name' on line 89, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.name')) # from line 89, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"tuner.type",True) # u'$tuner.type' on line 90, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$tuner.type')) # from line 90, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
''')
write(u'''\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
''')
for hd in VFFSL(SL,"hdd",True): # generated from line 96, col 4
write(u'''\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['hdd_model'] # u"$tstrings['hdd_model']" on line 101, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['hdd_model']")) # from line 101, col 43.
write(u''': ''')
_v = VFFSL(SL,"hd.model",True) # u'$hd.model' on line 101, col 67
if _v is not None: write(_filter(_v, rawExpr=u'$hd.model')) # from line 101, col 67.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['capacity'] # u"$tstrings['capacity']" on line 104, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['capacity']")) # from line 104, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"hd.capacity",True) # u'$hd.capacity' on line 105, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$hd.capacity')) # from line 105, col 30.
write(u''' ("''')
_v = VFFSL(SL,"hd.labelled_capacity",True) # u'$hd.labelled_capacity' on line 105, col 45
if _v is not None: write(_filter(_v, rawExpr=u'$hd.labelled_capacity')) # from line 105, col 45.
write(u'''")</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['free'] # u"$tstrings['free']" on line 108, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['free']")) # from line 108, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"hd.free",True) # u'$hd.free' on line 109, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$hd.free')) # from line 109, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
''')
for iface in VFFSL(SL,"ifaces",True): # generated from line 115, col 4
write(u'''\t\t\t<tr>
\t\t\t\t<td width="100%">
\t\t\t\t\t<table cellspacing="0" class="infomain" >
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<th colspan="2" class="infoHeader">''')
_v = VFFSL(SL,"tstrings",True)['network_interface'] # u"$tstrings['network_interface']" on line 120, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['network_interface']")) # from line 120, col 43.
write(u''': ''')
_v = VFFSL(SL,"iface.name",True) # u'$iface.name' on line 120, col 75
if _v is not None: write(_filter(_v, rawExpr=u'$iface.name')) # from line 120, col 75.
write(u'''</th>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['dhcp'] # u"$tstrings['dhcp']" on line 123, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['dhcp']")) # from line 123, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.dhcp",True) # u'$iface.dhcp' on line 124, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.dhcp')) # from line 124, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['ip_address'] # u"$tstrings['ip_address']" on line 127, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ip_address']")) # from line 127, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.ip",True) # u'$iface.ip' on line 128, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.ip')) # from line 128, col 30.
write(u'''/''')
_v = VFFSL(SL,"iface.v4prefix",True) # u'$iface.v4prefix' on line 128, col 40
if _v is not None: write(_filter(_v, rawExpr=u'$iface.v4prefix')) # from line 128, col 40.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['subnet_mask'] # u"$tstrings['subnet_mask']" on line 131, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['subnet_mask']")) # from line 131, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.mask",True) # u'$iface.mask' on line 132, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mask')) # from line 132, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['gateway'] # u"$tstrings['gateway']" on line 135, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['gateway']")) # from line 135, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.gw",True) # u'$iface.gw' on line 136, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.gw')) # from line 136, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['mac_address'] # u"$tstrings['mac_address']" on line 139, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['mac_address']")) # from line 139, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.mac",True) # u'$iface.mac' on line 140, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.mac')) # from line 140, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t\t<tr>
\t\t\t\t\t\t\t<td class="infoleft">''')
_v = VFFSL(SL,"tstrings",True)['ipv6_address'] # u"$tstrings['ipv6_address']" on line 143, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ipv6_address']")) # from line 143, col 29.
write(u''':</td>
\t\t\t\t\t\t\t<td class="inforight">''')
_v = VFFSL(SL,"iface.ipv6",True) # u'$iface.ipv6' on line 144, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$iface.ipv6')) # from line 144, col 30.
write(u'''</td>
\t\t\t\t\t\t</tr>
\t\t\t\t\t</table>
\t\t\t\t</td>
\t\t\t</tr>
''')
write(u'''\t\t</table>
\t</div>
</div>\t
<!-- /box_info -->
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_boxinfo= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the generated class exactly once.
if not hasattr(boxinfo, '_initCheetahAttributes'):
    templateAPIClass = getattr(boxinfo, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(boxinfo)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=boxinfo()).run()
| MOA-2011/e2openplugin-OpenWebif | plugin/controllers/views/ajax/boxinfo.py | Python | gpl-2.0 | 20,479 | [
"VisIt"
] | 4d07dd313cdf9e3fa3bc6cc74082d14f38328e3102450abb15ff0b46fb578257 |
"""
file kept for reference only. Do NOT use this file. use dropbox.py instead
as all methods are now in there
"""
# Include the Dropbox SDK libraries
from dropbox import client, rest, session
# Get your app key and secret from the Dropbox developer website
APP_KEY = 'y7cxubkm19o3f9b'
APP_SECRET = '8rguqnx7oqwjqtm'
# ACCESS_TYPE should be 'dropbox' or 'app_folder' as configured for your app
ACCESS_TYPE = 'app_folder'
# OAuth 1.0 flow: request token -> user authorisation in browser ->
# access token, which is then persisted to dropbox_token.txt.
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
print "This program is to receive a token to allow dropbox to upload content to your Dropbox"
print "Do this then delete content of your dropbox_token.txt!!!"
print
print "READ Warning above. Press Enter to Continue"
raw_input()
# Make the user sign in and authorize this token
print "url:", url
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
# This will fail if the user didn't visit the above URL and hit 'Allow'
access_token = sess.obtain_access_token(request_token)
#Okay, now we are ready to save the access_token
TOKENS = 'dropbox_token.txt'
token_file = open(TOKENS, 'w')
token_file.write("%s|%s" % (access_token.key,access_token.secret))
token_file.close()
print "you are now ready to use the token in your application"
| levibostian/VSAS | leviTesting/dropbox_sdk/testScripts/request_dropbox_token.py | Python | mit | 1,367 | [
"VisIt"
] | 115331f2211cac7ff576fe801b43194173c15bdde91a2b96917566c79d338e90 |
########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts. The simulation is set
# to settle for 5 seconds, then a 2 second tetanus is delivered, then
# the simulation continues for another 50 seconds.
# By default we set it to run the smallest model, that takes about 4 minutes
# to run 57 seconds of simulation time, on an Intel core I7 at
# 2.2 GHz. The big model, VHC-neuron, takes almost 90 minutes.
# This program dumps data to text files for further analysis.
########################################################################
import moogli
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('/home/bhalla/moose/trunk/Demos/util')
import rdesigneur as rd
PI = 3.14159265359
useGssa = True          # presumably selects the Gillespie (GSSA) chemical solver -- confirm against rdesigneur usage
combineSegments = False # presumably controls merging of morphology segments on load -- confirm
#### Choose your favourite model here. #################
elecFileNames = ( "ca1_minimal.p", )
#elecFileNames = ( "ca1_minimal.p", "h10.CNG.swc" )
#elecFileNames = ( "CA1.morph.xml", "ca1_minimal.p", "VHC-neuron.CNG.swc", "h10.CNG.swc" )
synSpineList = []       # populated later with spine synapse elements
synDendList = []        # populated later with dendritic synapse elements
probeInterval = 0.1     # interval between probe stimuli; units not shown here -- verify
probeAmplitude = 1.0
tetanusFrequency = 100.0  # tetanic stimulation rate; presumably Hz -- verify
tetanusAmplitude = 1000
tetanusAmplitudeForSpines = 1000
baselineTime = 5        # seconds of settling before the tetanus (see header)
tetTime = 2             # seconds of tetanic stimulation (see header)
postTetTime = 50        # seconds of simulation after the tetanus (see header)
def buildRdesigneur():
    """Assemble and return the rdesigneur object describing the multiscale
    model: ion-channel / spine / chemistry prototypes, their spatial
    distributions over the neuron, and the adaptors that couple the
    electrical and chemical scales. The cell morphology itself is assigned
    later (main() sets rdes.cellProtoList per morphology file)."""
    ##################################################################
    # Here we define which prototypes are to be loaded in to the system.
    # Each specification has the format
    # source [localName]
    # source can be any of
    # filename.extension,   # Identify type of file by extension, load it.
    # function(),           # func( name ) builds object of specified name
    # file.py:function() ,  # load Python file, run function(name) in it.
    # moose.Classname       # Make obj moose.Classname, assign to name.
    # path                  # Already loaded into library or on path.
    # After loading the prototypes, there should be an object called 'name'
    # in the library.
    ##################################################################
    chanProto = [
        ['./chans/hd.xml'], \
        ['./chans/kap.xml'], \
        ['./chans/kad.xml'], \
        ['./chans/kdr.xml'], \
        ['./chans/na3.xml'], \
        ['./chans/nax.xml'], \
        ['./chans/CaConc.xml'], \
        ['./chans/Ca.xml'], \
        ['./chans/NMDA.xml'], \
        ['./chans/Glu.xml'] \
    ]
    # Built-in spine prototype generator supplied by rdesigneur.
    spineProto = [ \
        ['makeSpineProto()', 'spine' ]
    ]
    # Chemical signaling model (GENESIS/kkit format) for the LTP pathway.
    chemProto = [ \
        ['./chem/' + 'psd53.g', 'ltpModel'] \
    ]
    ##################################################################
    # Here we define what goes where, and any parameters. Each distribution
    # has the format
    # protoName, path, field, expr, [field, expr]...
    # where
    # protoName identifies the prototype to be placed on the cell
    # path is a MOOSE wildcard path specifying where to put things
    # field is the field to assign.
    # expr is a math expression to define field value. This uses the
    # muParser. Built-in variables are p, g, L, len, dia.
    # The muParser provides most math functions, and the Heaviside
    # function H(x) = 1 for x > 0 is also provided.
    ##################################################################
    passiveDistrib = [
            [ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5",  \
                "Em", "-58e-3", "initVm", "-65e-3" ], \
            [ ".", "#axon#", "RA", "0.5" ] \
        ]
    # Channel densities follow published CA1 gradients: several conductances
    # scale with path distance p (metres) from the soma.
    chanDistrib = [ \
            ["hd", "#dend#,#apical#", "Gbar", "5e-2*(1+(p*3e4))" ], \
            ["kdr", "#", "Gbar", "p < 50e-6 ? 500 : 100" ], \
            ["na3", "#soma#,#dend#,#apical#", "Gbar", "250" ], \
            ["nax", "#soma#,#axon#", "Gbar", "1250" ], \
            ["kap", "#axon#,#soma#", "Gbar", "300" ], \
            ["kap", "#dend#,#apical#", "Gbar", \
                "300*(H(100-p*1e6)) * (1+(p*1e4))" ], \
            ["Ca_conc", "#dend#,#apical#", "tau", "0.0133" ], \
            ["kad", "#soma#,#dend#,#apical#", "Gbar", \
                "300*H(p - 100e-6)*(1+p*1e4)" ], \
            ["Ca", "#dend#,#apical#", "Gbar", "50" ], \
            ["glu", "#dend#,#apical#", "Gbar", "200*H(p-200e-6)" ], \
            ["NMDA", "#dend#,#apical#", "Gbar", "2*H(p-200e-6)" ] \
        ]
    # Spines at ~20 um spacing on the apical dendrite, with randomized
    # spacing, orientation and size.
    spineDistrib = [ \
            ["spine", '#apical#', "spineSpacing", "20e-6", \
                "spineSpacingDistrib", "2e-6", \
                "angle", "0", \
                "angleDistrib", str( 2*PI ), \
                "size", "1", \
                "sizeDistrib", "0.5" ] \
        ]
    # Install the LTP chemical model throughout the apical dendrite.
    chemDistrib = [ \
            [ "ltpModel", "#apical#", "install", "1"]
        ]
    ######################################################################
    # Here we define the mappings across scales. Format:
    # sourceObj sourceField destObj destField couplingExpr [wildcard][spatialExpn]
    # where the coupling expression is anything a muParser can evaluate,
    # using the input variable x. For example: 8e-5 + 300*x
    # For now, let's use existing adaptors which take an offset and scale.
    ######################################################################
    adaptorList = [
        [ 'Ca_conc', 'Ca', 'psd/Ca_input', 'concInit', 8e-5, 1 ],
        [ 'Ca_conc', 'Ca', 'dend/Ca_dend_input', 'concInit', 8e-5, 1 ],
        [ 'psd/tot_PSD_R', 'n', 'glu', 'Gbar', 0, 0.01 ],
    ]
    ######################################################################
    # Having defined everything, now to create the rdesigneur and proceed
    # with creating the model.
    ######################################################################
    rdes = rd.rdesigneur(
        useGssa = useGssa, \
        combineSegments = combineSegments, \
        stealCellFromLibrary = True, \
        passiveDistrib = passiveDistrib, \
        spineDistrib = spineDistrib, \
        chanDistrib = chanDistrib, \
        chemDistrib = chemDistrib, \
        spineProto = spineProto, \
        chanProto = chanProto, \
        chemProto = chemProto, \
        adaptorList = adaptorList
    )
    return rdes
def buildPlots( rdes ):
    """Create recording tables under /graphs for up to numPlots spines
    (PSD Ca, spine-head Ca, PSD receptor count) plus single-electrode
    style traces: soma Vm, and Ca / Vm / NMDA Gk on one example spine.
    Assumes rdes.buildModel() has already populated /model."""
    numPlots = 10
    caPsd = moose.vec( '/model/chem/psd/Ca' )
    caHead = moose.vec( '/model/chem/spine/Ca' )
    psdR = moose.vec( '/model/chem/psd/tot_PSD_R' )
    numSpines = rdes.spineCompt.mesh.num
    # Each spine contributes a head and a shaft compartment, hence 2x.
    assert( 2 * numSpines == len( rdes.spineComptElist ) )
    if not moose.exists( '/graphs' ):
        moose.Neutral( '/graphs' )
    assert( len( caPsd ) == numSpines )
    assert( len( caHead ) == numSpines )
    if numSpines < numPlots:
        # Few spines: record every one of them.
        caPsdTab = moose.Table2( '/graphs/caPsdTab', numSpines ).vec
        caHeadTab = moose.Table2( '/graphs/caHeadTab', numSpines ).vec
        psdRtab = moose.Table2( '/graphs/psdRtab', numSpines ).vec
        for i in range( numSpines ):
            moose.connect( caPsdTab[i], 'requestOut', caPsd[i], 'getConc' )
            moose.connect( caHeadTab[i], 'requestOut', caHead[i], 'getConc')
            moose.connect( psdRtab[i], 'requestOut', psdR[i], 'getN' )
    else:
        # Many spines: subsample numPlots of them at stride dx.
        # NOTE(review): Python 2 integer division is relied on here.
        caPsdTab = moose.Table2( '/graphs/caPsdTab', numPlots ).vec
        caHeadTab = moose.Table2( '/graphs/caHeadTab', numPlots ).vec
        psdRtab = moose.Table2( '/graphs/psdRtab', numPlots ).vec
        dx = numSpines / numPlots
        for i in range( numPlots ):
            moose.connect( caPsdTab[i], 'requestOut', caPsd[i*dx], 'getConc' )
            moose.connect( caHeadTab[i], 'requestOut', caHead[i*dx], 'getConc' )
            moose.connect( psdRtab[i], 'requestOut', psdR[i*dx], 'getN' )
    # Somatic membrane potential.
    vtab = moose.Table( '/graphs/vtab' )
    moose.connect( vtab, 'requestOut', rdes.soma, 'getVm' )
    # Electrical-side recordings from one representative spine compartment.
    eSpineCaTab = moose.Table( '/graphs/eSpineCaTab' )
    path = rdes.spineComptElist[1].path + "/Ca_conc"
    moose.connect( eSpineCaTab, 'requestOut', path, 'getCa' )
    eSpineVmTab = moose.Table( '/graphs/eSpineVmTab' )
    moose.connect( eSpineVmTab, 'requestOut', rdes.spineComptElist[1], 'getVm' )
    eSpineGkTab = moose.Table( '/graphs/eSpineGkTab' )
    path = rdes.spineComptElist[1].path + "/NMDA"
    moose.connect( eSpineGkTab, 'requestOut', path, 'getGk' )
def saveAndClearPlots( name ):
print 'saveAndClearPlots( ', name, ' )'
for i in moose.wildcardFind( "/graphs/#" ):
#plot stuff
i.xplot( name + '.xplot', i.name )
moose.delete( "/graphs" )
def printPsd( name ):
    # Print the vol, the path dist from soma, the electrotonic dist, and N
    """For every PSD receptor pool, write one line to <name>.txt with:
    index, N, concentration, geometrical distance from soma, and
    electrotonic distance from soma."""
    psdR = moose.vec( '/model/chem/psd/tot_PSD_R' )
    # Maps chemical voxel index -> electrical compartment.
    neuronVoxel = moose.element( '/model/chem/spine' ).neuronVoxel
    elecComptMap = moose.element( '/model/chem/dend' ).elecComptMap
    print "len( neuronVoxel = ", len( neuronVoxel), min( neuronVoxel), max( neuronVoxel)
    print len( elecComptMap), elecComptMap[0], elecComptMap[12]
    neuron = moose.element( '/model/elec' )
    ncompts = neuron.compartments
    # d maps each compartment object to its segment index, so we can look
    # up the per-segment distance arrays below.
    d = {}
    j = 0
    for i in ncompts:
        #print i
        d[i] = j
        j += 1
    f = open( name + ".txt", 'w' )
    for i in range( len( psdR ) ):
        n = psdR[i].n
        conc = psdR[i].conc
        vol = psdR[i].volume
        compt = elecComptMap[ neuronVoxel[i] ]
        #print compt
        segIndex = d[compt[0]]
        p = neuron.geometricalDistanceFromSoma[ segIndex ]
        L = neuron.electrotonicDistanceFromSoma[ segIndex ]
        s = str( i ) + " " + str(n) + " " + str( conc ) + " " + str(p) + " " + str(L) + "\n"
        f.write( s )
    f.close()
def probeStimulus( time ):
    # Advance the simulation in probeInterval-sized steps for 'time'
    # seconds, firing one weak test pulse onto every spine synapse
    # after each step.
    for pulse in numpy.arange( 0, time, probeInterval ):
        moose.start( probeInterval )
        for syn in synSpineList:
            syn.activation( probeAmplitude )
def tetanicStimulus( time ):
    # Tetanus: drive both dendritic and spine synapses at tetanusFrequency
    # for 'time' seconds, stepping the solver one stimulus period at a time.
    period = 1.0/tetanusFrequency
    for pulse in numpy.arange( 0, time, period ):
        moose.start( period )
        for syn in synDendList:
            syn.activation( tetanusAmplitude )
        for syn in synSpineList:
            syn.activation( tetanusAmplitudeForSpines )
def main():
    """For each morphology in elecFileNames: build the multiscale model,
    run baseline -> tetanus -> post-tetanus stimulation, dump PSD state
    and plot data to text files, then tear the model down."""
    global synSpineList
    global synDendList
    numpy.random.seed( 1234 )   # reproducible spine placement
    rdes = buildRdesigneur()
    for i in elecFileNames:
        print i
        rdes.cellProtoList = [ ['./cells/' + i, 'elec'] ]
        rdes.buildModel( '/model' )
        assert( moose.exists( '/model' ) )
        # Spine synapses live under #head# compartments; dendritic
        # synapses are whatever remains of the full glu/NMDA set.
        synSpineList = moose.wildcardFind( "/model/elec/#head#/glu,/model/elec/#head#/NMDA" )
        temp = set( moose.wildcardFind( "/model/elec/#/glu,/model/elec/#/NMDA" ) )
        synDendList = list( temp - set( synSpineList ) )
        moose.reinit()
        buildPlots( rdes )
        # Run for baseline, tetanus, and post-tetanic settling time
        t1 = time.time()
        probeStimulus( baselineTime )
        tetanicStimulus( tetTime )
        probeStimulus( postTetTime )
        print 'real time = ', time.time() - t1
        printPsd( i + ".fig5" )
        saveAndClearPlots( i + ".fig5" )
        # Clean up so the next morphology starts from scratch.
        moose.delete( '/model' )
        rdes.elecid = moose.element( '/' )
# Standard script entry point.
if __name__ == '__main__':
    main()
| dilawar/moose-full | moose-examples/paper-2015/Fig5_CellMultiscale/Fig5BCD.py | Python | gpl-2.0 | 11,428 | [
"MOOSE",
"NEURON"
] | 345924cc8bd3386b0a10a4a9f9d29b3694306dba390a8a5a1d7549e46e20bc4f |
from sqlalchemy.sql import table, column, ClauseElement, operators
from sqlalchemy.sql.expression import _clone, _from_objects
from sqlalchemy import func, select, Integer, Table, \
Column, MetaData, extract, String, bindparam, tuple_, and_, union, text,\
case, ForeignKey, literal_column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, \
AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.sql.visitors import ClauseVisitor, CloningVisitor, \
cloned_traverse, ReplacingCloningVisitor
from sqlalchemy import exc
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import eq_, is_, is_not_, assert_raises, assert_raises_message
A = B = t1 = t2 = t3 = table1 = table2 = table3 = table4 = None
class TraversalTest(fixtures.TestBase, AssertsExecutionResults):
    """test ClauseVisitor's traversal, particularly its
    ability to copy and modify a ClauseElement in place."""
    @classmethod
    def setup_class(cls):
        global A, B
        # establish two fictitious ClauseElements.
        # define deep equality semantics as well as deep
        # identity semantics.
        class A(ClauseElement):
            # Leaf element wrapping a plain expression string.
            __visit_name__ = 'a'
            def __init__(self, expr):
                self.expr = expr
            def is_other(self, other):
                # deep identity: true only for the very same object
                return other is self
            __hash__ = ClauseElement.__hash__
            def __eq__(self, other):
                return other.expr == self.expr
            def __ne__(self, other):
                return other.expr != self.expr
            def __str__(self):
                return "A(%s)" % repr(self.expr)
        class B(ClauseElement):
            # Composite element holding child items; implements
            # get_children()/_copy_internals() so visitors can
            # traverse and clone it.
            __visit_name__ = 'b'
            def __init__(self, *items):
                self.items = items
            def is_other(self, other):
                # deep identity: same object AND identical child objects
                if other is not self:
                    return False
                for i1, i2 in zip(self.items, other.items):
                    if i1 is not i2:
                        return False
                return True
            __hash__ = ClauseElement.__hash__
            def __eq__(self, other):
                # deep equality: children compare equal pairwise
                for i1, i2 in zip(self.items, other.items):
                    if i1 != i2:
                        return False
                return True
            def __ne__(self, other):
                for i1, i2 in zip(self.items, other.items):
                    if i1 != i2:
                        return True
                return False
            def _copy_internals(self, clone=_clone):
                self.items = [clone(i) for i in self.items]
            def get_children(self, **kwargs):
                return self.items
            def __str__(self):
                return "B(%s)" % repr([str(i) for i in self.items])
    def test_test_classes(self):
        # sanity-check the equality/identity semantics of the fixtures above
        a1 = A("expr1")
        struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
        struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
        struct3 = B(a1, A("expr2"), B(A("expr1b"),
                A("expr2bmodified")), A("expr3"))
        assert a1.is_other(a1)
        assert struct.is_other(struct)
        assert struct == struct2
        assert struct != struct3
        assert not struct.is_other(struct2)
        assert not struct.is_other(struct3)
    def test_clone(self):
        # CloningVisitor yields an equal but physically distinct structure
        struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
                A("expr2b")), A("expr3"))
        class Vis(CloningVisitor):
            def visit_a(self, a):
                pass
            def visit_b(self, b):
                pass
        vis = Vis()
        s2 = vis.traverse(struct)
        assert struct == s2
        assert not struct.is_other(s2)
    def test_no_clone(self):
        # plain ClauseVisitor traverses in place; same objects come back
        struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
                A("expr2b")), A("expr3"))
        class Vis(ClauseVisitor):
            def visit_a(self, a):
                pass
            def visit_b(self, b):
                pass
        vis = Vis()
        s2 = vis.traverse(struct)
        assert struct == s2
        assert struct.is_other(s2)
    def test_clone_anon_label(self):
        # cloning preserves the anonymous label of a Grouping's inner column
        from sqlalchemy.sql.elements import Grouping
        c1 = Grouping(literal_column('q'))
        s1 = select([c1])
        class Vis(CloningVisitor):
            def visit_grouping(self, elem):
                pass
        vis = Vis()
        s2 = vis.traverse(s1)
        eq_(list(s2.inner_columns)[0].anon_label, c1.anon_label)
    def test_change_in_place(self):
        # a mutating CloningVisitor changes the copy, never the original
        struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
                A("expr2b")), A("expr3"))
        struct2 = B(A("expr1"), A("expr2modified"), B(A("expr1b"),
                A("expr2b")), A("expr3"))
        struct3 = B(A("expr1"), A("expr2"), B(A("expr1b"),
                A("expr2bmodified")), A("expr3"))
        class Vis(CloningVisitor):
            def visit_a(self, a):
                if a.expr == "expr2":
                    a.expr = "expr2modified"
            def visit_b(self, b):
                pass
        vis = Vis()
        s2 = vis.traverse(struct)
        assert struct != s2
        assert not struct.is_other(s2)
        assert struct2 == s2
        class Vis2(CloningVisitor):
            def visit_a(self, a):
                if a.expr == "expr2b":
                    a.expr = "expr2bmodified"
            def visit_b(self, b):
                pass
        vis2 = Vis2()
        s3 = vis2.traverse(struct)
        assert struct != s3
        assert struct3 == s3
    def test_visit_name(self):
        # subclasses inherit __visit_name__, so visitors treat them alike
        # override fns in testlib/schema.py
        from sqlalchemy import Column
        class CustomObj(Column):
            pass
        assert CustomObj.__visit_name__ == Column.__visit_name__ == 'column'
        foo, bar = CustomObj('foo', String), CustomObj('bar', String)
        bin = foo == bar
        set(ClauseVisitor().iterate(bin))
        assert set(ClauseVisitor().iterate(bin)) == set([foo, bar, bin])
class BinaryEndpointTraversalTest(fixtures.TestBase):
    """test the special binary product visit"""
    def _assert_traversal(self, expr, expected):
        # Collect the (operator, left, right) triples that
        # visit_binary_product emits and compare to the expected list.
        canary = []
        def visit(binary, l, r):
            canary.append((binary.operator, l, r))
            print(binary.operator, l, r)
        sql_util.visit_binary_product(visit, expr)
        eq_(
            canary, expected
        )
    def test_basic(self):
        # a simple comparison yields exactly one endpoint pair
        a, b = column("a"), column("b")
        self._assert_traversal(
            a == b,
            [
                (operators.eq, a, b)
            ]
        )
    def test_with_tuples(self):
        # tuple comparisons expand into the cartesian product of their
        # endpoints, while nested comparisons keep their own operators
        a, b, c, d, b1, b1a, b1b, e, f = (
            column("a"),
            column("b"),
            column("c"),
            column("d"),
            column("b1"),
            column("b1a"),
            column("b1b"),
            column("e"),
            column("f")
        )
        expr = tuple_(
            a, b, b1 == tuple_(b1a, b1b == d), c
        ) > tuple_(
            func.go(e + f)
        )
        self._assert_traversal(
            expr,
            [
                (operators.gt, a, e),
                (operators.gt, a, f),
                (operators.gt, b, e),
                (operators.gt, b, f),
                (operators.eq, b1, b1a),
                (operators.eq, b1b, d),
                (operators.gt, c, e),
                (operators.gt, c, f)
            ]
        )
    def test_composed(self):
        # AND-composed comparisons and function calls are all unpacked
        a, b, e, f, q, j, r = (
            column("a"),
            column("b"),
            column("e"),
            column("f"),
            column("q"),
            column("j"),
            column("r"),
        )
        expr = and_(
            (a + b) == q + func.sum(e + f),
            and_(
                j == r,
                f == q
            )
        )
        self._assert_traversal(
            expr,
            [
                (operators.eq, a, q),
                (operators.eq, a, e),
                (operators.eq, a, f),
                (operators.eq, b, q),
                (operators.eq, b, e),
                (operators.eq, b, f),
                (operators.eq, j, r),
                (operators.eq, f, q),
            ]
        )
    def test_subquery(self):
        # a scalar subquery is treated as an opaque endpoint, not descended
        a, b, c = column("a"), column("b"), column("c")
        subq = select([c]).where(c == a).as_scalar()
        expr = and_(a == b, b == subq)
        self._assert_traversal(
            expr,
            [
                (operators.eq, a, b),
                (operators.eq, b, subq),
            ]
        )
class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
    """test copy-in-place behavior of various ClauseElements."""
    __dialect__ = 'default'
    @classmethod
    def setup_class(cls):
        # t1/t2 are lightweight "lower-case" table constructs; t3 is a
        # full schema-level Table for schema-specific cloning tests.
        global t1, t2, t3
        t1 = table("table1",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )
        t2 = table("table2",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )
        t3 = Table('table3', MetaData(),
                   Column('col1', Integer),
                   Column('col2', Integer)
                   )
    def test_binary(self):
        # cloning a binary expression yields equivalent SQL
        clause = t1.c.col2 == t2.c.col2
        eq_(str(clause), str(CloningVisitor().traverse(clause)))
    def test_binary_anon_label_quirk(self):
        # the anon label survives adaption of a binary expression
        t = table('t1', column('col1'))
        f = t.c.col1 * 5
        self.assert_compile(select([f]),
                            "SELECT t1.col1 * :col1_1 AS anon_1 FROM t1")
        f.anon_label
        a = t.alias()
        f = sql_util.ClauseAdapter(a).traverse(f)
        self.assert_compile(
            select(
                [f]),
            "SELECT t1_1.col1 * :col1_1 AS anon_1 FROM t1 AS t1_1")
    def test_join(self):
        # cloning a join leaves the original untouched; a mutating
        # visitor only affects the clone
        clause = t1.join(t2, t1.c.col2 == t2.c.col2)
        c1 = str(clause)
        assert str(clause) == str(CloningVisitor().traverse(clause))
        class Vis(CloningVisitor):
            def visit_binary(self, binary):
                binary.right = t2.c.col3
        clause2 = Vis().traverse(clause)
        assert c1 == str(clause)
        assert str(clause2) == str(t1.join(t2, t1.c.col2 == t2.c.col3))
    def test_aliased_column_adapt(self):
        # ColumnAdapter maps columns of one alias onto another selectable
        clause = t1.select()
        aliased = t1.select().alias()
        aliased2 = t1.alias()
        adapter = sql_util.ColumnAdapter(aliased)
        f = select([
            adapter.columns[c]
            for c in aliased2.c
        ]).select_from(aliased)
        s = select([aliased2]).select_from(aliased)
        eq_(str(s), str(f))
        f = select([
            adapter.columns[func.count(aliased2.c.col1)]
        ]).select_from(aliased)
        eq_(
            str(select([func.count(aliased2.c.col1)]).select_from(aliased)),
            str(f)
        )
    def test_aliased_cloned_column_adapt_inner(self):
        clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
        aliased1 = select([clause.c.col1, clause.c.foo])
        aliased2 = clause
        aliased2.c.col1, aliased2.c.foo
        aliased3 = cloned_traverse(aliased2, {}, {})
        # fixed by [ticket:2419].  the inside columns
        # on aliased3 have _is_clone_of pointers to those of
        # aliased2.  corresponding_column checks these
        # now.
        adapter = sql_util.ColumnAdapter(aliased1)
        f1 = select([
            adapter.columns[c]
            for c in aliased2._raw_columns
        ])
        f2 = select([
            adapter.columns[c]
            for c in aliased3._raw_columns
        ])
        eq_(
            str(f1), str(f2)
        )
    def test_aliased_cloned_column_adapt_exported(self):
        clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
        aliased1 = select([clause.c.col1, clause.c.foo])
        aliased2 = clause
        aliased2.c.col1, aliased2.c.foo
        aliased3 = cloned_traverse(aliased2, {}, {})
        # also fixed by [ticket:2419].  When we look at the
        # *outside* columns of aliased3, they previously did not
        # have an _is_clone_of pointer.   But we now modified _make_proxy
        # to assign this.
        adapter = sql_util.ColumnAdapter(aliased1)
        f1 = select([
            adapter.columns[c]
            for c in aliased2.c
        ])
        f2 = select([
            adapter.columns[c]
            for c in aliased3.c
        ])
        eq_(
            str(f1), str(f2)
        )
    def test_aliased_cloned_schema_column_adapt_exported(self):
        # same as the previous test, but with schema-level Table columns
        clause = select([t3.c.col1, func.foo(t3.c.col2).label('foo')])
        aliased1 = select([clause.c.col1, clause.c.foo])
        aliased2 = clause
        aliased2.c.col1, aliased2.c.foo
        aliased3 = cloned_traverse(aliased2, {}, {})
        # also fixed by [ticket:2419].  When we look at the
        # *outside* columns of aliased3, they previously did not
        # have an _is_clone_of pointer.   But we now modified _make_proxy
        # to assign this.
        adapter = sql_util.ColumnAdapter(aliased1)
        f1 = select([
            adapter.columns[c]
            for c in aliased2.c
        ])
        f2 = select([
            adapter.columns[c]
            for c in aliased3.c
        ])
        eq_(
            str(f1), str(f2)
        )
    def test_text(self):
        # mutating a cloned text() clause leaves the original's text and
        # bind params untouched
        clause = text(
            "select * from table where foo=:bar",
            bindparams=[bindparam('bar')])
        c1 = str(clause)
        class Vis(CloningVisitor):
            def visit_textclause(self, text):
                text.text = text.text + " SOME MODIFIER=:lala"
                text._bindparams['lala'] = bindparam('lala')
        clause2 = Vis().traverse(clause)
        assert c1 == str(clause)
        assert str(clause2) == c1 + " SOME MODIFIER=:lala"
        assert list(clause._bindparams.keys()) == ['bar']
        assert set(clause2._bindparams.keys()) == set(['bar', 'lala'])
    def test_select(self):
        # CloningVisitor protects the original select; a non-cloning
        # ClauseVisitor mutates it in place
        s2 = select([t1])
        s2_assert = str(s2)
        s3_assert = str(select([t1], t1.c.col2 == 7))
        class Vis(CloningVisitor):
            def visit_select(self, select):
                select.append_whereclause(t1.c.col2 == 7)
        s3 = Vis().traverse(s2)
        assert str(s3) == s3_assert
        assert str(s2) == s2_assert
        print(str(s2))
        print(str(s3))
        class Vis(ClauseVisitor):
            def visit_select(self, select):
                select.append_whereclause(t1.c.col2 == 7)
        Vis().traverse(s2)
        assert str(s2) == s3_assert
        s4_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col3 == 9)))
        class Vis(CloningVisitor):
            def visit_select(self, select):
                select.append_whereclause(t1.c.col3 == 9)
        s4 = Vis().traverse(s3)
        print(str(s3))
        print(str(s4))
        assert str(s4) == s4_assert
        assert str(s3) == s3_assert
        s5_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col1 == 9)))
        class Vis(CloningVisitor):
            def visit_binary(self, binary):
                if binary.left is t1.c.col3:
                    binary.left = t1.c.col1
                    binary.right = bindparam("col1", unique=True)
        s5 = Vis().traverse(s4)
        print(str(s4))
        print(str(s5))
        assert str(s5) == s5_assert
        assert str(s4) == s4_assert
    def test_union(self):
        # unions survive cloning with identical SQL and exported columns;
        # params() produces independent parameterized copies
        u = union(t1.select(), t2.select())
        u2 = CloningVisitor().traverse(u)
        assert str(u) == str(u2)
        assert [str(c) for c in u2.c] == [str(c) for c in u.c]
        u = union(t1.select(), t2.select())
        cols = [str(c) for c in u.c]
        u2 = CloningVisitor().traverse(u)
        assert str(u) == str(u2)
        assert [str(c) for c in u2.c] == cols
        s1 = select([t1], t1.c.col1 == bindparam('id_param'))
        s2 = select([t2])
        u = union(s1, s2)
        u2 = u.params(id_param=7)
        u3 = u.params(id_param=10)
        assert str(u) == str(u2) == str(u3)
        assert u2.compile().params == {'id_param': 7}
        assert u3.compile().params == {'id_param': 10}
    def test_in(self):
        expr = t1.c.col1.in_(['foo', 'bar'])
        expr2 = CloningVisitor().traverse(expr)
        assert str(expr) == str(expr2)
    def test_over(self):
        expr = func.row_number().over(order_by=t1.c.col1)
        expr2 = CloningVisitor().traverse(expr)
        assert str(expr) == str(expr2)
    def test_funcfilter(self):
        expr = func.count(1).filter(t1.c.col1 > 1)
        expr2 = CloningVisitor().traverse(expr)
        assert str(expr) == str(expr2)
    def test_adapt_union(self):
        # adapting a table that is a component of the union target
        # returns the union alias itself
        u = union(
            t1.select().where(t1.c.col1 == 4),
            t1.select().where(t1.c.col1 == 5)
        ).alias()
        assert sql_util.ClauseAdapter(u).traverse(t1) is u
    def test_binds(self):
        """test that unique bindparams change their name upon clone()
        to prevent conflicts"""
        s = select([t1], t1.c.col1 == bindparam(None, unique=True)).alias()
        s2 = CloningVisitor().traverse(s).alias()
        s3 = select([s], s.c.col2 == s2.c.col2)
        self.assert_compile(
            s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 WHERE table1.col1 = :param_1) "
            "AS anon_1, "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
            "AS col3 FROM table1 WHERE table1.col1 = :param_2) AS anon_2 "
            "WHERE anon_1.col2 = anon_2.col2")
        s = select([t1], t1.c.col1 == 4).alias()
        s2 = CloningVisitor().traverse(s).alias()
        s3 = select([s], s.c.col2 == s2.c.col2)
        self.assert_compile(
            s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 WHERE table1.col1 = :col1_1) "
            "AS anon_1, "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
            "AS col3 FROM table1 WHERE table1.col1 = :col1_2) AS anon_2 "
            "WHERE anon_1.col2 = anon_2.col2")
    def test_extract(self):
        # EXTRACT expressions clone cleanly and keep their labels
        s = select([extract('foo', t1.c.col1).label('col1')])
        self.assert_compile(
            s,
            "SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
        s2 = CloningVisitor().traverse(s).alias()
        s3 = select([s2.c.col1])
        self.assert_compile(
            s,
            "SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
        self.assert_compile(s3,
                            "SELECT anon_1.col1 FROM (SELECT EXTRACT(foo FROM "
                            "table1.col1) AS col1 FROM table1) AS anon_1")
    @testing.emits_warning('.*replaced by another column with the same key')
    def test_alias(self):
        # repeated cloning / no-op adaption leaves the SQL string stable
        subq = t2.select().alias('subq')
        s = select([t1.c.col1, subq.c.col1],
                   from_obj=[t1, subq,
                             t1.join(subq, t1.c.col1 == subq.c.col2)]
                   )
        orig = str(s)
        s2 = CloningVisitor().traverse(s)
        assert orig == str(s) == str(s2)
        s4 = CloningVisitor().traverse(s2)
        assert orig == str(s) == str(s2) == str(s4)
        s3 = sql_util.ClauseAdapter(table('foo')).traverse(s)
        assert orig == str(s) == str(s3)
        s4 = sql_util.ClauseAdapter(table('foo')).traverse(s3)
        assert orig == str(s) == str(s3) == str(s4)
        subq = subq.alias('subq')
        s = select([t1.c.col1, subq.c.col1],
                   from_obj=[t1, subq,
                             t1.join(subq, t1.c.col1 == subq.c.col2)]
                   )
        s5 = CloningVisitor().traverse(s)
        assert orig == str(s) == str(s5)
    def test_correlated_select(self):
        # a correlated subquery stays correlated after in-place mutation
        s = select([literal_column('*')], t1.c.col1 == t2.c.col1,
                   from_obj=[t1, t2]).correlate(t2)
        class Vis(CloningVisitor):
            def visit_select(self, select):
                select.append_whereclause(t1.c.col2 == 7)
        self.assert_compile(
            select([t2]).where(t2.c.col1 == Vis().traverse(s)),
            "SELECT table2.col1, table2.col2, table2.col3 "
            "FROM table2 WHERE table2.col1 = "
            "(SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
            "AND table1.col2 = :col2_1)"
        )
    def test_this_thing(self):
        # adapting a select-of-alias swaps in the aliased table throughout
        s = select([t1]).where(t1.c.col1 == 'foo').alias()
        s2 = select([s.c.col1])
        self.assert_compile(s2,
                            'SELECT anon_1.col1 FROM (SELECT '
                            'table1.col1 AS col1, table1.col2 AS col2, '
                            'table1.col3 AS col3 FROM table1 WHERE '
                            'table1.col1 = :col1_1) AS anon_1')
        t1a = t1.alias()
        s2 = sql_util.ClauseAdapter(t1a).traverse(s2)
        self.assert_compile(s2,
                            'SELECT anon_1.col1 FROM (SELECT '
                            'table1_1.col1 AS col1, table1_1.col2 AS '
                            'col2, table1_1.col3 AS col3 FROM table1 '
                            'AS table1_1 WHERE table1_1.col1 = '
                            ':col1_1) AS anon_1')
    def test_select_fromtwice_one(self):
        # self-referential correlated subquery survives cloning
        t1a = t1.alias()
        s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1a)
        s = select([t1]).where(t1.c.col1 == s)
        self.assert_compile(
            s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
            "WHERE table1.col1 = "
            "(SELECT 1 FROM table1, table1 AS table1_1 "
            "WHERE table1.col1 = table1_1.col1)")
        s = CloningVisitor().traverse(s)
        self.assert_compile(
            s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
            "WHERE table1.col1 = "
            "(SELECT 1 FROM table1, table1 AS table1_1 "
            "WHERE table1.col1 = table1_1.col1)")
    def test_select_fromtwice_two(self):
        # correlation against an aliased subselect survives a
        # ReplacingCloningVisitor pass
        s = select([t1]).where(t1.c.col1 == 'foo').alias()
        s2 = select([1], t1.c.col1 == s.c.col1, from_obj=s).correlate(t1)
        s3 = select([t1]).where(t1.c.col1 == s2)
        self.assert_compile(
            s3, "SELECT table1.col1, table1.col2, table1.col3 "
            "FROM table1 WHERE table1.col1 = "
            "(SELECT 1 FROM "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 "
            "WHERE table1.col1 = :col1_1) "
            "AS anon_1 WHERE table1.col1 = anon_1.col1)")
        s4 = ReplacingCloningVisitor().traverse(s3)
        self.assert_compile(
            s4, "SELECT table1.col1, table1.col2, table1.col3 "
            "FROM table1 WHERE table1.col1 = "
            "(SELECT 1 FROM "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 "
            "WHERE table1.col1 = :col1_1) "
            "AS anon_1 WHERE table1.col1 = anon_1.col1)")
class ColumnAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for sql_util.ColumnAdapter: memoization of adapted columns,
    and wrap()/chain() composition semantics including include_fn
    filtering and ordering effects."""
    __dialect__ = 'default'
    @classmethod
    def setup_class(cls):
        global t1, t2
        t1 = table("table1",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   column("col4")
                   )
        t2 = table("table2",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )
    def test_traverse_memoizes_w_columns(self):
        # traverse() caches its result in adapter.columns
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(t1a, anonymize_labels=True)
        expr = select([t1a.c.col1]).label('x')
        expr_adapted = adapter.traverse(expr)
        is_not_(expr, expr_adapted)
        is_(
            adapter.columns[expr],
            expr_adapted
        )
    def test_traverse_memoizes_w_itself(self):
        # repeated traverse() of the same expression returns the cached copy
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(t1a, anonymize_labels=True)
        expr = select([t1a.c.col1]).label('x')
        expr_adapted = adapter.traverse(expr)
        is_not_(expr, expr_adapted)
        is_(
            adapter.traverse(expr),
            expr_adapted
        )
    def test_columns_memoizes_w_itself(self):
        # the columns mapping is itself memoized
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(t1a, anonymize_labels=True)
        expr = select([t1a.c.col1]).label('x')
        expr_adapted = adapter.columns[expr]
        is_not_(expr, expr_adapted)
        is_(
            adapter.columns[expr],
            expr_adapted
        )
    def test_wrapping_fallthrough(self):
        # wrap(): if the outer adapter can't adapt a column, the inner
        # adapter gets a chance; chain() tries adapters strictly in order
        t1a = t1.alias(name="t1a")
        t2a = t2.alias(name="t2a")
        a1 = sql_util.ColumnAdapter(t1a)
        s1 = select([t1a.c.col1, t2a.c.col1]).apply_labels().alias()
        a2 = sql_util.ColumnAdapter(s1)
        a3 = a2.wrap(a1)
        a4 = a1.wrap(a2)
        a5 = a1.chain(a2)
        # t1.c.col1 -> s1.c.t1a_col1
        # adapted by a2
        is_(
            a3.columns[t1.c.col1], s1.c.t1a_col1
        )
        is_(
            a4.columns[t1.c.col1], s1.c.t1a_col1
        )
        # chaining can't fall through because a1 grabs it
        # first
        is_(
            a5.columns[t1.c.col1], t1a.c.col1
        )
        # t2.c.col1 -> s1.c.t2a_col1
        # adapted by a2
        is_(
            a3.columns[t2.c.col1], s1.c.t2a_col1
        )
        is_(
            a4.columns[t2.c.col1], s1.c.t2a_col1
        )
        # chaining, t2 hits s1
        is_(
            a5.columns[t2.c.col1], s1.c.t2a_col1
        )
        # t1.c.col2 -> t1a.c.col2
        # fallthrough to a1
        is_(
            a3.columns[t1.c.col2], t1a.c.col2
        )
        is_(
            a4.columns[t1.c.col2], t1a.c.col2
        )
        # chaining hits a1
        is_(
            a5.columns[t1.c.col2], t1a.c.col2
        )
        # t2.c.col2 -> t2.c.col2
        # fallthrough to no adaption
        is_(
            a3.columns[t2.c.col2], t2.c.col2
        )
        is_(
            a4.columns[t2.c.col2], t2.c.col2
        )
    def test_wrapping_ordering(self):
        """illustrate an example where order of wrappers matters.
        This test illustrates both the ordering being significant
        as well as a scenario where multiple translations are needed
        (e.g. wrapping vs. chaining).
        """
        stmt = select([t1.c.col1, t2.c.col1]).apply_labels()
        sa = stmt.alias()
        stmt2 = select([t2, sa])
        a1 = sql_util.ColumnAdapter(stmt)
        a2 = sql_util.ColumnAdapter(stmt2)
        a2_to_a1 = a2.wrap(a1)
        a1_to_a2 = a1.wrap(a2)
        # when stmt2 and stmt represent the same column
        # in different contexts, order of wrapping matters
        # t2.c.col1 via a2 is stmt2.c.col1; then ignored by a1
        is_(
            a2_to_a1.columns[t2.c.col1], stmt2.c.col1
        )
        # t2.c.col1 via a1 is stmt.c.table2_col1; a2 then
        # sends this to stmt2.c.table2_col1
        is_(
            a1_to_a2.columns[t2.c.col1], stmt2.c.table2_col1
        )
        # for mutually exclusive columns, order doesn't matter
        is_(
            a2_to_a1.columns[t1.c.col1], stmt2.c.table1_col1
        )
        is_(
            a1_to_a2.columns[t1.c.col1], stmt2.c.table1_col1
        )
        is_(
            a2_to_a1.columns[t2.c.col2], stmt2.c.col2
        )
    def test_wrapping_multiple(self):
        """illustrate that wrapping runs both adapters"""
        t1a = t1.alias(name="t1a")
        t2a = t2.alias(name="t2a")
        a1 = sql_util.ColumnAdapter(t1a)
        a2 = sql_util.ColumnAdapter(t2a)
        a3 = a2.wrap(a1)
        stmt = select([t1.c.col1, t2.c.col2])
        self.assert_compile(
            a3.traverse(stmt),
            "SELECT t1a.col1, t2a.col2 FROM table1 AS t1a, table2 AS t2a"
        )
        # chaining does too because these adapters don't share any
        # columns
        a4 = a2.chain(a1)
        self.assert_compile(
            a4.traverse(stmt),
            "SELECT t1a.col1, t2a.col2 FROM table1 AS t1a, table2 AS t2a"
        )
    def test_wrapping_inclusions(self):
        """test wrapping and inclusion rules together,
        taking into account multiple objects with equivalent hash identity."""
        t1a = t1.alias(name="t1a")
        t2a = t2.alias(name="t2a")
        a1 = sql_util.ColumnAdapter(
            t1a,
            include_fn=lambda col: "a1" in col._annotations)
        s1 = select([t1a, t2a]).apply_labels().alias()
        a2 = sql_util.ColumnAdapter(
            s1,
            include_fn=lambda col: "a2" in col._annotations)
        a3 = a2.wrap(a1)
        # annotated variants of the same column hash equally but are
        # filtered differently by each adapter's include_fn
        c1a1 = t1.c.col1._annotate(dict(a1=True))
        c1a2 = t1.c.col1._annotate(dict(a2=True))
        c1aa = t1.c.col1._annotate(dict(a1=True, a2=True))
        c2a1 = t2.c.col1._annotate(dict(a1=True))
        c2a2 = t2.c.col1._annotate(dict(a2=True))
        c2aa = t2.c.col1._annotate(dict(a1=True, a2=True))
        is_(
            a3.columns[c1a1], t1a.c.col1
        )
        is_(
            a3.columns[c1a2], s1.c.t1a_col1
        )
        is_(
            a3.columns[c1aa], s1.c.t1a_col1
        )
        # not covered by a1, accepted by a2
        is_(
            a3.columns[c2aa], s1.c.t2a_col1
        )
        # not covered by a1, accepted by a2
        is_(
            a3.columns[c2a2], s1.c.t2a_col1
        )
        # not covered by a1, rejected by a2
        is_(
            a3.columns[c2a1], c2a1
        )
class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_correlation_on_clone(self):
t1alias = t1.alias('t1alias')
t2alias = t2.alias('t2alias')
vis = sql_util.ClauseAdapter(t1alias)
s = select([literal_column('*')], from_obj=[t1alias, t2alias]).as_scalar()
assert t2alias in s._froms
assert t1alias in s._froms
self.assert_compile(select([literal_column('*')], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
assert t2alias not in s._froms # not present because it's been
# cloned
assert t1alias in s._froms # present because the adapter placed
# it there
# correlate list on "s" needs to take into account the full
# _cloned_set for each element in _froms when correlating
self.assert_compile(select([literal_column('*')], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select([literal_column('*')], from_obj=[t1alias,
t2alias]).correlate(t2alias).as_scalar()
self.assert_compile(select([literal_column('*')], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
self.assert_compile(select([literal_column('*')], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = CloningVisitor().traverse(s)
self.assert_compile(select([literal_column('*')], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select([literal_column('*')]).where(t1.c.col1 == t2.c.col1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = select([literal_column('*')]).where(t1.c.col1
== t2.c.col1).correlate(t1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
    @testing.fails_on_everything_except()
    def test_joins_dont_adapt(self):
        # adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't
        # make much sense. ClauseAdapter doesn't make any changes if
        # it's against a straight join.
        users = table('users', column('id'))
        addresses = table('addresses', column('id'), column('user_id'))

        ualias = users.alias()

        s = select([func.count(addresses.c.id)], users.c.id
                   == addresses.c.user_id).correlate(users)
        s = sql_util.ClauseAdapter(ualias).traverse(s)

        j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id)

        # adapting against the join leaves the statement unchanged: the
        # expected SQL still references users_1 from the earlier traverse.
        self.assert_compile(sql_util.ClauseAdapter(j1).traverse(s),
                            'SELECT count(addresses.id) AS count_1 '
                            'FROM addresses WHERE users_1.id = '
                            'addresses.user_id')
def test_table_to_alias_1(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
assert list(_from_objects(ff)) == [t1alias]
    def test_table_to_alias_2(self):
        """Traversing a plain select moves its FROM from t1 to the alias."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        self.assert_compile(vis.traverse(select([literal_column('*')], from_obj=[t1])),
                            'SELECT * FROM table1 AS t1alias')
    def test_table_to_alias_3(self):
        """Un-traversed select compiles against the raw tables."""
        t1alias = t1.alias('t1alias')
        # NOTE(review): `vis` is deliberately left unused — presumably this
        # pins the un-adapted baseline SQL (compare test_table_to_alias_4);
        # confirm the intent before "fixing" the unused variable.
        vis = sql_util.ClauseAdapter(t1alias)
        self.assert_compile(select([literal_column('*')], t1.c.col1 == t2.c.col2),
                            'SELECT * FROM table1, table2 WHERE '
                            'table1.col1 = table2.col2')
def test_table_to_alias_4(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select([literal_column('*')], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
    def test_table_to_alias_5(self):
        """Same as test_4, but with an explicit two-table FROM list."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        self.assert_compile(
            vis.traverse(
                select(
                    [literal_column('*')],
                    t1.c.col1 == t2.c.col2,
                    from_obj=[
                        t1,
                        t2])),
            'SELECT * FROM table1 AS t1alias, table2 '
            'WHERE t1alias.col1 = table2.col2')
    def test_table_to_alias_6(self):
        """correlate(t1): the adapted subquery keeps only table2 in FROM."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        self.assert_compile(
            select([t1alias, t2]).where(
                t1alias.c.col1 == vis.traverse(
                    select([literal_column('*')], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).
                    correlate(t1)
                )
            ),
            "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
            "table2.col1, table2.col2, table2.col3 "
            "FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
            "(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)"
        )
    def test_table_to_alias_7(self):
        """correlate(t2): the aliased t1 stays inside the subquery's FROM."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        self.assert_compile(
            select([t1alias, t2]).
            where(t1alias.c.col1 == vis.traverse(
                select([literal_column('*')], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).
                correlate(t2))),
            "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
            "table2.col1, table2.col2, table2.col3 "
            "FROM table1 AS t1alias, table2 "
            "WHERE t1alias.col1 = "
            "(SELECT * FROM table1 AS t1alias "
            "WHERE t1alias.col1 = table2.col2)")
def test_table_to_alias_8(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(case([(t1.c.col1 == 5, t1.c.col2)], else_=t1.c.col1)),
'CASE WHEN (t1alias.col1 = :col1_1) THEN '
't1alias.col2 ELSE t1alias.col1 END')
    def test_table_to_alias_9(self):
        """CASE with a ``value`` comparison expression is adapted as well."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        self.assert_compile(
            vis.traverse(
                case(
                    [
                        (5,
                         t1.c.col2)],
                    value=t1.c.col1,
                    else_=t1.c.col1)),
            'CASE t1alias.col1 WHEN :param_1 THEN '
            't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_10(self):
s = select([literal_column('*')], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_11(self):
s = select([literal_column('*')], from_obj=[t1]).alias('foo')
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(s.select()),
'SELECT foo.* FROM (SELECT * FROM table1 '
'AS t1alias) AS foo')
    def test_table_to_alias_12(self):
        # NOTE(review): byte-identical to test_table_to_alias_10 — presumably
        # kept as the "original was not mutated" companion to test_11;
        # confirm the intent upstream before deduplicating.
        s = select([literal_column('*')], from_obj=[t1]).alias('foo')
        self.assert_compile(s.select(),
                            'SELECT foo.* FROM (SELECT * FROM table1) '
                            'AS foo')
    def test_table_to_alias_13(self):
        """Adapted labeled function compiles with, and reports, the alias."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        ff = vis.traverse(func.count(t1.c.col1).label('foo'))
        self.assert_compile(select([ff]),
                            'SELECT count(t1alias.col1) AS foo FROM '
                            'table1 AS t1alias')
        assert list(_from_objects(ff)) == [t1alias]

    # def test_table_to_alias_2(self):
    # TODO: self.assert_compile(vis.traverse(select([func.count(t1.c
    # .col1).l abel('foo')]), clone=True), "SELECT
    # count(t1alias.col1) AS foo FROM table1 AS t1alias")
    def test_table_to_alias_14(self):
        """Chained adapters rewrite both tables in a single traverse."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        t2alias = t2.alias('t2alias')
        vis.chain(sql_util.ClauseAdapter(t2alias))
        self.assert_compile(vis.traverse(select([literal_column('*')], t1.c.col1
                                                == t2.c.col2)),
                            'SELECT * FROM table1 AS t1alias, table2 '
                            'AS t2alias WHERE t1alias.col1 = '
                            't2alias.col2')
    def test_table_to_alias_15(self):
        """Chained adapters also apply when the FROM list is explicit."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        t2alias = t2.alias('t2alias')
        vis.chain(sql_util.ClauseAdapter(t2alias))
        self.assert_compile(
            vis.traverse(
                select(
                    ['*'],
                    t1.c.col1 == t2.c.col2,
                    from_obj=[
                        t1,
                        t2])),
            'SELECT * FROM table1 AS t1alias, table2 '
            'AS t2alias WHERE t1alias.col1 = '
            't2alias.col2')
    def test_table_to_alias_16(self):
        """Chained adapters + correlate(t1): subquery keeps t2alias only."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        t2alias = t2.alias('t2alias')
        vis.chain(sql_util.ClauseAdapter(t2alias))
        self.assert_compile(
            select([t1alias, t2alias]).where(
                t1alias.c.col1 ==
                vis.traverse(select(['*'],
                                    t1.c.col1 == t2.c.col2,
                                    from_obj=[t1, t2]).correlate(t1))
            ),
            "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
            "t2alias.col1, t2alias.col2, t2alias.col3 "
            "FROM table1 AS t1alias, table2 AS t2alias "
            "WHERE t1alias.col1 = "
            "(SELECT * FROM table2 AS t2alias "
            "WHERE t1alias.col1 = t2alias.col2)"
        )
    def test_table_to_alias_17(self):
        """Chained adapters + correlate(t2): subquery keeps t1alias only."""
        t1alias = t1.alias('t1alias')
        vis = sql_util.ClauseAdapter(t1alias)
        t2alias = t2.alias('t2alias')
        vis.chain(sql_util.ClauseAdapter(t2alias))
        self.assert_compile(
            t2alias.select().where(
                t2alias.c.col2 == vis.traverse(
                    select(
                        ['*'],
                        t1.c.col1 == t2.c.col2,
                        from_obj=[
                            t1,
                            t2]).correlate(t2))),
            'SELECT t2alias.col1, t2alias.col2, t2alias.col3 '
            'FROM table2 AS t2alias WHERE t2alias.col2 = '
            '(SELECT * FROM table1 AS t1alias WHERE '
            't1alias.col1 = t2alias.col2)')
    def test_include_exclude(self):
        """include_fn limits adaption to the columns it accepts."""
        m = MetaData()
        a = Table('a', m,
                  Column('id', Integer, primary_key=True),
                  Column('xxx_id', Integer,
                         ForeignKey('a.id', name='adf', use_alter=True)
                         )
                  )

        e = (a.c.id == a.c.xxx_id)
        assert str(e) == "a.id = a.xxx_id"
        b = a.alias()

        e = sql_util.ClauseAdapter(b, include_fn=lambda x: x in set([a.c.id]),
                                   equivalents={a.c.id: set([a.c.id])}
                                   ).traverse(e)

        # only a.c.id was adapted; a.c.xxx_id was filtered out by include_fn
        assert str(e) == "a_1.id = a.xxx_id"
    def test_recursive_equivalents(self):
        """A cyclic equivalents map must not recurse forever."""
        m = MetaData()
        a = Table('a', m, Column('x', Integer), Column('y', Integer))
        b = Table('b', m, Column('x', Integer), Column('y', Integer))
        c = Table('c', m, Column('x', Integer), Column('y', Integer))

        # force a recursion overflow, by linking a.c.x<->c.c.x, and
        # asking for a nonexistent col. corresponding_column should prevent
        # endless depth.
        adapt = sql_util.ClauseAdapter(
            b, equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])})
        assert adapt._corresponding_column(a.c.x, False) is None
    def test_multilevel_equivalents(self):
        """Equivalents are followed transitively (c.x -> b.x -> a.x)."""
        m = MetaData()
        a = Table('a', m, Column('x', Integer), Column('y', Integer))
        b = Table('b', m, Column('x', Integer), Column('y', Integer))
        c = Table('c', m, Column('x', Integer), Column('y', Integer))

        alias = select([a]).select_from(a.join(b, a.c.x == b.c.x)).alias()

        # two levels of indirection from c.x->b.x->a.x, requires recursive
        # corresponding_column call
        adapt = sql_util.ClauseAdapter(
            alias, equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])})
        assert adapt._corresponding_column(a.c.x, False) is alias.c.x
        assert adapt._corresponding_column(c.c.x, False) is alias.c.x
    def test_join_to_alias(self):
        """Adapting an outer join against an alias of its inner join."""
        metadata = MetaData()
        a = Table('a', metadata,
                  Column('id', Integer, primary_key=True))
        b = Table('b', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('aid', Integer, ForeignKey('a.id')),
                  )
        c = Table('c', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('bid', Integer, ForeignKey('b.id')),
                  )

        d = Table('d', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('aid', Integer, ForeignKey('a.id')),
                  )

        j1 = a.outerjoin(b)
        j2 = select([j1], use_labels=True)

        j3 = c.join(j2, j2.c.b_id == c.c.bid)

        j4 = j3.outerjoin(d)
        self.assert_compile(j4,
                            'c JOIN (SELECT a.id AS a_id, b.id AS '
                            'b_id, b.aid AS b_aid FROM a LEFT OUTER '
                            'JOIN b ON a.id = b.aid) ON b_id = c.bid '
                            'LEFT OUTER JOIN d ON a_id = d.aid')
        j5 = j3.alias('foo')
        j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0]

        # this statement takes c join(a join b), wraps it inside an
        # aliased "select * from c join(a join b) AS foo". the outermost
        # right side "left outer join d" stays the same, except "d"
        # joins against foo.a_id instead of plain "a_id"
        self.assert_compile(j6,
                            '(SELECT c.id AS c_id, c.bid AS c_bid, '
                            'a_id AS a_id, b_id AS b_id, b_aid AS '
                            'b_aid FROM c JOIN (SELECT a.id AS a_id, '
                            'b.id AS b_id, b.aid AS b_aid FROM a LEFT '
                            'OUTER JOIN b ON a.id = b.aid) ON b_id = '
                            'c.bid) AS foo LEFT OUTER JOIN d ON '
                            'foo.a_id = d.aid')
    def test_derived_from(self):
        """is_derived_from(): direct, aliased, and cloned relationships."""
        assert select([t1]).is_derived_from(t1)
        assert not select([t2]).is_derived_from(t1)
        assert not t1.is_derived_from(select([t1]))
        assert t1.alias().is_derived_from(t1)

        s1 = select([t1, t2]).alias('foo')
        s2 = select([s1]).limit(5).offset(10).alias()
        assert s2.is_derived_from(s1)
        # derivation must survive a clone of the outer select
        s2 = s2._clone()
        assert s2.is_derived_from(s1)
    def test_aliasedselect_to_aliasedselect_straight(self):
        """Adapting s1 against its limited/offset alias s2."""
        # original issue from ticket #904
        s1 = select([t1]).alias('foo')
        s2 = select([s1]).limit(5).offset(10).alias()
        self.assert_compile(sql_util.ClauseAdapter(s2).traverse(s1),
                            'SELECT foo.col1, foo.col2, foo.col3 FROM '
                            '(SELECT table1.col1 AS col1, table1.col2 '
                            'AS col2, table1.col3 AS col3 FROM table1) '
                            'AS foo LIMIT :param_1 OFFSET :param_2',
                            {'param_1': 5, 'param_2': 10})
    def test_aliasedselect_to_aliasedselect_join(self):
        """Adapting a join swaps the s1 side for the limited alias s2."""
        s1 = select([t1]).alias('foo')
        s2 = select([s1]).limit(5).offset(10).alias()
        j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1)
        self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
                            'SELECT anon_1.col1, anon_1.col2, '
                            'anon_1.col3, table2.col1, table2.col2, '
                            'table2.col3 FROM (SELECT foo.col1 AS '
                            'col1, foo.col2 AS col2, foo.col3 AS col3 '
                            'FROM (SELECT table1.col1 AS col1, '
                            'table1.col2 AS col2, table1.col3 AS col3 '
                            'FROM table1) AS foo LIMIT :param_1 OFFSET '
                            ':param_2) AS anon_1 LEFT OUTER JOIN '
                            'table2 ON anon_1.col1 = table2.col1',
                            {'param_1': 5, 'param_2': 10})
    def test_aliasedselect_to_aliasedselect_join_nested_table(self):
        """A separate alias of t1 ('bar') is NOT derived from s2, so it stays."""
        s1 = select([t1]).alias('foo')
        s2 = select([s1]).limit(5).offset(10).alias()
        talias = t1.alias('bar')

        # sanity check on the derivation relationship exercised below
        assert not s2.is_derived_from(talias)

        j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1)

        self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
                            'SELECT anon_1.col1, anon_1.col2, '
                            'anon_1.col3, bar.col1, bar.col2, bar.col3 '
                            'FROM (SELECT foo.col1 AS col1, foo.col2 '
                            'AS col2, foo.col3 AS col3 FROM (SELECT '
                            'table1.col1 AS col1, table1.col2 AS col2, '
                            'table1.col3 AS col3 FROM table1) AS foo '
                            'LIMIT :param_1 OFFSET :param_2) AS anon_1 '
                            'LEFT OUTER JOIN table1 AS bar ON '
                            'anon_1.col1 = bar.col1', {'param_1': 5,
                                                       'param_2': 10})
def test_functions(self):
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).
traverse(func.count(t1.c.col1)),
'count(table1_1.col1)')
s = select([func.count(t1.c.col1)])
self.assert_compile(sql_util.ClauseAdapter(t1.alias()).traverse(s),
'SELECT count(table1_1.col1) AS count_1 '
'FROM table1 AS table1_1')
    def test_recursive(self):
        """Adapting against a UNION alias resolves columns through the union."""
        metadata = MetaData()
        a = Table('a', metadata,
                  Column('id', Integer, primary_key=True))
        b = Table('b', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('aid', Integer, ForeignKey('a.id')),
                  )
        c = Table('c', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('bid', Integer, ForeignKey('b.id')),
                  )

        d = Table('d', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('aid', Integer, ForeignKey('a.id')),
                  )

        u = union(
            a.join(b).select().apply_labels(),
            a.join(d).select().apply_labels()
        ).alias()

        self.assert_compile(
            sql_util.ClauseAdapter(u).
            traverse(select([c.c.bid]).where(c.c.bid == u.c.b_aid)),
            "SELECT c.bid "
            "FROM c, (SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
            "FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id "
            "AS d_id, d.aid AS d_aid "
            "FROM a JOIN d ON a.id = d.aid) AS anon_1 "
            "WHERE c.bid = anon_1.b_aid"
        )
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
    def test_label_anonymize_one(self):
        """anonymize_labels=True: the adapted copy of a named label gets anon_N."""
        t1a = t1.alias()
        adapter = sql_util.ClauseAdapter(t1a, anonymize_labels=True)

        expr = select([t1.c.col2]).where(t1.c.col3 == 5).label('expr')
        expr_adapted = adapter.traverse(expr)

        stmt = select([expr, expr_adapted]).order_by(expr, expr_adapted)
        self.assert_compile(
            stmt,
            "SELECT "
            "(SELECT table1.col2 FROM table1 WHERE table1.col3 = :col3_1) "
            "AS expr, "
            "(SELECT table1_1.col2 FROM table1 AS table1_1 "
            "WHERE table1_1.col3 = :col3_2) AS anon_1 "
            "ORDER BY expr, anon_1"
        )
    def test_label_anonymize_two(self):
        """label(None): both original and adapted labels are anonymous."""
        t1a = t1.alias()
        adapter = sql_util.ClauseAdapter(t1a, anonymize_labels=True)

        expr = select([t1.c.col2]).where(t1.c.col3 == 5).label(None)
        expr_adapted = adapter.traverse(expr)

        stmt = select([expr, expr_adapted]).order_by(expr, expr_adapted)
        self.assert_compile(
            stmt,
            "SELECT "
            "(SELECT table1.col2 FROM table1 WHERE table1.col3 = :col3_1) "
            "AS anon_1, "
            "(SELECT table1_1.col2 FROM table1 AS table1_1 "
            "WHERE table1_1.col3 = :col3_2) AS anon_2 "
            "ORDER BY anon_1, anon_2"
        )
    def test_label_anonymize_three(self):
        """ColumnAdapter(allow_label_resolve=False) flags adapted labels."""
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(
            t1a, anonymize_labels=True,
            allow_label_resolve=False)

        expr = select([t1.c.col2]).where(t1.c.col3 == 5).label(None)
        l1 = expr
        is_(l1._order_by_label_element, l1)
        eq_(l1._allow_label_resolve, True)

        expr_adapted = adapter.traverse(expr)
        l2 = expr_adapted
        is_(l2._order_by_label_element, l2)
        eq_(l2._allow_label_resolve, False)

        # traversing the same expression twice yields a consistently
        # flagged result
        l3 = adapter.traverse(expr)
        is_(l3._order_by_label_element, l3)
        eq_(l3._allow_label_resolve, False)
class SpliceJoinsTest(fixtures.TestBase, AssertsCompiledSQL):

    """Tests for sql_util.splice_joins()."""

    __dialect__ = 'default'

    @classmethod
    def setup_class(cls):
        # build four identically-shaped three-column tables once per class
        global table1, table2, table3, table4

        def _table(name):
            return table(name, column('col1'), column('col2'),
                         column('col3'))

        table1, table2, table3, table4 = [
            _table(name) for name in (
                'table1', 'table2', 'table3', 'table4')]

    def test_splice(self):
        """Splice a three-way join onto an aliased select of table1."""
        t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias()
        j = t1.join(
            t2,
            t1.c.col1 == t2.c.col1).join(
            t3,
            t2.c.col1 == t3.c.col1).join(
            t4,
            t4.c.col1 == t1.c.col1)
        s = select([t1]).where(t1.c.col2 < 5).alias()
        self.assert_compile(sql_util.splice_joins(s, j),
                            '(SELECT table1.col1 AS col1, table1.col2 '
                            'AS col2, table1.col3 AS col3 FROM table1 '
                            'WHERE table1.col2 < :col2_1) AS anon_1 '
                            'JOIN table2 ON anon_1.col1 = table2.col1 '
                            'JOIN table1 AS table1_1 ON table2.col1 = '
                            'table1_1.col1 JOIN table2 AS table2_1 ON '
                            'table2_1.col1 = anon_1.col1')

    def test_stop_on(self):
        """The stop_on argument prevents re-splicing an already-present join."""
        t1, t2, t3 = table1, table2, table3
        j1 = t1.join(t2, t1.c.col1 == t2.c.col1)
        j2 = j1.join(t3, t2.c.col1 == t3.c.col1)
        s = select([t1]).select_from(j1).alias()
        self.assert_compile(sql_util.splice_joins(s, j2),
                            '(SELECT table1.col1 AS col1, table1.col2 '
                            'AS col2, table1.col3 AS col3 FROM table1 '
                            'JOIN table2 ON table1.col1 = table2.col1) '
                            'AS anon_1 JOIN table2 ON anon_1.col1 = '
                            'table2.col1 JOIN table3 ON table2.col1 = '
                            'table3.col1')
        self.assert_compile(sql_util.splice_joins(s, j2, j1),
                            '(SELECT table1.col1 AS col1, table1.col2 '
                            'AS col2, table1.col3 AS col3 FROM table1 '
                            'JOIN table2 ON table1.col1 = table2.col1) '
                            'AS anon_1 JOIN table3 ON table2.col1 = '
                            'table3.col1')

    def test_splice_2(self):
        """Consecutive splices accumulate joins onto the same left side."""
        t2a = table2.alias()
        t3a = table3.alias()
        j1 = table1.join(
            t2a,
            table1.c.col1 == t2a.c.col1).join(
            t3a,
            t2a.c.col2 == t3a.c.col2)
        t2b = table4.alias()
        j2 = table1.join(t2b, table1.c.col3 == t2b.c.col3)
        self.assert_compile(sql_util.splice_joins(table1, j1),
                            'table1 JOIN table2 AS table2_1 ON '
                            'table1.col1 = table2_1.col1 JOIN table3 '
                            'AS table3_1 ON table2_1.col2 = '
                            'table3_1.col2')
        self.assert_compile(sql_util.splice_joins(table1, j2),
                            'table1 JOIN table4 AS table4_1 ON '
                            'table1.col3 = table4_1.col3')
        self.assert_compile(
            sql_util.splice_joins(
                sql_util.splice_joins(
                    table1,
                    j1),
                j2),
            'table1 JOIN table2 AS table2_1 ON '
            'table1.col1 = table2_1.col1 JOIN table3 '
            'AS table3_1 ON table2_1.col2 = '
            'table3_1.col2 JOIN table4 AS table4_1 ON '
            'table1.col3 = table4_1.col3')
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):

    """tests the generative capability of Select"""

    __dialect__ = 'default'

    @classmethod
    def setup_class(cls):
        global t1, t2
        t1 = table("table1",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )
        t2 = table("table2",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )

    def test_columns(self):
        """column() returns a copy; the original select is untouched."""
        s = t1.select()
        self.assert_compile(s,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1')
        select_copy = s.column(column('yyy'))
        self.assert_compile(select_copy,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3, yyy FROM table1')
        # internal collections must not be shared between copies
        assert s.columns is not select_copy.columns
        assert s._columns is not select_copy._columns
        assert s._raw_columns is not select_copy._raw_columns
        self.assert_compile(s,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1')

    def test_froms(self):
        """select_from() likewise copies rather than mutating."""
        s = t1.select()
        self.assert_compile(s,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1')
        select_copy = s.select_from(t2)
        self.assert_compile(select_copy,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1, table2')
        assert s._froms is not select_copy._froms
        self.assert_compile(s,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1')

    def test_prefixes(self):
        """prefix_with() generates a new select, leaving the source as-is."""
        s = t1.select()
        self.assert_compile(s,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1')
        select_copy = s.prefix_with('FOOBER')
        self.assert_compile(select_copy,
                            'SELECT FOOBER table1.col1, table1.col2, '
                            'table1.col3 FROM table1')
        self.assert_compile(s,
                            'SELECT table1.col1, table1.col2, '
                            'table1.col3 FROM table1')

    def test_execution_options(self):
        s = select().execution_options(foo='bar')
        s2 = s.execution_options(bar='baz')
        s3 = s.execution_options(foo='not bar')
        # The original select should not be modified.
        assert s._execution_options == dict(foo='bar')
        # s2 should have its execution_options based on s, though.
        assert s2._execution_options == dict(foo='bar', bar='baz')
        assert s3._execution_options == dict(foo='not bar')

    def test_invalid_options(self):
        # these options are rejected at the statement level
        assert_raises(
            exc.ArgumentError,
            select().execution_options, compiled_cache={}
        )

        assert_raises(
            exc.ArgumentError,
            select().execution_options,
            isolation_level='READ_COMMITTED'
        )

    # this feature not available yet
    def _NOTYET_test_execution_options_in_kwargs(self):
        s = select(execution_options=dict(foo='bar'))
        s2 = s.execution_options(bar='baz')
        # The original select should not be modified.
        assert s._execution_options == dict(foo='bar')
        # s2 should have its execution_options based on s, though.
        assert s2._execution_options == dict(foo='bar', bar='baz')

    # this feature not available yet
    def _NOTYET_test_execution_options_in_text(self):
        s = text('select 42', execution_options=dict(foo='bar'))
        assert s._execution_options == dict(foo='bar')
class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL):

    """Tests the generative capability of Insert, Update"""

    __dialect__ = 'default'

    # fixme: consolidate coverage from elsewhere here and expand

    @classmethod
    def setup_class(cls):
        global t1, t2
        t1 = table("table1",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )
        t2 = table("table2",
                   column("col1"),
                   column("col2"),
                   column("col3"),
                   )

    def test_prefixes(self):
        """prefix_with() on Insert returns a new statement."""
        i = t1.insert()
        self.assert_compile(i,
                            "INSERT INTO table1 (col1, col2, col3) "
                            "VALUES (:col1, :col2, :col3)")

        gen = i.prefix_with("foober")
        self.assert_compile(gen,
                            "INSERT foober INTO table1 (col1, col2, col3) "
                            "VALUES (:col1, :col2, :col3)")

        # the original insert is unchanged
        self.assert_compile(i,
                            "INSERT INTO table1 (col1, col2, col3) "
                            "VALUES (:col1, :col2, :col3)")

        i2 = t1.insert(prefixes=['squiznart'])
        self.assert_compile(i2,
                            "INSERT squiznart INTO table1 (col1, col2, col3) "
                            "VALUES (:col1, :col2, :col3)")

        # constructor prefixes and generative prefixes accumulate
        gen2 = i2.prefix_with("quux")
        self.assert_compile(gen2,
                            "INSERT squiznart quux INTO "
                            "table1 (col1, col2, col3) "
                            "VALUES (:col1, :col2, :col3)")

    def test_add_kwarg(self):
        # successive values() calls merge keyword parameters
        i = t1.insert()
        eq_(i.parameters, None)
        i = i.values(col1=5)
        eq_(i.parameters, {"col1": 5})
        i = i.values(col2=7)
        eq_(i.parameters, {"col1": 5, "col2": 7})

    def test_via_tuple_single(self):
        # a positional tuple maps onto the table's columns in order
        i = t1.insert()
        eq_(i.parameters, None)
        i = i.values((5, 6, 7))
        eq_(i.parameters, {"col1": 5, "col2": 6, "col3": 7})

    def test_kw_and_dict_simulatenously_single(self):
        i = t1.insert()
        i = i.values({"col1": 5}, col2=7)
        eq_(i.parameters, {"col1": 5, "col2": 7})

    def test_via_tuple_multi(self):
        # a list of tuples produces a multi-parameter-set insert
        i = t1.insert()
        eq_(i.parameters, None)
        i = i.values([(5, 6, 7), (8, 9, 10)])
        eq_(i.parameters, [
            {"col1": 5, "col2": 6, "col3": 7},
            {"col1": 8, "col2": 9, "col3": 10},
        ]
        )

    def test_inline_values_single(self):
        i = t1.insert(values={"col1": 5})
        eq_(i.parameters, {"col1": 5})
        is_(i._has_multi_parameters, False)

    def test_inline_values_multi(self):
        i = t1.insert(values=[{"col1": 5}, {"col1": 6}])
        eq_(i.parameters, [{"col1": 5}, {"col1": 6}])
        is_(i._has_multi_parameters, True)

    def test_add_dictionary(self):
        i = t1.insert()
        eq_(i.parameters, None)
        i = i.values({"col1": 5})
        eq_(i.parameters, {"col1": 5})
        is_(i._has_multi_parameters, False)

        i = i.values({"col1": 6})
        # note replaces
        eq_(i.parameters, {"col1": 6})
        is_(i._has_multi_parameters, False)

        i = i.values({"col2": 7})
        eq_(i.parameters, {"col1": 6, "col2": 7})
        is_(i._has_multi_parameters, False)

    def test_add_kwarg_disallowed_multi(self):
        i = t1.insert()
        i = i.values([{"col1": 5}, {"col1": 7}])
        assert_raises_message(
            exc.InvalidRequestError,
            "This construct already has multiple parameter sets.",
            i.values, col2=7
        )

    def test_cant_mix_single_multi_formats_dict_to_list(self):
        i = t1.insert().values(col1=5)
        assert_raises_message(
            exc.ArgumentError,
            "Can't mix single-values and multiple values "
            "formats in one statement",
            i.values, [{"col1": 6}]
        )

    def test_cant_mix_single_multi_formats_list_to_dict(self):
        i = t1.insert().values([{"col1": 6}])
        assert_raises_message(
            exc.ArgumentError,
            "Can't mix single-values and multiple values "
            "formats in one statement",
            i.values, {"col1": 5}
        )

    def test_erroneous_multi_args_dicts(self):
        i = t1.insert()
        assert_raises_message(
            exc.ArgumentError,
            "Only a single dictionary/tuple or list of "
            "dictionaries/tuples is accepted positionally.",
            i.values, {"col1": 5}, {"col1": 7}
        )

    def test_erroneous_multi_args_tuples(self):
        i = t1.insert()
        assert_raises_message(
            exc.ArgumentError,
            "Only a single dictionary/tuple or list of "
            "dictionaries/tuples is accepted positionally.",
            i.values, (5, 6, 7), (8, 9, 10)
        )

    def test_erroneous_multi_args_plus_kw(self):
        i = t1.insert()
        # note: "simultaenously" typo matches the library's actual message
        assert_raises_message(
            exc.ArgumentError,
            "Can't pass kwargs and multiple parameter sets simultaenously",
            i.values, [{"col1": 5}], col2=7
        )

    def test_update_no_support_multi_values(self):
        u = t1.update()
        assert_raises_message(
            exc.InvalidRequestError,
            "This construct does not support multiple parameter sets.",
            u.values, [{"col1": 5}, {"col1": 7}]
        )

    def test_update_no_support_multi_constructor(self):
        assert_raises_message(
            exc.InvalidRequestError,
            "This construct does not support multiple parameter sets.",
            t1.update, values=[{"col1": 5}, {"col1": 7}]
        )
| malkoto1/just_cook | SQLAlchemy-1.0.4/test/sql/test_generative.py | Python | gpl-2.0 | 67,780 | [
"ADF",
"VisIt"
] | 0e4b38e142ff038258bc1296c9300d5cd857c12dd9c0c1663163d3bffe66b2ce |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.validation import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================

# dtypes expected by the Cython tree implementation for X and for
# y/sample_weight respectively (presumably float32/float64 — defined in _tree).
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Split-quality criteria, keyed by the user-facing ``criterion`` string.
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}

# Splitter implementations, selected by the ``splitter`` string and by
# whether X is dense or sparse.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}

SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None,
                 presort=False):
        """Store constructor parameters verbatim; fitted state is built
        by ``fit()``.

        Abstract: only concrete subclasses are meant to be instantiated.
        """
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        self.presort = presort

        # Fitted attributes, all populated by fit(); None until then.
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort == True and issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms which
# desire presorting must do presorting themselves and pass that matrix
# into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        Split quality measure: "gini" for the Gini impurity, "entropy" for
        the information gain.
    splitter : string, optional (default="best")
        Split strategy at each node: "best" for the best split, "random"
        for the best random split.
    max_features : int, float, string or None, optional (default=None)
        Number of features considered when looking for the best split: an
        int (that many features), a float (fraction of n_features),
        "auto"/"sqrt" (sqrt(n_features)), "log2" (log2(n_features)), or
        None (all features). The search does not stop until at least one
        valid partition is found, even if more than ``max_features``
        features must be inspected.
    max_depth : int or None, optional (default=None)
        Maximum tree depth; None expands nodes until all leaves are pure or
        contain fewer than min_samples_split samples. Ignored when
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        Minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        Minimum number of samples required at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with at most this many leaves in best-first fashion
        (best = relative impurity reduction); overrides ``max_depth`` when
        not None.
    class_weight : dict, list of dicts, "balanced" or None, optional
        (default=None)
        Weights associated with classes as ``{class_label: weight}`` (one
        dict per output for multi-output y). "balanced" uses
        ``n_samples / (n_classes * np.bincount(y))``. These weights are
        multiplied with ``sample_weight`` when both are given.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use the global ``np.random`` state).
    presort : bool, optional (default=False)
        Whether to presort the data to speed up split finding. Helpful for
        small datasets or restricted depth; may slow down training on large
        datasets.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        Class labels (a list of arrays for multi-output problems).
    feature_importances_ : array of shape = [n_features]
        The (normalized) total criterion reduction per feature, also known
        as the Gini importance.
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        Number of classes (per output for multi-output problems).
    n_features_ : int
        Number of features seen when ``fit`` is performed.
    n_outputs_ : int
        Number of outputs seen when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    """

    def __init__(self, criterion="gini", splitter="best", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0., max_features=None,
                 random_state=None, max_leaf_nodes=None, class_weight=None,
                 presort=False):
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight, random_state=random_state,
            presort=presort)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the
        same class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally converted to ``dtype=np.float32``
            and, when sparse, to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples, ordered as in the
            attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        raw = self.tree_.predict(X)

        def normalized(counts):
            # Turn per-leaf class counts into probabilities; rows whose
            # total is zero are left as all-zero instead of dividing by 0.
            totals = counts.sum(axis=1)[:, np.newaxis]
            totals[totals == 0.0] = 1.0
            counts /= totals
            return counts

        if self.n_outputs_ == 1:
            return normalized(raw[:, :self.n_classes_])

        return [normalized(raw[:, k, :self.n_classes_[k]])
                for k in range(self.n_outputs_)]

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally converted to ``dtype=np.float32``
            and, when sparse, to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples, ordered as in
            the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        return [np.log(p) for p in proba]
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        Split quality measure; only "mse" (mean squared error, equal to
        variance reduction as feature selection criterion) is supported.
    splitter : string, optional (default="best")
        Split strategy at each node: "best" for the best split, "random"
        for the best random split.
    max_features : int, float, string or None, optional (default=None)
        Number of features considered when looking for the best split: an
        int (that many features), a float (fraction of n_features),
        "auto" (n_features), "sqrt" (sqrt(n_features)), "log2"
        (log2(n_features)), or None (all features). The search does not
        stop until at least one valid partition is found, even if more than
        ``max_features`` features must be inspected.
    max_depth : int or None, optional (default=None)
        Maximum tree depth; None expands nodes until all leaves are pure or
        contain fewer than min_samples_split samples. Ignored when
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        Minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        Minimum number of samples required at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with at most this many leaves in best-first fashion
        (best = relative impurity reduction); overrides ``max_depth`` when
        not None.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use the global ``np.random`` state).
    presort : bool, optional (default=False)
        Whether to presort the data to speed up split finding. Helpful for
        small datasets or restricted depth; may slow down training on large
        datasets.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The (normalized) total criterion reduction per feature, also known
        as the Gini importance.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        Number of features seen when ``fit`` is performed.
    n_outputs_ : int
        Number of outputs seen when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    """

    def __init__(self, criterion="mse", splitter="best", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0., max_features=None,
                 random_state=None, max_leaf_nodes=None, presort=False):
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            random_state=random_state, presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are
    built: when looking for the best split of a node, random splits are
    drawn for each of the `max_features` randomly selected features and the
    best of those is chosen. With `max_features` set to 1 this builds a
    totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self, criterion="gini", splitter="random", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0., max_features="auto",
                 random_state=None, max_leaf_nodes=None, class_weight=None):
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight, random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are
    built: when looking for the best split of a node, random splits are
    drawn for each of the `max_features` randomly selected features and the
    best of those is chosen. With `max_features` set to 1 this builds a
    totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self, criterion="mse", splitter="random", max_depth=None,
                 min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0., max_features="auto",
                 random_state=None, max_leaf_nodes=None):
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
| mayblue9/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 37,579 | [
"Brian"
] | 7d690f5a20b617a7c02bca1d390899239b3236d03938873a8ad0aa7696060b9f |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# Plotting the results of natural_reservoir.i
import os
import sys
import matplotlib.pyplot as plt

# Read the Postprocessor CSV written by natural_reservoir.i.  The first line
# is the header; the first data row is also skipped (presumably the t=0
# state, which would make the relative-change plots trivial -- TODO confirm
# against the simulation output).
with open("natural_reservoir_out.csv", "r") as f:
    header = f.readline().strip().split(",")
    data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:]]

# Column name -> column index in a data row
index = {name: i for i, name in enumerate(header)}

# Simulation time is stored in seconds; convert to years
years = [d[index["time"]] / 3600.0 / 24.0 / 365.0 for d in data]
pH = [d[index["pH"]] for d in data]
kg_solvent_H2O = [d[index["kg_solvent_H2O"]] for d in data]

# Individual minerals, plus "mineral" which is the total over all of them
all_minerals = ["Albite", "Anhydrite", "Anorthite", "Calcite", "Chalcedony", "Clinochl-7A", "Illite", "K-feldspar", "Kaolinite", "Quartz", "Paragonite", "Phlogopite", "Zoisite", "Laumontite", "mineral"]

# Absolute volume (cm^3), change and percentage change relative to the start
cm3 = {m: [row[index["cm3_" + m]] for row in data] for m in all_minerals}
change = {m: [c - cm3[m][0] for c in cm3[m]] for m in all_minerals}
percentage_change = {m: [100 * (c - cm3[m][0]) / cm3[m][0] for c in cm3[m]]
                     for m in all_minerals}

def ordered_by_final(series):
    """Return individual mineral names (excluding the "mineral" total) sorted
    by their final value, so the plot legend is ordered sensibly."""
    return [m for _, m in sorted((series[m][-1], m) for m in all_minerals[:-1])]

# Plot the absolute changes in mineral volume
plt.figure(0)
for mineral in reversed(ordered_by_final(change)):
    plt.semilogx(years, change[mineral], label=mineral)
plt.semilogx(years, change["mineral"], 'k--', label="Sum")
plt.legend()
plt.ylabel("Mineral volume change (cm$^{3}$)")
plt.xlabel("Years")
plt.title("Natural mineral volume change in the reservoir")
plt.tight_layout()
plt.savefig("../../../../geochemistry/doc/content/media/geochemistry/natural_reservoir_minerals.png")

# Plot the percentage changes in mineral volume
plt.figure(1)
for mineral in reversed(ordered_by_final(percentage_change)):
    plt.semilogx(years, percentage_change[mineral], label=mineral)
plt.semilogx(years, percentage_change["mineral"], 'k--', label="Sum")
plt.ylim(-100, 100)
plt.legend()
plt.ylabel("Percentage volume change (%)")
plt.xlabel("Years")
plt.title("Natural mineral volume change in the reservoir")
plt.tight_layout()
plt.savefig("../../../../geochemistry/doc/content/media/geochemistry/natural_reservoir_mineral_percentage.png")

# Water mass and pH on a shared time axis using twin y-axes.
# Fix: the previous code called plt.figure(2) immediately before
# plt.subplots(); subplots() creates its own figure, so figure 2 stayed
# empty and plt.show() displayed it as a blank window.
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.semilogx(years, kg_solvent_H2O, 'g-')
ax2.semilogx(years, pH, 'b-')
ax1.set_xlabel('Years')
ax1.set_ylabel('Solvent water mass (kg)', color='g')
ax2.set_ylabel('pH', color='b')
plt.title("Water changes in the reservoir")
plt.tight_layout()
plt.savefig("../../../../geochemistry/doc/content/media/geochemistry/natural_reservoir_solution.png")
plt.show()
sys.exit(0)
"MOOSE"
] | 3d7258e7555124be559c45695315810a314e04487ba47349cf2643d9ae609d40 |
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Simple protein analysis.
Example::
X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGTRDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEECLFLERLEENHYNTYTSKKHAEKNWFVGLKKNGSCKRGPRTHYGQKAILFLPLPV")
print(X.count_amino_acids())
print(X.get_amino_acids_percent())
print(X.molecular_weight())
print(X.aromaticity())
print(X.instability_index())
print(X.flexibility())
print(X.isoelectric_point())
print(X.secondary_structure_fraction())
print(X.protein_scale(ProtParamData.kd, 9, 0.4))
"""
from __future__ import print_function
import sys
from Bio.SeqUtils import ProtParamData # Local
from Bio.SeqUtils import IsoelectricPoint # Local
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
from Bio.SeqUtils import molecular_weight
__docformat__ = "restructuredtext en"
class ProteinAnalysis(object):
    """Class containing methods for protein analysis.

    The constructor takes two arguments.

    The first is the protein sequence as a string, which is converted to a
    ``Bio.Seq.Seq`` with the IUPAC protein alphabet (this also checks that
    the sequence really is a protein sequence and not anything else).

    The second argument is optional. If set to True, the weight of the
    amino acids will be calculated using their monoisotopic mass (the
    weight of the most abundant isotopes for each element) instead of the
    IUPAC average molecular mass (the default, used when the flag is False
    or omitted).
    """

    def __init__(self, prot_sequence, monoisotopic=False):
        if prot_sequence.islower():
            self.sequence = Seq(prot_sequence.upper(), IUPAC.protein)
        else:
            self.sequence = Seq(prot_sequence, IUPAC.protein)
        # Lazily computed caches filled by count_amino_acids() and
        # get_amino_acids_percent() respectively.
        self.amino_acids_content = None
        self.amino_acids_percent = None
        self.length = len(self.sequence)
        self.monoisotopic = monoisotopic

    def count_amino_acids(self):
        """Count standard amino acids, returns a dict.

        Counts how many times each standard amino acid occurs in the
        sequence and returns a dictionary {AminoAcid: count}.

        The return value is cached in self.amino_acids_content and is not
        recalculated upon subsequent calls.
        """
        if self.amino_acids_content is None:
            counts = dict((aa, 0) for aa in IUPACData.protein_letters)
            for aa in counts:
                counts[aa] = self.sequence.count(aa)
            self.amino_acids_content = counts
        return self.amino_acids_content

    def get_amino_acids_percent(self):
        """Calculate the amino acid content in percentages.

        Same as count_amino_acids, but each count is divided by the
        sequence length. Returns a dictionary {AminoAcid: fraction}.

        The return value is cached in self.amino_acids_percent.
        """
        if self.amino_acids_percent is None:
            aa_counts = self.count_amino_acids()
            percentages = {}
            for aa in aa_counts:
                percentages[aa] = aa_counts[aa] / float(self.length)
            self.amino_acids_percent = percentages
        return self.amino_acids_percent

    def molecular_weight(self):
        """Calculate MW from Protein sequence."""
        return molecular_weight(self.sequence, monoisotopic=self.monoisotopic)

    def aromaticity(self):
        """Calculate the aromaticity according to Lobry, 1994.

        The aromaticity value of a protein is simply the relative frequency
        of Phe+Trp+Tyr.
        """
        aromatic_aas = 'YWF'
        aa_percentages = self.get_amino_acids_percent()
        return sum(aa_percentages[aa] for aa in aromatic_aas)

    def instability_index(self):
        """Calculate the instability index according to Guruprasad et al 1990.

        Implementation of the method of Guruprasad et al. 1990 to test a
        protein for stability. Any value above 40 means the protein is
        unstable (has a short half life).

        See: Guruprasad K., Reddy B.V.B., Pandit M.W.
        Protein Engineering 4:155-161(1990).
        """
        index = ProtParamData.DIWV
        score = 0.0

        for i in range(self.length - 1):
            # Renamed from `this, next` so the builtin next() is not shadowed.
            first, second = self.sequence[i:i + 2]
            score += index[first][second]

        return (10.0 / self.length) * score

    def flexibility(self):
        """Calculate the flexibility according to Vihinen, 1994.

        Uses a fixed window of 9 residues because the published parameters
        are specific to that window size, hence no window argument.
        """
        flexibilities = ProtParamData.Flex
        window_size = 9
        weights = [0.25, 0.4375, 0.625, 0.8125, 1]
        scores = []

        for i in range(self.length - window_size):
            subsequence = self.sequence[i:i + window_size]
            score = 0.0

            # Symmetric pairs (0,8), (1,7), (2,6), (3,5) share a weight.
            for j in range(window_size // 2):
                front = subsequence[j]
                back = subsequence[window_size - j - 1]
                score += (flexibilities[front] + flexibilities[back]) * weights[j]

            # Bug fix: the center of a 9-residue window is index 4
            # (window_size // 2). The previous code used index 5
            # (window_size // 2 + 1), which double-counted residue 5 and
            # never used the true central residue.
            middle = subsequence[window_size // 2]
            score += flexibilities[middle]

            # 5.25 = 2 * sum(weights[:4]) + 1, i.e. the total weight mass.
            scores.append(score / 5.25)

        return scores

    def gravy(self):
        """Calculate the gravy according to Kyte and Doolittle."""
        total_gravy = sum(ProtParamData.kd[aa] for aa in self.sequence)
        return total_gravy / self.length

    def _weight_list(self, window, edge):
        """Make a list of relative weights of the window edges.

        The weights rise linearly from `edge` at the window boundary toward
        1 at the center; only the left half is generated. For a window of
        size 9 and edge 0.4 you get [0.4, 0.55, 0.7, 0.85].
        """
        unit = 2 * (1.0 - edge) / (window - 1)
        weights = [0.0] * (window // 2)
        for i in range(window // 2):
            weights[i] = edge + unit * i
        return weights

    def protein_scale(self, param_dict, window, edge=1.0):
        """Compute a profile by any amino acid scale.

        An amino acid scale is defined by a numerical value assigned to each
        type of amino acid (e.g. hydrophobicity or secondary structure
        conformational parameters).

        WindowSize: length of the interval used for each profile value. For
        a window size n, the (n-1)/2 neighboring residues on each side of
        residue i contribute to its score, optionally weighted by their
        position in the window.

        Edge: the central residue always has weight 1. By default all other
        window positions also weigh 1, but setting edge to a value between
        0 and 1 makes the weights fall off linearly toward the window ends.
        For Edge=0.4 and a window of 5 the weights are 0.4, 0.7, 1.0, 0.7,
        0.4.

        Returns a list of values which can be plotted to view the change
        along the protein sequence. Similar to expasy's ProtScale:
        http://www.expasy.org/cgi-bin/protscale.pl
        """
        # _weight_list returns only one tail; e.g. [0.4, 0.7] for the full
        # weight vector [0.4, 0.7, 1.0, 0.7, 0.4]. Both tails are applied in
        # the inner loop below, and the center (weight 1) is added last.
        weights = self._weight_list(window, edge)
        scores = []

        # Each window score is divided by the total weight mass
        # (* 2 + 1 because the weight list is one-sided).
        sum_of_weights = sum(weights) * 2 + 1

        for i in range(self.length - window + 1):
            subsequence = self.sequence[i:i + window]
            score = 0.0

            for j in range(window // 2):
                # Walk from the outside of the window towards the middle.
                # try/except avoids raising on a non-standard amino acid.
                try:
                    front = param_dict[subsequence[j]]
                    back = param_dict[subsequence[window - j - 1]]
                    score += weights[j] * front + weights[j] * back
                except KeyError:
                    sys.stderr.write('warning: %s or %s is not a standard amino acid.\n' %
                                     (subsequence[j], subsequence[window - j - 1]))

            # Now add the middle value, which always has a weight of 1.
            middle = subsequence[window // 2]
            if middle in param_dict:
                score += param_dict[middle]
            else:
                sys.stderr.write('warning: %s is not a standard amino acid.\n' % (middle))

            scores.append(score / sum_of_weights)

        return scores

    def isoelectric_point(self):
        """Calculate the isoelectric point.

        Uses the module IsoelectricPoint to calculate the pI of a protein.
        """
        aa_content = self.count_amino_acids()
        ie_point = IsoelectricPoint.IsoelectricPoint(self.sequence, aa_content)
        return ie_point.pi()

    def secondary_structure_fraction(self):
        """Calculate fraction of helix, turn and sheet.

        Returns a tuple (helix, turn, sheet) with the fraction of amino
        acids which tend to be in each structure:

        - Helix: V, I, Y, F, W, L.
        - Turn:  N, P, G, S.
        - Sheet: E, M, A, L.
        """
        aa_percentages = self.get_amino_acids_percent()
        helix = sum(aa_percentages[r] for r in 'VIYFWL')
        turn = sum(aa_percentages[r] for r in 'NPGS')
        sheet = sum(aa_percentages[r] for r in 'EMAL')
        return helix, turn, sheet
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SeqUtils/ProtParam.py | Python | gpl-2.0 | 10,964 | [
"Biopython"
] | fbf2315b8bd85f89ebe87dcc074562bf646d293c63884c0c9efed9eba8e2f3e5 |
#!/usr/bin/env python
import unittest
import numpy
from pyscf import lib, gto, scf, dft
from pyscf.gw import ugw_ac
# Module-level fixture shared by all tests: a triplet oxygen atom
# (spin=2 -> two unpaired electrons) in an augmented double-zeta basis.
# All SCF/GW output is discarded via /dev/null despite verbose=7.
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = 'O'
mol.basis = 'aug-cc-pvdz'
mol.spin = 2
mol.build()

# Reference mean-field: unrestricted Kohn-Sham with the PBE0 hybrid
# functional, converged once here and reused by every test case.
mf = dft.UKS(mol)
mf.xc = 'pbe0'
mf.kernel()
def tearDownModule():
    """Release the module-level fixtures after the test run.

    Closes the /dev/null stream opened by ``mol.build()`` and drops the
    references so the Mole/UKS objects can be garbage collected.
    """
    global mol, mf
    mol.stdout.close()
    del mol, mf
class KnownValues(unittest.TestCase):
    """Regression tests against previously validated GW quasiparticle energies."""

    def test_gwac_pade(self):
        """UGW with Pade analytic continuation on top of UKS/PBE0.

        Checks the quasiparticle energies (in Hartree) of frontier orbitals
        in both spin channels against stored reference values.
        """
        # Alpha/beta occupation counts for the open-shell atom.
        nocca = (mol.nelectron + mol.spin)//2
        noccb = mol.nelectron - nocca
        gw_obj = ugw_ac.UGWAC(mf, frozen=0)
        gw_obj.linearized = False
        gw_obj.ac = 'pade'
        # Only solve for a window of orbitals around the alpha HOMO/LUMO.
        gw_obj.kernel(orbs=range(nocca-3, nocca+3))
        # Alpha HOMO/LUMO, then beta HOMO/LUMO, to 5 decimal places.
        self.assertAlmostEqual(gw_obj.mo_energy[0][nocca-1], -0.521932084529, 5)
        self.assertAlmostEqual(gw_obj.mo_energy[0][nocca], 0.167547592784, 5)
        self.assertAlmostEqual(gw_obj.mo_energy[1][noccb-1], -0.464605523684, 5)
        self.assertAlmostEqual(gw_obj.mo_energy[1][noccb], -0.0133557793765, 5)
if __name__ == "__main__":
print("Full Tests for UGW")
unittest.main()
| sunqm/pyscf | pyscf/gw/test/test_ugw.py | Python | apache-2.0 | 1,084 | [
"PySCF"
] | a2d3f5488cc930e06a185632294b6a77a32fb097da8199c0d411e90191779f6b |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
def _simple_source_info(self):
return transformer.EntityInfo(
source_code=None,
source_file=None,
namespace=None,
arg_values=None,
arg_types=None,
owner_type=None)
def test_entity_scope_tracking(self):
class TestTransformer(transformer.Base):
# The choice of note to assign to is arbitrary. Using Assign because it's
# easy to find in the tree.
def visit_Assign(self, node):
anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
return self.generic_visit(node)
# This will show up in the lambda function.
def visit_BinOp(self, node):
anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
return self.generic_visit(node)
tr = TestTransformer(self._simple_source_info())
def test_function():
a = 0
class TestClass(object):
def test_method(self):
b = 0
def inner_function(x):
c = 0
d = lambda y: (x + y)
return c, d
return b, inner_function
return a, TestClass
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
test_function_node = node.body[0]
test_class = test_function_node.body[1]
test_method = test_class.body[0]
inner_function = test_method.body[1]
lambda_node = inner_function.body[1].value
a = test_function_node.body[0]
b = test_method.body[0]
c = inner_function.body[0]
lambda_expr = lambda_node.body
self.assertEqual(
(test_function_node,), anno.getanno(a, 'enclosing_entities'))
self.assertEqual((test_function_node, test_class, test_method),
anno.getanno(b, 'enclosing_entities'))
self.assertEqual(
(test_function_node, test_class, test_method, inner_function),
anno.getanno(c, 'enclosing_entities'))
self.assertEqual((test_function_node, test_class, test_method,
inner_function, lambda_node),
anno.getanno(lambda_expr, 'enclosing_entities'))
def test_local_scope_info_stack(self):
class TestTransformer(transformer.Base):
# Extract all string constants from the block.
def visit_Str(self, node):
self.set_local('string', self.get_local('string', default='') + node.s)
return self.generic_visit(node)
def _annotate_result(self, node):
self.enter_local_scope()
node = self.generic_visit(node)
anno.setanno(node, 'test', self.get_local('string'))
self.exit_local_scope()
return node
def visit_While(self, node):
return self._annotate_result(node)
def visit_For(self, node):
return self._annotate_result(node)
tr = TestTransformer(self._simple_source_info())
def test_function(a):
"""Docstring."""
assert a == 'This should not be counted'
for i in range(3):
_ = 'a'
if i > 2:
return 'b'
else:
_ = 'c'
while True:
raise '1'
return 'nor this'
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
for_node = node.body[0].body[2]
while_node = for_node.body[1].orelse[1]
self.assertFalse(anno.hasanno(for_node, 'string'))
self.assertEqual('abc', anno.getanno(for_node, 'test'))
self.assertFalse(anno.hasanno(while_node, 'string'))
self.assertEqual('1', anno.getanno(while_node, 'test'))
def test_local_scope_info_stack_checks_integrity(self):
class TestTransformer(transformer.Base):
def visit_If(self, node):
self.enter_local_scope()
return self.generic_visit(node)
def visit_For(self, node):
node = self.generic_visit(node)
self.exit_local_scope()
return node
tr = TestTransformer(self._simple_source_info())
def no_exit(a):
if a > 0:
print(a)
return None
node, _ = parser.parse_entity(no_exit)
with self.assertRaises(AssertionError):
tr.visit(node)
def no_entry(a):
for _ in a:
print(a)
node, _ = parser.parse_entity(no_entry)
with self.assertRaises(AssertionError):
tr.visit(node)
def test_visit_block_postprocessing(self):
class TestTransformer(transformer.Base):
def _process_body_item(self, node):
if isinstance(node, gast.Assign) and (node.value.id == 'y'):
if_node = gast.If(gast.Name('x', gast.Load(), None), [node], [])
return if_node, if_node.body
return node, None
def visit_FunctionDef(self, node):
node.body = self.visit_block(
node.body, after_visit=self._process_body_item)
return node
def test_function(x, y):
z = x
z = y
return z
tr = TestTransformer(self._simple_source_info())
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
node = node.body[0]
self.assertEqual(len(node.body), 2)
self.assertTrue(isinstance(node.body[0], gast.Assign))
self.assertTrue(isinstance(node.body[1], gast.If))
self.assertTrue(isinstance(node.body[1].body[0], gast.Assign))
self.assertTrue(isinstance(node.body[1].body[1], gast.Return))
if __name__ == '__main__':
test.main()
| drpngx/tensorflow | tensorflow/contrib/autograph/pyct/transformer_test.py | Python | apache-2.0 | 6,367 | [
"VisIt"
] | 788ce57901de7b922c10f0f8514c3c82ea8a684f587346f638dc7f7cabf07b04 |
"""Runs Grad-CAM (gradient-weighted class-activation maps)."""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import copy
import argparse
import numpy
import keras.models
from keras import backend as K
from gewittergefahr.gg_io import storm_tracking_io as tracking_io
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import testing_io
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import gradcam
K.set_session(K.tf.Session(config=K.tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
allow_soft_placement=False
)))
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
CONV_LAYER_TYPE_STRINGS = ['Conv1D', 'Conv2D', 'Conv3D']
DENSE_LAYER_TYPE_STRINGS = ['Dense']
MODEL_FILE_ARG_NAME = 'model_file_name'
TARGET_CLASS_ARG_NAME = 'target_class'
TARGET_LAYER_ARG_NAME = 'target_layer_name'
EXAMPLE_DIR_ARG_NAME = 'input_example_dir_name'
STORM_METAFILE_ARG_NAME = 'input_storm_metafile_name'
NUM_EXAMPLES_ARG_NAME = 'num_examples'
RANDOMIZE_ARG_NAME = 'randomize_weights'
CASCADING_ARG_NAME = 'cascading_random'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
MODEL_FILE_HELP_STRING = (
'Path to file with trained CNN. Will be read by `cnn.read_model`.'
)
TARGET_CLASS_HELP_STRING = (
'Activation maps will be created for this class. Must be in 0...(K - 1), '
'where K = number of classes.'
)
TARGET_LAYER_HELP_STRING = (
'Name of target layer. Neuron-importance weights will be based on '
'activations in this layer.'
)
EXAMPLE_DIR_HELP_STRING = (
'Name of top-level directory with input examples. Files therein will be '
'found by `input_examples.find_example_file` and read by '
'`input_examples.read_example_file`.'
)
STORM_METAFILE_HELP_STRING = (
'Path to Pickle file with storm IDs and times. Will be read by '
'`storm_tracking_io.read_ids_and_times`.'
)
NUM_EXAMPLES_HELP_STRING = (
'Number of examples (storm objects) to read from `{0:s}`. If you want to '
'read all examples, make this non-positive.'
).format(STORM_METAFILE_ARG_NAME)
RANDOMIZE_HELP_STRING = (
'Boolean flag. If 1, will randomize weights in each convolutional and '
'dense layer before producing CAMs. This allows the '
'model-parameter-randomization test from Adebayo et al. (2018) to be '
'carried out.'
)
CASCADING_HELP_STRING = (
'[used only if `{0:s}` = 1] Boolean flag. If 1, will randomize weights in '
'a cascading manner, going from the deepest to shallowest layer. In this '
'case, when weights for layer L are randomized, weights for all deeper '
'layers are randomized as well. If 0, will do non-cascading randomization,'
' where weights for only one layer are randomized at a time.'
).format(RANDOMIZE_ARG_NAME)
OUTPUT_FILE_HELP_STRING = (
'Path to output file (will be written by `gradcam.write_standard_file`).'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + MODEL_FILE_ARG_NAME, type=str, required=True,
help=MODEL_FILE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + TARGET_CLASS_ARG_NAME, type=int, required=False, default=1,
help=TARGET_CLASS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + TARGET_LAYER_ARG_NAME, type=str, required=True,
help=TARGET_LAYER_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + EXAMPLE_DIR_ARG_NAME, type=str, required=True,
help=EXAMPLE_DIR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + STORM_METAFILE_ARG_NAME, type=str, required=True,
help=STORM_METAFILE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_EXAMPLES_ARG_NAME, type=int, required=False, default=-1,
help=NUM_EXAMPLES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + RANDOMIZE_ARG_NAME, type=int, required=False, default=0,
help=RANDOMIZE_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + CASCADING_ARG_NAME, type=int, required=False, default=0,
help=CASCADING_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING
)
def _find_conv_and_dense_layers(model_object):
    """Finds convolutional and dense layers in model object.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :return: layer_names: 1-D list with names of convolutional and dense
        layers, in the order they appear in the model.
    """
    wanted_type_strings = CONV_LAYER_TYPE_STRINGS + DENSE_LAYER_TYPE_STRINGS

    return [
        this_layer.name for this_layer in model_object.layers
        if type(this_layer).__name__ in wanted_type_strings
    ]
def _reset_weights_in_layer(model_object, layer_name):
    """Resets (or "reinitializes" or "randomizes") weights in one layer.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param layer_name: Name of layer in which to reset weights.
    """
    session_object = K.get_session()
    layer_object = model_object.get_layer(name=layer_name)

    # Re-running the kernel initializer inside the active session overwrites
    # the trained weights with fresh random draws.  NOTE(review): only the
    # kernel is re-initialized here -- the bias keeps its trained values.
    layer_object.kernel.initializer.run(session=session_object)
def _run_gradcam_one_weight_set(
model_object, target_class, target_layer_name, predictor_matrices,
training_option_dict):
"""Runs Grad-CAM with one set of weights.
T = number of input tensors to model
:param model_object: Trained CNN (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param target_class: See documentation at top of file.
:param target_layer_name: Same.
:param predictor_matrices: length-T list of numpy arrays, containing
normalized predictor matrices.
:param training_option_dict: Dictionary returned by
`cnn.read_model_metadata`.
:return: cam_matrices: length-T list of numpy arrays, containing unguided
class activations.
:return: guided_cam_matrices: length-T list of numpy arrays, containing
guided class activations.
"""
num_matrices = len(predictor_matrices)
num_examples = predictor_matrices[0].shape[0]
cam_matrices = [None] * num_matrices
guided_cam_matrices = [None] * num_matrices
new_model_object = None
for i in range(num_examples):
print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
i + 1, num_examples
))
these_predictor_matrices = [a[[i], ...] for a in predictor_matrices]
these_cam_matrices = gradcam.run_gradcam(
model_object=model_object,
list_of_input_matrices=these_predictor_matrices,
target_class=target_class, target_layer_name=target_layer_name
)
print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
i + 1, num_examples
))
these_guided_cam_matrices, new_model_object = (
gradcam.run_guided_gradcam(
orig_model_object=model_object,
list_of_input_matrices=these_predictor_matrices,
target_layer_name=target_layer_name,
list_of_cam_matrices=these_cam_matrices,
new_model_object=new_model_object)
)
if all([a is None for a in cam_matrices]):
for k in range(num_matrices):
if these_cam_matrices[k] is None:
continue
these_dim = numpy.array(
(num_examples,) + these_cam_matrices[k].shape[1:], dtype=int
)
cam_matrices[k] = numpy.full(these_dim, numpy.nan)
these_dim = numpy.array(
(num_examples,) + these_guided_cam_matrices[k].shape[1:],
dtype=int
)
guided_cam_matrices[k] = numpy.full(these_dim, numpy.nan)
for k in range(num_matrices):
if these_cam_matrices[k] is None:
continue
cam_matrices[k][i, ...] = these_cam_matrices[k][0, ...]
guided_cam_matrices[k][i, ...] = (
these_guided_cam_matrices[k][0, ...]
)
upsample_refl = training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]
if upsample_refl:
cam_matrices[0] = numpy.expand_dims(cam_matrices[0], axis=-1)
num_channels = predictor_matrices[0].shape[-1]
cam_matrices[0] = numpy.repeat(
a=cam_matrices[0], repeats=num_channels, axis=-1
)
cam_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=cam_matrices,
training_option_dict=training_option_dict
)
cam_matrices[0] = cam_matrices[0][..., 0]
cam_matrices[1] = cam_matrices[1][..., 0]
guided_cam_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=guided_cam_matrices,
training_option_dict=training_option_dict
)
return cam_matrices, guided_cam_matrices
def _run(model_file_name, target_class, target_layer_name, top_example_dir_name,
storm_metafile_name, num_examples, randomize_weights, cascading_random,
output_file_name):
"""Runs Grad-CAM (gradient-weighted class-activation maps).
This is effectively the main method.
:param model_file_name: See documentation at top of file.
:param target_class: Same.
:param target_layer_name: Same.
:param top_example_dir_name: Same.
:param storm_metafile_name: Same.
:param num_examples: Same.
:param randomize_weights: Same.
:param cascading_random: Same.
:param output_file_name: Same.
"""
file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
# Read model and metadata.
print('Reading model from: "{0:s}"...'.format(model_file_name))
model_object = cnn.read_model(model_file_name)
model_metafile_name = '{0:s}/model_metadata.p'.format(
os.path.split(model_file_name)[0]
)
print('Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
output_dir_name, pathless_output_file_name = os.path.split(output_file_name)
extensionless_output_file_name, output_file_extension = os.path.splitext(
pathless_output_file_name)
if randomize_weights:
conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
conv_dense_layer_names.reverse()
num_sets = len(conv_dense_layer_names)
else:
conv_dense_layer_names = []
num_sets = 1
print('Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
full_storm_id_strings, storm_times_unix_sec = (
tracking_io.read_ids_and_times(storm_metafile_name)
)
print(SEPARATOR_STRING)
if 0 < num_examples < len(full_storm_id_strings):
full_storm_id_strings = full_storm_id_strings[:num_examples]
storm_times_unix_sec = storm_times_unix_sec[:num_examples]
example_dict = testing_io.read_predictors_specific_examples(
top_example_dir_name=top_example_dir_name,
desired_full_id_strings=full_storm_id_strings,
desired_times_unix_sec=storm_times_unix_sec,
option_dict=training_option_dict,
layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
)
print(SEPARATOR_STRING)
predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
sounding_pressure_matrix_pa = (
example_dict[testing_io.SOUNDING_PRESSURES_KEY]
)
print('Denormalizing model inputs...')
denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=copy.deepcopy(predictor_matrices),
training_option_dict=training_option_dict
)
denorm_predictor_matrices = model_interpretation.denormalize_data(
list_of_input_matrices=denorm_predictor_matrices,
model_metadata_dict=model_metadata_dict
)
print(SEPARATOR_STRING)
for k in range(num_sets):
if randomize_weights:
if cascading_random:
_reset_weights_in_layer(
model_object=model_object,
layer_name=conv_dense_layer_names[k]
)
this_model_object = model_object
this_output_file_name = (
'{0:s}/{1:s}_cascading-random_{2:s}{3:s}'
).format(
output_dir_name, extensionless_output_file_name,
conv_dense_layer_names[k].replace('_', '-'),
output_file_extension
)
else:
this_model_object = keras.models.Model.from_config(
model_object.get_config()
)
this_model_object.set_weights(model_object.get_weights())
_reset_weights_in_layer(
model_object=this_model_object,
layer_name=conv_dense_layer_names[k]
)
this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
output_dir_name, extensionless_output_file_name,
conv_dense_layer_names[k].replace('_', '-'),
output_file_extension
)
else:
this_model_object = model_object
this_output_file_name = output_file_name
# print(K.eval(this_model_object.get_layer(name='dense_53').weights[0]))
these_cam_matrices, these_guided_cam_matrices = (
_run_gradcam_one_weight_set(
model_object=this_model_object,
target_class=target_class, target_layer_name=target_layer_name,
predictor_matrices=predictor_matrices,
training_option_dict=training_option_dict)
)
print('Writing results to file: "{0:s}"...'.format(
this_output_file_name
))
gradcam.write_standard_file(
pickle_file_name=this_output_file_name,
denorm_predictor_matrices=denorm_predictor_matrices,
cam_matrices=these_cam_matrices,
guided_cam_matrices=these_guided_cam_matrices,
full_storm_id_strings=full_storm_id_strings,
storm_times_unix_sec=storm_times_unix_sec,
model_file_name=model_file_name, target_class=target_class,
target_layer_name=target_layer_name,
sounding_pressure_matrix_pa=sounding_pressure_matrix_pa
)
print(SEPARATOR_STRING)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
model_file_name=getattr(INPUT_ARG_OBJECT, MODEL_FILE_ARG_NAME),
target_class=getattr(INPUT_ARG_OBJECT, TARGET_CLASS_ARG_NAME),
target_layer_name=getattr(INPUT_ARG_OBJECT, TARGET_LAYER_ARG_NAME),
top_example_dir_name=getattr(INPUT_ARG_OBJECT, EXAMPLE_DIR_ARG_NAME),
storm_metafile_name=getattr(INPUT_ARG_OBJECT, STORM_METAFILE_ARG_NAME),
num_examples=getattr(INPUT_ARG_OBJECT, NUM_EXAMPLES_ARG_NAME),
randomize_weights=bool(getattr(INPUT_ARG_OBJECT, RANDOMIZE_ARG_NAME)),
cascading_random=bool(getattr(INPUT_ARG_OBJECT, CASCADING_ARG_NAME)),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
| thunderhoser/GewitterGefahr | gewittergefahr/scripts/run_gradcam.py | Python | mit | 15,741 | [
"NEURON"
] | aee36b90cc207699bbdc810b9f509e4c8b028d22f638d9fb6acc9d46ac4b50f3 |
## \file
## \ingroup tutorial_roofit
## \notebook -nodraw
## Likelihood and minimization: fitting with constraints
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
from __future__ import print_function
import ROOT
# Create model and dataset
# ----------------------------------------------
# Construct a Gaussian pdf
x = ROOT.RooRealVar("x", "x", -10, 10)
m = ROOT.RooRealVar("m", "m", 0, -10, 10)
s = ROOT.RooRealVar("s", "s", 2, 0.1, 10)
gauss = ROOT.RooGaussian("gauss", "gauss(x,m,s)", x, m, s)
# Construct a flat pdf (polynomial of 0th order)
poly = ROOT.RooPolynomial("poly", "poly(x)", x)
# model = f*gauss + (1-f)*poly
f = ROOT.RooRealVar("f", "f", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model",
"model",
ROOT.RooArgList(
gauss,
poly),
ROOT.RooArgList(f))
# Generate small dataset for use in fitting below
d = model.generate(ROOT.RooArgSet(x), 50)
# Create constraint pdf
# -----------------------------------------
# Construct Gaussian constraint pdf on parameter f at 0.8 with
# resolution of 0.1
fconstraint = ROOT.RooGaussian(
"fconstraint",
"fconstraint",
f,
ROOT.RooFit.RooConst(0.8),
ROOT.RooFit.RooConst(0.1))
# Method 1 - add internal constraint to model
# -------------------------------------------------------------------------------------
# Multiply constraint term with regular pdf using ROOT.RooProdPdf
# Specify in fitTo() that internal constraints on parameter f should be
# used
# Multiply constraint with pdf
modelc = ROOT.RooProdPdf(
"modelc", "model with constraint", ROOT.RooArgList(model, fconstraint))
# Fit model (without use of constraint term)
r1 = model.fitTo(d, ROOT.RooFit.Save())
# Fit modelc with constraint term on parameter f
r2 = modelc.fitTo(
d,
ROOT.RooFit.Constrain(
ROOT.RooArgSet(f)),
ROOT.RooFit.Save())
# Method 2 - specify external constraint when fitting
# ------------------------------------------------------------------------------------------
# Construct another Gaussian constraint pdf on parameter f at 0.8 with
# resolution of 0.1
fconstext = ROOT.RooGaussian("fconstext", "fconstext", f, ROOT.RooFit.RooConst(
0.2), ROOT.RooFit.RooConst(0.1))
# Fit with external constraint
r3 = model.fitTo(d, ROOT.RooFit.ExternalConstraints(
ROOT.RooArgSet(fconstext)), ROOT.RooFit.Save())
# Print the fit results
print("fit result without constraint (data generated at f=0.5)")
r1.Print("v")
print("fit result with internal constraint (data generated at f=0.5, is f=0.8+/-0.2)")
r2.Print("v")
print("fit result with (another) external constraint (data generated at f=0.5, is f=0.2+/-0.1)")
r3.Print("v")
| root-mirror/root | tutorials/roofit/rf604_constraints.py | Python | lgpl-2.1 | 2,705 | [
"Gaussian"
] | eecbbfac08a77a48026b03e94a6e38df2d7c4350dcc99484bd6355a9d229b006 |
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import textwrap
import numpy as np
from phonopy.phonon.group_velocity import GroupVelocity
from phonopy.harmonic.force_constants import similarity_transformation
from phonopy.phonon.thermal_properties import mode_cv as get_mode_cv
from phonopy.units import THzToEv, EV, THz, Angstrom
from phono3py.file_IO import write_pp_to_hdf5
from phono3py.phonon3.triplets import (get_grid_address, reduce_grid_points,
get_ir_grid_points,
from_coarse_to_dense_grid_points,
get_grid_points_by_rotations,
get_all_triplets)
from phono3py.other.isotope import Isotope
unit_to_WmK = ((THz * Angstrom) ** 2 / (Angstrom ** 3) * EV / THz /
(2 * np.pi)) # 2pi comes from definition of lifetime.
def all_bands_exist(interaction):
    """Return True when ``interaction`` covers every phonon band.

    The complete band set is ``0, 1, ..., 3 * num_atoms - 1``; this
    checks that ``interaction.band_indices`` is exactly that sequence.
    """
    indices = interaction.band_indices
    expected_num_bands = 3 * len(interaction.primitive)
    if len(indices) != expected_num_bands:
        return False
    return bool((indices == np.arange(expected_num_bands)).all())
def write_pp(conductivity,
             pp,
             i,
             filename=None,
             compression="gzip"):
    """Write ph-ph interaction strength at one grid point to an hdf5 file.

    :param conductivity: Conductivity instance holding grid points, smearing
        widths (sigmas) and mesh numbers.
    :param pp: Interaction instance whose strength has already been computed.
    :param i: Index into ``conductivity.get_grid_points()`` selecting the
        grid point to write.
    :param filename: Optional filename suffix forwarded to
        ``write_pp_to_hdf5``.
    :param compression: hdf5 dataset compression filter.
    """
    grid_point = conductivity.get_grid_points()[i]
    sigmas = conductivity.get_sigmas()
    sigma_cutoff = conductivity.get_sigma_cutoff_width()
    mesh = conductivity.get_mesh_numbers()
    triplets, weights, map_triplets, _ = pp.get_triplets_at_q()
    grid_address = pp.get_grid_address()
    bz_map = pp.get_bz_map()

    # map_triplets is None when no symmetry reduction information exists;
    # otherwise recover the full (unreduced) triplet list for the output.
    if map_triplets is None:
        all_triplets = None
    else:
        all_triplets = get_all_triplets(grid_point,
                                        grid_address,
                                        bz_map,
                                        mesh)

    if len(sigmas) > 1:
        print("Multiple smearing parameters were given. The last one in ")
        print("ph-ph interaction calculations was written in the file.")

    # Only the last sigma is recorded (see the warning just above).
    write_pp_to_hdf5(mesh,
                     pp=pp.get_interaction_strength(),
                     g_zero=pp.get_zero_value_positions(),
                     grid_point=grid_point,
                     triplet=triplets,
                     weight=weights,
                     triplet_map=map_triplets,
                     triplet_all=all_triplets,
                     sigma=sigmas[-1],
                     sigma_cutoff=sigma_cutoff,
                     filename=filename,
                     compression=compression)
class Conductivity(object):
def __init__(self,
interaction,
symmetry,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
mesh_divisors=None,
coarse_mesh_shifts=None,
boundary_mfp=None, # in micrometre
is_kappa_star=True,
gv_delta_q=None, # finite difference for group veolocity
is_full_pp=False,
log_level=0):
if sigmas is None:
self._sigmas = []
else:
self._sigmas = sigmas
self._sigma_cutoff = sigma_cutoff
self._pp = interaction
self._is_full_pp = is_full_pp
self._collision = None # has to be set derived class
if temperatures is None:
self._temperatures = None
else:
self._temperatures = np.array(temperatures, dtype='double')
self._is_kappa_star = is_kappa_star
self._gv_delta_q = gv_delta_q
self._log_level = log_level
self._primitive = self._pp.primitive
self._dm = self._pp.dynamical_matrix
self._frequency_factor_to_THz = self._pp.frequency_factor_to_THz
self._cutoff_frequency = self._pp.cutoff_frequency
self._boundary_mfp = boundary_mfp
self._symmetry = symmetry
if not self._is_kappa_star:
self._point_operations = np.array([np.eye(3, dtype='intc')],
dtype='intc')
else:
self._point_operations = symmetry.get_reciprocal_operations()
rec_lat = np.linalg.inv(self._primitive.get_cell())
self._rotations_cartesian = np.array(
[similarity_transformation(rec_lat, r)
for r in self._point_operations], dtype='double')
self._grid_points = None
self._grid_weights = None
self._grid_address = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._read_gamma = False
self._read_gamma_iso = False
self._kappa = None
self._mode_kappa = None
self._frequencies = None
self._cv = None
self._gv = None
self._gv_sum2 = None
self._gamma = None
self._gamma_iso = None
self._num_sampling_grid_points = 0
self._mesh = None
self._mesh_divisors = None
self._coarse_mesh = None
self._coarse_mesh_shifts = None
self._set_mesh_numbers(mesh_divisors=mesh_divisors,
coarse_mesh_shifts=coarse_mesh_shifts)
volume = self._primitive.get_volume()
self._conversion_factor = unit_to_WmK / volume
self._isotope = None
self._mass_variances = None
self._is_isotope = is_isotope
if mass_variances is not None:
self._is_isotope = True
if self._is_isotope:
self._set_isotope(mass_variances)
self._grid_point_count = None
self._set_grid_properties(grid_points)
if (self._dm.is_nac() and
self._dm.get_nac_method() == 'gonze' and
self._gv_delta_q is None):
self._gv_delta_q = 1e-5
if self._log_level:
msg = "Group velocity calculation:\n"
text = ("Analytical derivative of dynamical matrix is not "
"implemented for NAC by Gonze et al. Instead "
"numerical derivative of it is used with dq=1e-5 "
"for group velocity calculation.")
msg += textwrap.fill(text,
initial_indent=" ",
subsequent_indent=" ",
width=70)
print(msg)
self._gv_obj = GroupVelocity(
self._dm,
q_length=self._gv_delta_q,
symmetry=self._symmetry,
frequency_factor_to_THz=self._frequency_factor_to_THz)
# gv_delta_q may be changed.
self._gv_delta_q = self._gv_obj.get_q_length()
def __iter__(self):
return self
    def __next__(self):
        """Run the collision calculation for the next grid point.

        :return: Zero-based index of the grid point that was just processed.
        :raises StopIteration: When every grid point has been handled.
        """
        if self._grid_point_count == len(self._grid_points):
            if self._log_level:
                print("=================== End of collection of collisions "
                      "===================")
            raise StopIteration
        else:
            # Derived classes implement the actual collision calculation.
            self._run_at_grid_point()
            self._grid_point_count += 1
            # The counter was already advanced; report the index finished.
            return self._grid_point_count - 1
def next(self):
return self.__next__()
def get_mesh_divisors(self):
return self._mesh_divisors
@property
def mesh_numbers(self):
return self._mesh
def get_mesh_numbers(self):
return self.mesh_numbers
def get_mode_heat_capacities(self):
return self._cv
def get_group_velocities(self):
return self._gv
def get_gv_by_gv(self):
return self._gv_sum2
def get_frequencies(self):
return self._frequencies[self._grid_points]
def get_qpoints(self):
return self._qpoints
def get_grid_points(self):
return self._grid_points
def get_grid_weights(self):
return self._grid_weights
@property
def temperatures(self):
return self._temperatures
def get_temperatures(self):
return self.temperatures
def set_temperatures(self, temperatures):
self._temperatures = temperatures
self._allocate_values()
def set_gamma(self, gamma):
self._gamma = gamma
self._read_gamma = True
def set_gamma_isotope(self, gamma_iso):
self._gamma_iso = gamma_iso
self._read_gamma_iso = True
@property
def gamma(self):
return self._gamma
def get_gamma(self):
return self.gamma
@property
def gamma_isotope(self):
return self._gamma_iso
def get_gamma_isotope(self):
return self.gamma_isotope
@property
def kappa(self):
return self._kappa
def get_kappa(self):
return self.kappa
@property
def mode_kappa(self):
return self._mode_kappa
def get_mode_kappa(self):
return self.mode_kappa
def get_sigmas(self):
return self._sigmas
def get_sigma_cutoff_width(self):
return self._sigma_cutoff
def get_grid_point_count(self):
return self._grid_point_count
def get_averaged_pp_interaction(self):
return self._averaged_pp_interaction
def _run_at_grid_point(self):
"""This has to be implementated in the derived class"""
pass
def _allocate_values(self):
"""This has to be implementated in the derived class"""
pass
def _set_grid_properties(self, grid_points):
self._grid_address = self._pp.grid_address
self._pp.set_nac_q_direction(nac_q_direction=None)
if grid_points is not None: # Specify grid points
self._grid_points = reduce_grid_points(
self._mesh_divisors,
self._grid_address,
grid_points,
coarse_mesh_shifts=self._coarse_mesh_shifts)
(self._ir_grid_points,
self._ir_grid_weights) = self._get_ir_grid_points()
elif not self._is_kappa_star: # All grid points
coarse_grid_address = get_grid_address(self._coarse_mesh)
coarse_grid_points = np.arange(np.prod(self._coarse_mesh),
dtype='uintp')
self._grid_points = from_coarse_to_dense_grid_points(
self._mesh,
self._mesh_divisors,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=self._coarse_mesh_shifts)
self._grid_weights = np.ones(len(self._grid_points), dtype='intc')
self._ir_grid_points = self._grid_points
self._ir_grid_weights = self._grid_weights
else: # Automatic sampling
self._grid_points, self._grid_weights = self._get_ir_grid_points()
self._ir_grid_points = self._grid_points
self._ir_grid_weights = self._grid_weights
self._qpoints = np.array(self._grid_address[self._grid_points] /
self._mesh.astype('double'),
dtype='double', order='C')
self._grid_point_count = 0
self._frequencies, self._eigenvectors, _ = self._pp.get_phonons()
def _get_gamma_isotope_at_sigmas(self, i):
gamma_iso = []
bz_map = self._pp.get_bz_map()
pp_freqs, pp_eigvecs, pp_phonon_done = self._pp.get_phonons()
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "Calculating Gamma of ph-isotope with "
if sigma is None:
text += "tetrahedron method"
else:
text += "sigma=%s" % sigma
print(text)
self._isotope.set_sigma(sigma)
self._isotope.set_phonons(self._grid_address,
bz_map,
pp_freqs,
pp_eigvecs,
pp_phonon_done,
dm=self._dm)
gp = self._grid_points[i]
self._isotope.set_grid_point(gp)
self._isotope.run()
gamma_iso.append(self._isotope.get_gamma())
return np.array(gamma_iso, dtype='double', order='C')
    def _set_mesh_numbers(self, mesh_divisors=None, coarse_mesh_shifts=None):
        """Set dense mesh, divisors, coarse-mesh shifts and the coarse mesh.

        Args:
            mesh_divisors: Optional per-axis integers dividing the dense mesh;
                an axis divisor that does not evenly divide the mesh number is
                replaced by 1 (with a warning printed).
            coarse_mesh_shifts: Optional per-axis booleans; a shift is only
                kept when the corresponding divisor is even (otherwise it is
                reset to False, with a warning printed).

        Side effects: sets ``_mesh``, ``_mesh_divisors``,
        ``_coarse_mesh_shifts`` (only when divisors were given) and
        ``_coarse_mesh``.
        """
        self._mesh = self._pp.mesh_numbers
        if mesh_divisors is None:
            self._mesh_divisors = np.array([1, 1, 1], dtype='intc')
        else:
            self._mesh_divisors = []
            for i, (m, n) in enumerate(zip(self._mesh, mesh_divisors)):
                if m % n == 0:
                    self._mesh_divisors.append(n)
                else:
                    # Non-dividing divisor is ignored for this axis.
                    self._mesh_divisors.append(1)
                    print(("Mesh number %d for the " +
                           ["first", "second", "third"][i] +
                           " axis is not dividable by divisor %d.") % (m, n))
            self._mesh_divisors = np.array(self._mesh_divisors, dtype='intc')
            if coarse_mesh_shifts is None:
                self._coarse_mesh_shifts = [False, False, False]
            else:
                self._coarse_mesh_shifts = coarse_mesh_shifts
            # A half-grid shift only makes sense with an even divisor.
            for i in range(3):
                if (self._coarse_mesh_shifts[i] and
                    (self._mesh_divisors[i] % 2 != 0)):
                    print("Coarse grid along " +
                          ["first", "second", "third"][i] +
                          " axis can not be shifted. Set False.")
                    self._coarse_mesh_shifts[i] = False
        self._coarse_mesh = self._mesh // self._mesh_divisors
        if self._log_level:
            print("Lifetime sampling mesh: [ %d %d %d ]" %
                  tuple(self._mesh // self._mesh_divisors))
def _get_ir_grid_points(self):
if self._coarse_mesh_shifts is None:
mesh_shifts = [False, False, False]
else:
mesh_shifts = self._coarse_mesh_shifts
(coarse_grid_points,
coarse_grid_weights,
coarse_grid_address, _) = get_ir_grid_points(
self._coarse_mesh,
self._symmetry.get_pointgroup_operations(),
mesh_shifts=mesh_shifts)
grid_points = from_coarse_to_dense_grid_points(
self._mesh,
self._mesh_divisors,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=self._coarse_mesh_shifts)
grid_weights = coarse_grid_weights
assert grid_weights.sum() == np.prod(self._mesh // self._mesh_divisors)
return grid_points, grid_weights
def _set_isotope(self, mass_variances):
if mass_variances is True:
mv = None
else:
mv = mass_variances
self._isotope = Isotope(
self._mesh,
self._primitive,
mass_variances=mv,
frequency_factor_to_THz=self._frequency_factor_to_THz,
symprec=self._symmetry.get_symmetry_tolerance(),
cutoff_frequency=self._cutoff_frequency,
lapack_zheev_uplo=self._pp.get_lapack_zheev_uplo())
self._mass_variances = self._isotope.get_mass_variances()
    def _set_harmonic_properties(self, i_irgp, i_data):
        """Store heat capacity, group velocity and v(x)v for one grid point.

        Args:
            i_irgp: Index into the sampled (irreducible) grid points.
            i_data: Destination index in the result arrays.

        Side effects: fills ``_cv[:, i_data, :]``, ``_gv[i_data]`` and
        ``_gv_sum2[i_data]``, and adds the k-star size to
        ``_num_sampling_grid_points``.
        """
        grid_point = self._grid_points[i_irgp]
        freqs = self._frequencies[grid_point][self._pp.band_indices]
        self._cv[:, i_data, :] = self._get_cv(freqs)
        gv = self._get_gv(self._qpoints[i_irgp])
        # Keep only the bands selected by band_indices.
        self._gv[i_data] = gv[self._pp.band_indices, :]
        # Outer product of group velocities (v x v) [num_k*, num_freqs, 3, 3]
        gv_by_gv_tensor, order_kstar = self._get_gv_by_gv(i_irgp, i_data)
        self._num_sampling_grid_points += order_kstar
        # Sum all vxv at k*; the six pairs are the independent components of
        # the symmetric 3x3 tensor (xx, yy, zz, yz, xz, xy).
        for j, vxv in enumerate(
                ([0, 0], [1, 1], [2, 2], [1, 2], [0, 2], [0, 1])):
            self._gv_sum2[i_data, :, j] = gv_by_gv_tensor[:, vxv[0], vxv[1]]
    def _get_gv(self, q):
        """Run the group-velocity calculator at one q-point and return its result.

        ``run`` takes a batch of q-points, so ``q`` is wrapped in a list and
        the first (only) entry of the output is returned.
        """
        self._gv_obj.run([q])
        return self._gv_obj.get_group_velocity()[0]
    def _get_gv_by_gv(self, i_irgp, i_data):
        """Symmetrized outer products of group velocities over the k-star.

        Args:
            i_irgp: Index into the sampled (irreducible) grid points.
            i_data: Index into ``self._gv`` where the velocities were stored.

        Returns:
            tuple: (gv_by_gv, order_kstar) where gv_by_gv has shape
            (num_band, 3, 3) summed over the point-group rotations and
            normalized by the orbit multiplicity, and order_kstar is the
            number of distinct grid points equivalent to this one.
        """
        rotation_map = get_grid_points_by_rotations(
            self._grid_address[self._grid_points[i_irgp]],
            self._point_operations,
            self._mesh)
        gv = self._gv[i_data]
        gv_by_gv = np.zeros((len(gv), 3, 3), dtype='double')
        # Accumulate v (x) v for every Cartesian rotation of the velocities.
        for r in self._rotations_cartesian:
            gvs_rot = np.dot(gv, r.T)
            gv_by_gv += [np.outer(r_gv, r_gv) for r_gv in gvs_rot]
        # Each distinct k-star member was visited len(rotations)/order times.
        gv_by_gv /= len(rotation_map) // len(np.unique(rotation_map))
        order_kstar = len(np.unique(rotation_map))
        # The orbit size should agree with the precomputed grid weight;
        # a mismatch means the sampling mesh breaks the crystal symmetry.
        if self._grid_weights is not None:
            if order_kstar != self._grid_weights[i_irgp]:
                if self._log_level:
                    text = ("Number of elements in k* is unequal "
                            "to number of equivalent grid-points. "
                            "This means that the mesh sampling grids break "
                            "symmetry. Please check carefully "
                            "the convergence over grid point densities.")
                    msg = textwrap.fill(text,
                                        initial_indent=" ",
                                        subsequent_indent=" ",
                                        width=70)
                    print("*" * 30 + "Warning" + "*" * 30)
                    print(msg)
                    print("*" * 67)
        return gv_by_gv, order_kstar
def _get_cv(self, freqs):
cv = np.zeros((len(self._temperatures), len(freqs)), dtype='double')
# T/freq has to be large enough to avoid divergence.
# Otherwise just set 0.
for i, f in enumerate(freqs):
finite_t = (self._temperatures > f / 100)
if f > self._cutoff_frequency:
cv[:, i] = np.where(
finite_t, get_mode_cv(
np.where(finite_t, self._temperatures, 10000),
f * THzToEv), 0)
return cv
def _get_main_diagonal(self, i, j, k):
num_band = self._primitive.get_number_of_atoms() * 3
main_diagonal = self._gamma[j, k, i].copy()
if self._gamma_iso is not None:
main_diagonal += self._gamma_iso[j, i]
if self._boundary_mfp is not None:
main_diagonal += self._get_boundary_scattering(i)
# if self._boundary_mfp is not None:
# for l in range(num_band):
# # Acoustic modes at Gamma are avoided.
# if i == 0 and l < 3:
# continue
# gv_norm = np.linalg.norm(self._gv[i, l])
# mean_free_path = (gv_norm * Angstrom * 1e6 /
# (4 * np.pi * main_diagonal[l]))
# if mean_free_path > self._boundary_mfp:
# main_diagonal[l] = (
# gv_norm / (4 * np.pi * self._boundary_mfp))
return main_diagonal
def _get_boundary_scattering(self, i):
num_band = self._primitive.get_number_of_atoms() * 3
g_boundary = np.zeros(num_band, dtype='double')
for l in range(num_band):
g_boundary[l] = (np.linalg.norm(self._gv[i, l]) * Angstrom * 1e6 /
(4 * np.pi * self._boundary_mfp))
return g_boundary
    def _show_log_header(self, i):
        """Print the per-grid-point banner (grid point, q-point, optional
        boundary-MFP and mass-variance info).  No-op unless logging is on.
        """
        if self._log_level:
            gp = self._grid_points[i]
            print("======================= Grid point %d (%d/%d) "
                  "=======================" %
                  (gp, i + 1, len(self._grid_points)))
            print("q-point: (%5.2f %5.2f %5.2f)" % tuple(self._qpoints[i]))
            if self._boundary_mfp is not None:
                # _boundary_mfp is evidently held in micrometres here;
                # values over 1000 are shown in millimetres.
                if self._boundary_mfp > 1000:
                    print("Boundary mean free path (millimetre): %.3f" %
                          (self._boundary_mfp / 1000.0))
                else:
                    print("Boundary mean free path (micrometre): %.5f" %
                          self._boundary_mfp)
            if self._is_isotope:
                print(("Mass variance parameters: " +
                       "%5.2e " * len(self._mass_variances)) %
                      tuple(self._mass_variances))
| atztogo/phono3py | phono3py/phonon3/conductivity.py | Python | bsd-3-clause | 21,914 | [
"phonopy"
] | 88a07219cbdcd148a1ea1e8f70164e24e4439da0e87faf56af34810b4cd9ac14 |
# -*- coding: utf-8
from __future__ import print_function
import pprint
# ISO 3166-1 alpha-2 -> alpha-3 country-code mapping.  Note this table also
# keeps some transitional/deprecated codes (e.g. "AN" Netherlands Antilles).
iso_two_to_three = {
    "AF": "AFG",
    "AX": "ALA",
    "AL": "ALB",
    "DZ": "DZA",
    "AS": "ASM",
    "AD": "AND",
    "AO": "AGO",
    "AI": "AIA",
    "AQ": "ATA",
    "AG": "ATG",
    "AR": "ARG",
    "AM": "ARM",
    "AW": "ABW",
    "AU": "AUS",
    "AT": "AUT",
    "AZ": "AZE",
    "BS": "BHS",
    "BH": "BHR",
    "BD": "BGD",
    "BB": "BRB",
    "BY": "BLR",
    "BE": "BEL",
    "BZ": "BLZ",
    "BJ": "BEN",
    "BM": "BMU",
    "BT": "BTN",
    "BO": "BOL",
    "BA": "BIH",
    "BW": "BWA",
    "BV": "BVT",
    "BQ": "BES",
    "BR": "BRA",
    "VG": "VGB",
    "IO": "IOT",
    "BN": "BRN",
    "BG": "BGR",
    "BF": "BFA",
    "BI": "BDI",
    "KH": "KHM",
    "CM": "CMR",
    "CA": "CAN",
    "CV": "CPV",
    "KY": "CYM",
    "CF": "CAF",
    "TD": "TCD",
    "CL": "CHL",
    "CN": "CHN",
    "HK": "HKG",
    "MO": "MAC",
    "CX": "CXR",
    "CC": "CCK",
    "CO": "COL",
    "KM": "COM",
    "CG": "COG",
    "CD": "COD",
    "CK": "COK",
    "CR": "CRI",
    "CI": "CIV",
    "HR": "HRV",
    "CU": "CUB",
    "CY": "CYP",
    "CZ": "CZE",
    "DK": "DNK",
    "DJ": "DJI",
    "DM": "DMA",
    "DO": "DOM",
    "EC": "ECU",
    "EG": "EGY",
    "SV": "SLV",
    "GQ": "GNQ",
    "ER": "ERI",
    "EE": "EST",
    "ET": "ETH",
    "FK": "FLK",
    "FO": "FRO",
    "FJ": "FJI",
    "FI": "FIN",
    "FR": "FRA",
    "GF": "GUF",
    "PF": "PYF",
    "TF": "ATF",
    "GA": "GAB",
    "GM": "GMB",
    "GE": "GEO",
    "DE": "DEU",
    "GH": "GHA",
    "GI": "GIB",
    "GR": "GRC",
    "GL": "GRL",
    "GD": "GRD",
    "GP": "GLP",
    "GU": "GUM",
    "GT": "GTM",
    "GG": "GGY",
    "GN": "GIN",
    "GW": "GNB",
    "GY": "GUY",
    "HT": "HTI",
    "HM": "HMD",
    "VA": "VAT",
    "HN": "HND",
    "HU": "HUN",
    "IS": "ISL",
    "IN": "IND",
    "ID": "IDN",
    "IR": "IRN",
    "IQ": "IRQ",
    "IE": "IRL",
    "IM": "IMN",
    "IL": "ISR",
    "IT": "ITA",
    "JM": "JAM",
    "JP": "JPN",
    "JE": "JEY",
    "JO": "JOR",
    "KZ": "KAZ",
    "KE": "KEN",
    "KI": "KIR",
    "KP": "PRK",
    "KR": "KOR",
    "KW": "KWT",
    "KG": "KGZ",
    "LA": "LAO",
    "LV": "LVA",
    "LB": "LBN",
    "LS": "LSO",
    "LR": "LBR",
    "LY": "LBY",
    "LI": "LIE",
    "LT": "LTU",
    "LU": "LUX",
    "MK": "MKD",
    "MG": "MDG",
    "MW": "MWI",
    "MY": "MYS",
    "MV": "MDV",
    "ML": "MLI",
    "MT": "MLT",
    "MH": "MHL",
    "MQ": "MTQ",
    "MR": "MRT",
    "MU": "MUS",
    "YT": "MYT",
    "MX": "MEX",
    "FM": "FSM",
    "MD": "MDA",
    "MC": "MCO",
    "MN": "MNG",
    "ME": "MNE",
    "MS": "MSR",
    "MA": "MAR",
    "MZ": "MOZ",
    "MM": "MMR",
    "NA": "NAM",
    "NR": "NRU",
    "NP": "NPL",
    "NL": "NLD",
    "AN": "ANT",
    "NC": "NCL",
    "NZ": "NZL",
    "NI": "NIC",
    "NE": "NER",
    "NG": "NGA",
    "NU": "NIU",
    "NF": "NFK",
    "MP": "MNP",
    "NO": "NOR",
    "OM": "OMN",
    "PK": "PAK",
    "PW": "PLW",
    "PS": "PSE",
    "PA": "PAN",
    "PG": "PNG",
    "PY": "PRY",
    "PE": "PER",
    "PH": "PHL",
    "PN": "PCN",
    "PL": "POL",
    "PT": "PRT",
    "PR": "PRI",
    "QA": "QAT",
    "RE": "REU",
    "RO": "ROU",
    "RU": "RUS",
    "RW": "RWA",
    "BL": "BLM",
    "SH": "SHN",
    "KN": "KNA",
    "LC": "LCA",
    "MF": "MAF",
    "PM": "SPM",
    "VC": "VCT",
    "WS": "WSM",
    "SM": "SMR",
    "ST": "STP",
    "SA": "SAU",
    "SN": "SEN",
    "RS": "SRB",
    "SC": "SYC",
    "SL": "SLE",
    "SG": "SGP",
    "SK": "SVK",
    "SI": "SVN",
    "SB": "SLB",
    "SO": "SOM",
    "ZA": "ZAF",
    "GS": "SGS",
    "SS": "SSD",
    "ES": "ESP",
    "LK": "LKA",
    "SD": "SDN",
    "SR": "SUR",
    "SJ": "SJM",
    "SZ": "SWZ",
    "SE": "SWE",
    "SX": "SXM",
    "CH": "CHE",
    "SY": "SYR",
    "TW": "TWN",
    "TJ": "TJK",
    "TZ": "TZA",
    "TH": "THA",
    "TL": "TLS",
    "TG": "TGO",
    "TK": "TKL",
    "TO": "TON",
    "TT": "TTO",
    "TN": "TUN",
    "TR": "TUR",
    "TM": "TKM",
    "TC": "TCA",
    "TV": "TUV",
    "UG": "UGA",
    "UA": "UKR",
    "AE": "ARE",
    "GB": "GBR",
    "US": "USA",
    "UM": "UMI",
    "UY": "URY",
    "UZ": "UZB",
    "VU": "VUT",
    "VE": "VEN",
    "VN": "VNM",
    "VI": "VIR",
    "WF": "WLF",
    "EH": "ESH",
    "YE": "YEM",
    "ZM": "ZMB",
    "ZW": "ZWE"
    }
# Normalization table for locality strings that carry no geographic
# information: every key maps to the literal string "None".  All keys are
# lower case, so input should be lower-cased before lookup.
# Fix: the original literal listed "unspecified" twice; the duplicate key
# (which Python silently collapses) has been removed -- same value, so
# behavior is unchanged.
kl_none = {
    "na": "None",
    "need data": "None",
    "needs data": "None",
    "no data": "None",
    "(no data available)": "None",
    "undefined": "None",
    "unknown": "None",
    "unplaced": "None",
    "unrecorded": "None",
    "unspecified": "None",
    "": "None",
    "[no higher geography data]": "None",
    "no higher geography recorded": "None",
    "not applicable": "None",
    "null": "None",
    "nodata": "None",
    "*": "None",
    "no higher geography": "None",
    "no locality data": "None",
    "0": "None",
    "`": "None",
    "-": "None",
    "[]": "None",
    "unknown location": "None",
    "not on sheet": "None",
    "unkown": "None",
    "unlisted": "None",
    "uncertain": "None",
    "tbd": "None",
    "state not listed": "None",
    "not given": "None",
    "(not listed)": "None",
    "not readable": "None",
    "none": "None",
    "[none]": "None",
    "[illegible]": "None",
    }
kl = {
"continent": {
u"eurasia": "asia",
u"africa, asia ?": "asia",
u"southeast asia": "asia",
u"asia?": "asia",
u"asia (?)": "asia",
u"asia ?": "asia",
u"africa?": "africa",
u"africa/asia?": "asia",
u"s. w. africa": "africa",
u"south east asia": "asia",
u"neotropical / south america": "south america",
u"west africa": "africa",
u"[asia]": "asia",
u"australian": "australia",
u"east central africa": "africa",
u"east coast africa": "africa",
u"west coast africa": "africa",
u"indoaustralia": "australia",
u"european seas": "europe",
u"asiape": "asia",
u"asia/pacific ocean": "asia",
u"south america?": "south america",
u"e. africa": "africa",
u"australia (continent)": "australia",
u"europe (?)": "europe",
u"asia/indoaustralia?": "asia",
u"east africa": "africa",
u"north america?": "north america",
u"south america ?": "south america",
u"europe?": "europe",
u"asia minor": "asia",
u"north america (?)": "north america",
u"australasia": "asia",
u"indoaustralia?": "australia",
u"asia?indies": "asia",
u"african": "africa",
u"africa (?)": "africa",
u"s. america": "south america",
u"north and central america": "north america",
u"middle east": "asia",
u"n. america": "north america",
u"madagascar": "africa",
u"cental america": "north america",
u"central america": "north america",
u"central america?": "north america",
u"afica": "africa",
u"afirca": "africa",
u"africa, asia": "asia",
u"africa/asia": "asia",
u"africa ()": "africa",
u"asia ()": "asia",
u"europe ()": "europe",
u"asiaindies": "asia",
u"asia/indoaustralia": "asia",
u"north america ()": "north america",
u"afrotropical": "africa",
u"neotropical": "south america",
u"neotropical / central america": "north america",
u"oriental": "asia",
u"[pacific ocean": "pacific ocean",
u"pacific ocean": "pacific ocean",
u"south indian ocean": "indian ocean",
u"indo pacific": "pacific ocean",
u"eastern atlantic": "atlantic ocean",
u"indian & n pacific": "indian ocean",
u"south pacific ocean": "pacific ocean",
u"indian ocean (probably)": "indian ocean",
u"arctic region": "arctic ocean",
u"north indian ocean": "indian ocean",
u"w. pacific": "pacific ocean",
u"n. w. atlantic": "atlantic ocean",
u"indo-pacific": "pacific ocean",
u"sw atlantic": "atlantic ocean",
u"southwestern pacific": "pacific ocean",
u"indian ocean, eastern": "indian ocean",
u"north pacific": "pacific ocean",
u"western atlantic": "atlantic ocean",
u"central pacific": "pacific ocean",
u"s. w. pacific": "pacific ocean",
u"n. atlantic": "atlantic ocean",
u"south atlantic": "atlantic ocean",
u"western indian ocean": "indian ocean",
u"indian ocean, western": "indian ocean",
u"mediterranean/atl": "mediterranean sea",
u"northern atlantic": "atlantic ocean",
u"indian ocean": "indian ocean",
u"antartic-pacific": "pacific ocean",
u"mediterranean sea": "mediterranean sea",
u"caribbean ocean": "caribbean sea",
u"nw & central pacific": "pacific ocean",
u"s. atlantic": "atlantic ocean",
u"se pacific": "pacific ocean",
u"south atlantic ocean": "atlantic ocean",
u"eastern north atlantic": "atlantic ocean",
u"west indian ocean": "indian ocean",
u"se atlantic": "atlantic ocean",
u"north atlantic": "atlantic ocean",
u"atlantic, western central": "atlantic ocean",
u"north atlantic ocean": "atlantic ocean",
u"ne pacific": "pacific ocean",
u"east pacific ocean": "pacific ocean",
u"e. tropical pacific": "pacific ocean",
u"indo-pacific ocean": "pacific ocean",
u"arctic ocean": "arctic ocean",
u"indo-west pacific": "pacific ocean",
u"s. pacific": "pacific ocean",
u"southern pacific": "pacific ocean",
u"nearctic": "arctic ocean",
u"atlantic ocean": "atlantic ocean",
u"west pacific": "pacific ocean",
u"eastern pacific": "pacific ocean",
u"south pacific": "pacific ocean",
u"western pacific": "pacific ocean",
u"atlantic islands": "atlantic ocean",
u"atlantic/pacific": "atlantic ocean",
u"pacific/indian ocean": "indian ocean",
u"north pacific ocean": "pacific ocean",
u"palearctic": "arctic ocean",
u"east mediterranean sea": "mediterranean sea",
u"nw atlantic": "atlantic ocean",
u"pacific islands": "pacific ocean",
u"ne atlantic": "atlantic ocean",
u"caribbean sea": "caribbean sea",
u"windward island": "north america",
u"windward islands": "north america",
u"pacifuc": "pacific ocean",
u"wst indies": "north america",
u"pacifc ocean": "pacific ocean",
u"caribbean": "caribbean sea",
u"carribbean": "caribbean sea",
u"indian": "indian ocean",
u"pasific": "pacific ocean",
u"carribbean sea": "caribbean sea",
u"caribbenan": "caribbean sea",
u"behring sea": "bering sea",
u"mediterranea sea": "mediterranean sea",
u"mediteranean sea": "mediterranean sea",
u"west indied": "north america",
u"weat indies": "north america",
u"west indies": "north america",
u"carribean": "caribbean sea",
u"caribean": "caribbean sea",
u"indean ocean": "indian ocean",
u"atlantic": "atlantic ocean",
u"pacific": "pacific ocean",
u"indian": "indian ocean",
u"caribbean": "caribbean sea",
u"mediterranean": "mediterranean sea",
u"antarctic": "antarctic ocean",
u"arctic": "arctic ocean",
u"antartica": "antarctica",
u"southern": "antarctic ocean",
u"southern ocean": "antarctic ocean",
},
"country": {
u"usa": "united states",
u"u.s.a.": "united states",
u"united states of america": "united states",
u"united sa": "united states",
u"united sates": "united states",
u"united stae": "united states",
u"united staes": "united states",
u"united state": "united states",
u"united stated": "united states",
u"us": "united states",
u"u.s.": "united states",
u"?usa": "united states",
u"[u.s.a.]": "united states",
u"[usa]": "united states",
u"u s a": "united states",
u"u.s.a": "united states",
u"u.s.a,": "united states",
u"u.s.a..": "united states",
u"usa,": "united states",
u"usa?": "united states",
u"usa.": "united states",
u"united stats": "united states",
u"united ststes": "united states",
u"unites states": "united states",
u"unite states": "united states",
u"united states": "united states",
u"united states (?)": "united states",
u"united states?": "united states",
u"united_states": "united states",
u"united states, canada": "united states",
u"congo (brazzaville)": "congo",
u"[central africa], congo fran\u008daise": "congo",
u"[central africa], congo": "congo",
u"[central africa, congo]": "congo",
u"central africa, congo": "congo",
u"congo/rwanda/zimbabwe": "congo",
u"congo republic": "congo",
u"republic of the congo": "congo",
u"republic of congo": "congo",
u"congo, republic of the": "congo",
u"congo to kenya": "congo",
u"congo/rwanda/zaire": "congo",
u"belgian congo [democratic republic of the congo]": "democratic republic of the congo",
u"belgian congo": "democratic republic of the congo",
u"democratic republic of the congo": "democratic republic of the congo",
u"democratic republic of congo": "democratic republic of the congo",
u"[central africa], congo [zaire]": "democratic republic of the congo",
u"congo [central africa, zaire]": "democratic republic of the congo",
u"central africa, congo [zaire]": "democratic republic of the congo",
u"congo, democratic republic of": "democratic republic of the congo",
u"congo, dem. rep. of the": "democratic republic of the congo",
u"congo, democratic republic of zaire": "democratic republic of the congo",
u"congo dr": "democratic republic of the congo",
u"democratic republic of congo/republic of congo/rwanda": "democratic republic of the congo",
u"dem. rep. of congo": "democratic republic of the congo",
u"dem. rep. congo": "democratic republic of the congo",
u"dem. republic of the congo": "democratic republic of the congo",
u"congo-kinshasa": "democratic republic of the congo",
u"(cf) zaire": "democratic republic of the congo",
u"zaire()": "democratic republic of the congo",
u"zaire].": "democratic republic of the congo",
u"zaire - enter as congo, democratic republic of": "democratic republic of the congo",
u"democratic republic of congo/tanzania": "democratic republic of the congo",
u"congo [zaire]": "democratic republic of the congo",
u"russia/china": "russia",
u"russian federation (former u.s.s.r.": "russia",
u"russian federation": "russia",
u"russia [finland prior to 1940]": "russia",
u"russian federation (former u.s.s.r.)": "russia",
u"asia bor orientalis (russian federation)": "russia",
u"united kingdom of great britain and northern ireland": "united kingdom",
u"[united kingdom]": "united kingdom",
u"[england (united kingdom)]": "united kingdom",
u"wales, united kingdom": "united kingdom",
u"france/scandinavia/united kingdom": "united kingdom",
u"united kingdom of great britain": "united kingdom",
u"england": "united kingdom",
u"france/spain/sweden/netherlands//united kingdom": "united kingdom",
u"ireland, republic of": "ireland",
u"republich of seychelles": "seychelles",
u"burundi/zaire": "burundi",
u"maldives islands": "maldives",
u"austria/hungary": "hungary",
u"[east africa], tanganyika [tanzania]": "tanzania",
u"seychelles republic": "seychelles",
u"uganda-kenya": "uganda",
u"india/nepal": "nepal",
u"dominican republoic": "dominican republic",
u"germaniae [germany]": "germany",
u"[kiribati].": "kiribati",
u"hispaniola (dom.r.+ haiti)": "haiti",
u"federal republic germany": "germany",
u"czech republic/germany/poland": "czech republic",
u"schlesien [poland]": "poland",
u"egypt/sudan": "egypt",
u"zambia/zimbabwe": "zambia",
u"bolivia, peru": "peru",
u"cameroon/nigeria": "nigeria",
u"french antilles (guedeloupe, martinique & al.)": "martinique",
u"bolivia/peru": "peru",
u"laos/thailand": "thailand",
u"[austria]": "austria",
u"malawi (nyasaland)": "malawi",
u"republic of the philippines": "philippines",
u"northern new zealand": "new zealand",
u"kingdom of norway": "norway",
u"republic of marshall islands": "marshall islands",
u"liberia/ghana": "ghana",
u"palau island": "palau",
u"[uruguay]": "uruguay",
u"samoan islands": "samoa",
u"egypt/morocco": "egypt",
u"panama/united states": "panama",
u"republic of croatia": "croatia",
u"kenya/uganda": "uganda",
u"spanish morocco": "morocco",
u"armenia/azerbaijan/georgia/russia": "azerbaijan",
u"france/switzerland": "switzerland",
u"saudi arabia/yemen/oman": "oman",
u"central siam, thailand": "thailand",
u"mexico, united states": "united states",
u"bangladesh/india": "bangladesh",
u"[brazil]": "brazil",
u"united states and canada": "united states",
u"togo or liberia": "togo",
u"maldives.": "maldives",
u"republic of kiribati": "kiribati",
u"faroe islands.": "faeroe islands",
u"faroe islands": "faeroe islands",
u"republic of senegal": "senegal",
u"burma [myanmar]": "myanmar",
u"india, nepal": "nepal",
u"mongolia [=kazakhstan]": "mongolia",
u"mexico/central america": "mexico",
u"republic of malta": "malta",
u"france/spain": "france",
u"[croatia]": "croatia",
u"kingdom of denmark": "denmark",
u"uzbekistan [turkestania]": "uzbekistan",
u"china [zhonghua]": "china",
u"austria / italy": "austria",
u"east siam, thailand": "thailand",
u"west australia": "australia",
u"malaysia/philippines": "philippines",
u"republic of the marshall islands": "marshall islands",
u"israel ()": "israel",
u"namibia/south africa": "namibia",
u"guatemala city": "guatemala",
u"[cayman islands]": "cayman islands",
u"syria or lebanon": "lebanon",
u"iran/pakistan": "iran",
u"republic of india": "india",
u"zaire/uganda": "uganda",
u"bangladesh, india": "bangladesh",
u"dominican republic": "dominican republic",
u"aruba.": "aruba",
u"post of spain": "spain",
u"french guyana": "french guiana",
u"south india": "india",
u"suecia [sweden]": "sweden",
u"independent state of samoa": "samoa",
u"south cook islands": "cook islands",
u"rwanda/uganda": "rwanda",
u"west indies [=trinidad and tobago]": "trinidad and tobago",
u"east siam [=thailand]": "thailand",
u"central china": "china",
u"kenya/tanzania": "tanzania",
u"[chile ]": "chile",
u"india & iran": "iran",
u"[british guiana (guyana)]": "guyana",
u"turkey (label says \"t\u00fcrkei\"": "turkey",
u"rep. south africa": "south africa",
u"ireland (\u00e9ire)": "ireland",
u"syria/lebanon": "lebanon",
u"[kyrgyzstan]": "kyrgyzstan",
u"mexico/el salvador": "mexico",
u"jordan/israel": "israel",
u"ecuador-galapagos islands": "ecuador",
u"india/pakistan/china": "china",
u"senegal/mali": "senegal",
u"kenya/tanzania/zanzibar": "tanzania",
u"new zealand.": "new zealand",
u"state of israel": "israel",
u"british north borneo [malaysia]": "malaysia",
u"germany/switzerland": "switzerland",
u"armenia/azerbaijan/georgia": "azerbaijan",
u"camerunia [= cameroon]": "cameroon",
u"fr.-indo china": "china",
u"burma/myanmar": "myanmar",
u"japan/korea/vietnam": "japan",
u"cayman islands.": "cayman islands",
u"cayenne [=french guiana]": "french guiana",
u"colombia ()": "colombia",
u"martinique.": "martinique",
u"guatemala/belize": "guatemala",
u"united states - minor outlying is.": "united states minor outlying islands",
u"bahamas ids": "bahamas",
u"[jamaica]": "jamaica",
u"south sudan": "sudan",
u"[germany]": "germany",
u"siam [thailand]": "thailand",
u"republic of niger": "niger",
u"palau & federated states of micronesia": "palau",
u"singapore/china": "china",
u"indonesia/malaysia": "malaysia",
u"mexico mexico": "mexico",
u"syria [jordan]": "jordan",
u"republic of cuba": "cuba",
u"spain (espa\u00f1a)": "spain",
u"myanmar (burma)": "myanmar",
u"saudi arabia (arabia)": "saudi arabia",
u"guyana batava [surinam]": "suriname",
u"east germany": "germany",
u"malawi/madagascar": "malawi",
u"[puerto rico]": "puerto rico",
u"republica dominicana": "dominican republic",
u"panama, boca del toro": "panama",
u"leeward islands: guadeloupe": "guadeloupe",
u"mexico/united states": "united states",
u"persia (iran)": "iran",
u"[south africa]": "south africa",
u"kingdom of tonga": "tonga",
u"lebanon or syria": "lebanon",
u"samoan ids.": "samoa",
u"montenegro (crna gora)": "montenegro",
u"united states minor outlying islands": "united states minor outlying islands",
u"pelew islands [=palau]": "palau",
u"guyana [as british guiana on the packet]": "guyana",
u"peninsular malaysia": "malaysia",
u"germany [poland]": "germany",
u"namibia (swa)": "namibia",
u"libyan arab jamahiriya": "libya",
u"russia, kazakhstan": "kazakhstan",
u"europe [probably now poland]": "poland",
u"malawi/mozambique/zimbabwe": "mozambique",
u"sri lanka (ceylon)": "sri lanka",
u"neth. ant./guadeloupe": "guadeloupe",
u"jordan/isreal": "jordan",
u"panama ()": "panama",
u"wallis and futuna": "wallis and futuna islands",
u"wallis & futuna": "wallis and futuna islands",
u"wallis & futuna grp.": "wallis and futuna islands",
u"georgia - republic of": "georgia",
u"uganda/kenya": "uganda",
u"republic of nicaragua": "nicaragua",
u"western sahara.": "western sahara",
u"the marshall islands": "marshall islands",
u"england/france/spain": "france",
u"brazil and zanzibar": "brazil",
u"federal republic of germany": "germany",
u"east malaysia": "malaysia",
u"palau.": "palau",
u"czech republic & slovakia": "czech republic",
u"[venezuela]": "venezuela",
u"south africa/swaziland": "swaziland",
u"mexico/costa rica": "mexico",
u"honduras/guatemala/costa rica": "guatemala",
u"australia (country)": "australia",
u"malaysia/singapore": "malaysia",
u"petite martinique": "martinique",
u"republic of guinea": "guinea",
u"guam island": "guam",
u"southern cook islands": "cook islands",
u"federative republic of brazil": "brazil",
u"e. malaysia": "malaysia",
u"east greenland": "greenland",
u"brazil/colombia": "brazil",
u"russia [=kazakhstan]": "kazakhstan",
u"people's republic of china": "china",
u"british indian ocean territory ocean territory": "british indian ocean territory",
u"republic of costa rica": "costa rica",
u"vanuatu id": "vanuatu",
u"argentina/patagonia": "argentina",
u"persien [=iran]": "iran",
u"republic of ecuador": "ecuador",
u"[angola]": "angola",
u"western ireland": "ireland",
u"republic of colombia": "colombia",
u"bahamas, the": "bahamas",
u"bhutan/india": "bhutan",
u"north vietnam/china": "china",
u"argentina/uruguay": "argentina",
u"china/mongolia/russia": "mongolia",
u"arabia [=jordan]": "jordan",
u"w. jamaica": "jamaica",
u"palestine [=jordan]": "jordan",
u"chile/peru": "peru",
u"[dominican republic]": "dominican republic",
u"tonga island": "tonga",
u"dominican rep.": "dominica",
u"w brazil, e peru, e ecuador": "brazil",
u"republic of south africa": "south africa",
u"republic of botswana": "botswana",
u"dominica, lesser antilles": "dominica",
u"(hungary)": "hungary",
u"guatemala, mexico": "guatemala",
u"thailand.": "thailand",
u"cook islands.": "cook islands",
u"republic of madagascar": "madagascar",
u"united states/canada": "united states",
u"martinique id.": "martinique",
u"paraguay or argentina": "argentina",
u"china/japan": "japan",
u"iran (persia)": "iran",
u"greenland (self-governing danish territory)": "greenland",
u"[argentina]": "argentina",
u"bahamas ids.": "bahamas",
u"[indonesia]": "indonesia",
u"russia [=ukraine]": "ukraine",
u"china, people's republic of": "china",
u"guadeloupe.": "guadeloupe",
u"northern ireland": "ireland",
u"israel/lebanon/syria/turkey": "lebanon",
u"tunisian republic": "tunisia",
u"paraguayq": "paraguay",
u"western taiwan": "taiwan",
u"[kiribati]": "kiribati",
u"mexico/costa rica/panama": "panama",
u"[spain]": "spain",
u"republic of haiti": "haiti",
u"the gambia": "gambia",
u"british solomon islands": "solomon islands",
u"ireland/eire": "ireland",
u"czechoslavakia/germany/*": "germany",
u"taiwan (= republic of china)": "taiwan",
u"serbia/montenegro": "serbia",
u"brasilia meridionali [c. brazil]": "brazil",
u"republic of ireland": "ireland",
u"[costa rica]": "costa rica",
u"prusia [germany]": "germany",
u"germany (west)": "germany",
u"palestine/israel": "israel",
u"dominican republic/puerto rico": "puerto rico",
u"tuvalu.": "tuvalu",
u"colombia, ecuador": "colombia",
u"palestine [jordan]": "jordan",
u"republic of liberia": "liberia",
u"w, n brazil, n peru, ecuador": "brazil",
u"palestine/lebanon": "lebanon",
u"french guiana & braz": "french guiana",
u"austria(=hungary)": "hungary",
u"czech republic (used to be in germany)": "czech republic",
u"argentina/paraguay": "argentina",
u"abyssinia [ethiopia]": "ethiopia",
u"vanuatu (new hebrides)": "vanuatu",
u"russian turkestan [=kyrgyzstan]": "kyrgyzstan",
u"saudi arabia.": "saudi arabia",
u"republic of trinidad and tobago": "trinidad and tobago",
u"peoples republic of china": "china",
u"macedonia, the former yugoslav republic of": "macedonia",
u"ecuador or peru": "peru",
u"juan fernandez islands (administrative division of chile)": "chile",
u"cambodia/vietnam": "cambodia",
u"persia [=iran]": "iran",
u"tanzania/uganda": "tanzania",
u"palestine [=israel]": "israel",
u"dalmatia [croatia]": "croatia",
u"philippines.": "philippines",
u"vanuatu.": "vanuatu",
u"turkestan/uzbekistan": "uzbekistan",
u"europe, now poland": "poland",
u"kenya-tanzania-uganda": "tanzania",
u"republic of maldives": "maldives",
u"cambodia/thailand/vietnam": "cambodia",
u"lower austria": "austria",
u"mozambique channel isl": "mozambique",
u"belgium/switzerland": "belgium",
u"united states of colombia": "united states",
u"southern france": "france",
u"[thailand]": "thailand",
u"republic of palau": "palau",
u"commonwealth of the northern mariana islands": "northern mariana islands",
u"senegal to e. democratic republic of congo; bioko (equatorial guinea)": "senegal",
u"pakistan and baluchistan": "pakistan",
u"the netherlands": "netherlands",
u"republic of peru": "peru",
u"[jordan]": "jordan",
u"[france]": "france",
u"brazil []": "brazil",
u"caucasia [=ukraine]": "ukraine",
u"[ukraine]": "ukraine",
u"madeira (portugal)": "portugal",
u"palau ids.": "palau",
u"vanuatu(new hebrides)": "vanuatu",
u"thailand [siam]": "thailand",
u"commonwealth of dominica": "dominica",
u"canada and united states": "united states",
u"montserrat.": "montserrat",
u"east nepal": "nepal",
u"mexico/nicaragua": "nicaragua",
u"england/denmark/ireland/peru/united states": "denmark",
u"argentina/brazil/paraguay": "argentina",
u"lower cook islands": "cook islands",
u"angola/namibia/south africa": "namibia",
u"neth. antil./guadeloupe": "guadeloupe",
u"tanzania, united republic of": "tanzania",
u"siam [=thailand]": "thailand",
u"czech republic/poland": "czech republic",
u"[poland]": "poland",
u"norfolk islands": "norfolk island",
u"guadeloupe island": "guadeloupe",
u"tanzania/madagascar": "tanzania",
u"argentina/colombia/peru": "argentina",
u"cura\u00e7ao/jamaica": "jamaica",
u"chile/patagonia": "chile",
u"samoa.": "samoa",
u"taiwan, province of china": "taiwan",
u"england/france": "france",
u"guiana [guyana]": "guyana",
u"n. cook islands": "cook islands",
u"french west indies [france]": "france",
u"south yemen": "yemen",
u"montserrat island": "montserrat",
u"germany/poland": "germany",
u"caucasus [=ukraine]": "ukraine",
u"\"austria\"": "austria",
u"republic of serbia": "serbia",
u"saudi arabia/oman": "oman",
u"bohemica [= czechoslovakia]": "slovakia",
u"togo or ghana": "togo",
u"jamaica/st. croix": "jamaica",
u"ferghana [=kyrgyzstan]": "kyrgyzstan",
u"afghanistan/iran/pakistan": "afghanistan",
u"republic of nauru": "nauru",
u"india/pakistan": "pakistan",
u"[vanuatu]": "vanuatu",
u"the dominican republic": "dominican republic",
u"islamic republic of iran": "iran",
u"[canada]": "canada",
u"cochin-china": "china",
u"peru/brazil": "brazil",
u"saudi arabia/yemen": "saudi arabia",
u"rhodesia [zimbabwe]": "zimbabwe",
u"sachsen [germany]": "germany",
u"argentina, chile": "argentina",
u"lebanon.": "lebanon",
u"[new zealand]": "new zealand",
u"west indies [=trindiad and tobago]": "trinidad and tobago",
u"iran (label says \"persia\")": "iran",
u"muscat/oman/saudi arabia": "oman",
u"iran/turkmenistan": "turkmenistan",
u"south thailand, malaysia peninsula": "malaysia",
u"british honduras [belize]": "belize",
u"ghana & togo": "togo",
u"gambia/guinea/senegal": "senegal",
u"austria-italy": "austria",
u"costa rica, panama": "panama",
u"zambia, zimbabwe": "zambia",
u"republic of sierra leone": "sierra leone",
u"burma (myanmar)": "myanmar",
u"new hebrides (vanuatu)": "vanuatu",
u"argentina/suriname/venezuela": "argentina",
u"north germany": "germany",
u"serbia and montenegro": "serbia",
u"new hebrides [=vanuatu]": "vanuatu",
u"colombia/suriname": "colombia",
u"singapore/thailand": "thailand",
u"[india]": "india",
u"portuguese east africa (mozambique)": "mozambique",
u"united states ()": "united states",
u"bermuda (uk overseas territory)": "bermuda",
u"zambia (northern rhodesia)": "zambia",
u"bolivia, plurinational state of": "bolivia",
u"republic of panama": "panama",
u"mexico/guatemala": "guatemala",
u"fiji island/s": "fiji",
u"bahamas islands": "bahamas",
u"south africa, republic of": "south africa",
u"guam (us unincorporated territory)": "guam",
u"costa rica/nicaragua": "nicaragua",
u"republic of georgia": "georgia",
u"formosa [taiwan]": "taiwan",
u"republic of hungary": "hungary",
u"bolivia/french guiana/suriname/venezuela": "french guiana",
u"georgia (russia)": "georgia",
u"[ethiopia]": "ethiopia",
u"bolivia/guyana/paraguay": "bolivia",
u"kingdom of the netherlands": "netherlands",
u"mauritius island/s": "mauritius",
u"afghanistan/china/ussr": "afghanistan",
u"people's democratic republic of algeria": "algeria",
u"new holland [=australia]": "australia",
u"commonwealth of the bahamas": "bahamas",
u"spain (overseas territory)": "spain",
u"northwest persia [=iran]": "iran",
u"republic of turkey": "turkey",
u"turkmenistan, uzbekistan": "uzbekistan",
u"[paraguay]": "paraguay",
u"puerto rico (us)": "puerto rico",
u"north persia [=iran]": "iran",
u"brazil/paraguay": "brazil",
u"west pakistan": "pakistan",
u"dominica, british west indies": "dominica",
u"republic of fiji islands": "fiji",
u"costa rica/colombia": "colombia",
u"lapland/norway": "norway",
u"kiribati (gilbert islands)": "kiribati",
u"latvia/estonia": "latvia",
u"polynesia [=tuvalu]": "tuvalu",
u"lebanon/syria": "lebanon",
u"poland [silesia]": "poland",
u"georgia (former u.r.s.s,known as u.s.s.r.)": "georgia",
u"colombia/peru": "peru",
u"[east africa], uganda": "uganda",
u"uganda/democratic republic of congo": "uganda",
u"east thailand": "thailand",
u"(french (indo-china)": "china",
u"south georgia": "south georgia and south sandwich islands",
u"south georgia and sandwich islands": "south georgia and south sandwich islands",
u"south georgia and the south sandwich": "south georgia and south sandwich islands",
u"bosnia and herzegovina/croatia": "croatia",
u"french guiana,": "french guiana",
u"[surinam (suriname)]": "suriname",
u"brasilia [brazil]": "brazil",
u"grand cayman islands": "cayman islands",
u"[israel]": "israel",
u"french guiana (france)": "french guiana",
u"thailand (siam)": "thailand",
u"french polynesia [tahiti]": "french polynesia",
u"republic of finland": "finland",
u"france or switzerland": "switzerland",
u"canada.": "canada",
u"canada/united states": "united states",
u"seychelles island": "seychelles",
u"west indies [trinidad and tobago]": "trinidad and tobago",
u"[panama]": "panama",
u"austria, hungary": "hungary",
u"panamas": "panama",
u"abkhazia (georgia)": "georgia",
u"republic of zambia": "zambia",
u"dominican republic/jamaica/trinidad and tobago": "jamaica",
u"[italy]": "italy",
u"russian poland": "poland",
u"china/india/sri lanka": "sri lanka",
u"cyprus/switzerland": "cyprus",
u"guyana [british guiana]": "guyana",
u"west indies (cuba)": "cuba",
u"republic of france": "france",
u"samoa and american samoa": "samoa",
u"saint pierre and miquelon (french territorial collectivity)": "saint pierre and miquelon",
u"malaysia, new guinea, etc.": "malaysia",
u"france & switzerland": "switzerland",
u"kamerun [= cameroon]": "cameroon",
u"kingdom of belgium": "belgium",
u"republic of kenya": "kenya",
u"pitcairn's island.": "pitcairn",
u"zambia (n. rhodesia)": "zambia",
u"indian ocean island/s": "india",
u"oriental republic of uruguay": "uruguay",
u"western uganda": "uganda",
u"india/sri lanka": "sri lanka",
u"india.": "india",
u"gibraltar (uk overseas territory)": "gibraltar",
u"uzbekistan (russa)": "uzbekistan",
u"denmark(zealand)": "denmark",
u"saudi arabia()": "saudi arabia",
u"syria [israel]": "israel",
u"federated states of micronesia/palau": "palau",
u"venezuela, bolivarian republic of": "venezuela",
u"yemen, saudi arabia": "saudi arabia",
u"pakistan or india": "pakistan",
u"bermuda island/s": "bermuda",
u"northeast persia [=iran]": "iran",
u"mozambique/zanzibar": "mozambique",
u"mesopotamia [=iraq]": "iraq",
u"\"hungary\"": "hungary",
u"south africa/zimbabwe": "south africa",
u"cuba/trinidad and tobago": "cuba",
u"egypt, sudan": "egypt",
u"ceylon [sri lanka]": "sri lanka",
u"sudan/uganda": "uganda",
u"republic of poland": "poland",
u"north siam [=thailand]": "thailand",
u"malaysia/thailand": "malaysia",
u"gallia [france]": "france",
u"commonwealth of australia": "australia",
u"ecuador/peru": "peru",
u"turkestan [=kyrgyzstan]": "kyrgyzstan",
u"portuguese east africa [now mozambique]": "mozambique",
u"indonesia.": "indonesia",
u"[samoa]": "samoa",
u"tanganyika [tanzania], africa": "tanzania",
u"[french guiana]": "french guiana",
u"new zealand (country not listed)": "new zealand",
u"iran, islamic republic of": "iran",
u"[mexico]": "mexico",
u"honduras []": "honduras",
u"[philippines]": "philippines",
u"jamaica ()": "jamaica",
u"northern rhodesia [zambia]": "zambia",
u"argentina/brazil/uruguay": "argentina",
u"american samoa (us)": "american samoa",
u"south manchuria [=china]": "china",
u"australia/new zealand": "new zealand",
u"siam, thailand": "thailand",
u"crimea [=ukraine]": "ukraine",
u"england/france/ireland": "france",
u"republic of iraq": "iraq",
u"syria [=jordan]": "jordan",
u"british guiana [guyana]": "guyana",
u"australia, captive": "australia",
u"grenada/guadeloupe/navassa/netherland antilles": "guadeloupe",
u"martinique (france)": "france",
u"estonia/latvia": "latvia",
u"bahamas [=turks and caicos islands]": "bahamas",
u"malaysia timor": "malaysia",
u"burma/thailand": "thailand",
u"mongolia/russia": "mongolia",
u"colombia, panama": "panama",
u"niue island": "niue",
u"israel/jordan": "israel",
u"tanzania (united republic of tanzania, formerly the united repub": "tanzania",
u"fiji islands": "fiji",
u"[palau]": "palau",
u"[australia]": "australia",
u"british east africa [= kenya]": "kenya",
u"republic_of_nicaragua": "nicaragua",
u"pitcairn island].": "pitcairn",
u"iceland/greenland": "iceland",
u"greenland (denmark)": "greenland",
u"angola/namibia": "namibia",
u"taiwan or indonesia": "taiwan",
u"bermudas": "bermuda",
u"grenada & grenadines": "grenada",
u"polynesia [=kiribati]": "kiribati",
u"northwestern taiwan": "taiwan",
u"france/italy": "france",
u"kenya/uganda/tanzania": "tanzania",
u"guadeloupe (france)": "guadeloupe",
u"swa-namibia": "namibia",
u"[guatemala]": "guatemala",
u"thailand (french indo-china)": "china",
u"colombia/ecuador": "colombia",
u"ceylon [= sri lanka]": "sri lanka",
u"west indies [=cayman islands]": "cayman islands",
u"kiribati.": "kiribati",
u"norfolk island (australia)": "australia",
u"myanmar-thailand": "thailand",
u"persia [iran]": "iran",
u"[s\u00f9dwest-afrika (angola)]": "angola",
u"republic of cyprus": "cyprus",
u"costa rica/honduras": "costa rica",
u"honduras or belize": "belize",
u"dominican republican": "dominican republic",
u"[iran]": "iran",
u"german east africa [tanzania]": "tanzania",
u"british honduras": "honduras",
u"isreal/jordan": "jordan",
u"indo china": "china",
u"republic of honduras": "honduras",
u"ethiopia, captive": "ethiopia",
u"germaniae [germany; now poland]": "germany",
u"the bahamas": "bahamas",
u"canada, united states": "united states",
u"south georgia and sandwich islands": "south georgia and south sandwich islands",
u"brazil, guyana, french guiana, surinam": "french guiana",
u"virgin islands, british (uk)": "virgin islands, british",
u"republic of singapore": "singapore",
u"west germany": "germany",
u"[martinique]": "martinique",
u"[antarctica]": "antarctica",
u"brazil-guyana boundary": "brazil",
u"west greenland": "greenland",
u"gold coast, ghana": "ghana",
u"costa rica/panama/honduras": "panama",
u"kingdom of saudi arabia": "saudi arabia",
u"slovenia (yugoslavia)": "slovenia",
u"porto rico [puerto rico]": "puerto rico",
u"[singapore]": "singapore",
u"france [italy]": "france",
u"[french polynesia]": "french polynesia",
u"kingdom of morocco": "morocco",
u"norvegia [norway]": "norway",
u"south russia [=ukraine]": "ukraine",
u"rwanda/burundi": "burundi",
u"s cook islands": "cook islands",
u"panama/costa rica": "panama",
u"belarus (belorussia)": "belarus",
u"palau islands": "palau",
u"france or germany": "france",
u"leeward islands: montserrat": "montserrat",
u"democratic republic of timor-leste": "timor-leste",
u"panama/brazil": "brazil",
u"siam (=thailand)": "thailand",
u"argentina & bolivia": "argentina",
u"pitcairn islands": "pitcairn",
u"turkistan [=kyrgyzstan]": "kyrgyzstan",
u"brazil/chile": "brazil",
u"jamaica.": "jamaica",
u"saudi arabia/united arab emirates": "united arab emirates",
u"pitcairn isl": "pitcairn",
u"germany ()": "germany",
u"istria [croatia]": "croatia",
u"germany [france]": "france",
u"ferghana [= kyrgyzstan]": "kyrgyzstan",
u"mexico city": "mexico",
u"brasilien [brazil]": "brazil",
u"[malaysia]": "malaysia",
u"palestine/jordan": "jordan",
u"island of guam": "guam",
u"china, people's republic": "china",
u"germany (east)": "germany",
u"france & netherlands": "netherlands",
u"panama/guatemala": "guatemala",
u"gambia, the": "gambia",
u"tanzania, captive": "tanzania",
u"mexico/panama": "panama",
u"usa-barbados": "barbados",
u"malawi, mozambique, tanzania": "mozambique",
u"honduras/nicaragua": "nicaragua",
u"bahamas.": "bahamas",
u"algeria/morocco": "algeria",
u"panama city": "panama",
u"republica dominicana [dominican republic]": "dominican republic",
u"[peru]": "peru",
u"indonesia, malaysia": "malaysia",
u"\"germany'": "germany",
u"\"germany\"": "germany",
u"deutsch-ostafrika [east africa, tanzania]": "tanzania",
u"south of japan": "japan",
u"puerto rico (usa)": "puerto rico",
u"netherlands, kingdom of the": "netherlands",
u"republic of bulgaria": "bulgaria",
u"pakistan and baluchistan": "pakistan",
u"\"italy\"": "italy",
u"united republic of tanzania": "tanzania",
u"republic of chile": "chile",
u"ukrainian ssr [=ukraine]": "ukraine",
u"[kenya]": "kenya",
u"soloman is.": "solomon islands",
u"montevideo, uruguay": "uruguay",
u"s. cook islands": "cook islands",
u"argentina/chile": "argentina",
u"cuba ()": "cuba",
u"gambia.": "gambia",
u"france/italy/slovenia/spain": "france",
u"dominican republic/haiti": "haiti",
u"germany, west": "germany",
u"latvia (lettland)": "latvia",
u"belize (british honduras)": "belize",
u"lower canada": "canada",
u"bolivia/paraguay/uruguay/venezuela": "venezuela",
u"trinidad/venezuela": "venezuela",
u"\u00bf probably from lebanon or syria \u00bf": "lebanon",
u"cabo verde islands": "cabo verde",
u"kingdom of sweden": "sweden",
u"kingdom of spain": "spain",
u"chile/argentina": "argentina",
u"british east africa [kenya]": "kenya",
u"australia/papua new guinea": "papua new guinea",
u"papua-new guinea": "papua new guinea",
u"independent state of papua new guinea": "papua new guinea",
u"papua (the british new guinea)": "papua new guinea",
u"papua (british new guinea)": "papua new guinea",
u"papua new guinea/solomon islands": "papua new guinea",
u"new guinea / papua new guinea": "papua new guinea",
u"papua new guinea": "papua new guinea",
u"british new guinea [papua new guinea]": "papua new guinea",
u"australia/indonesia/papua new guinea": "papua new guinea",
u"papaua new guinea": "papua new guinea",
u"papau new guinea": "papua new guinea",
u"papa new guinea": "papua new guinea",
u"anglo-egyptian sudan": "sudan",
u"united states minor outlying islands": "united states minor outlying islands",
u"somaliland": "somalia",
u"western samoa": "samoa",
u"[camaroon]": "cameroon",
u"central african empire": "central african republic",
u"central african rep": "central african republic",
u"erithrea": "eritrea",
u"ethiopis (british east africa)": "ethiopia",
u"gaboon": "gabon",
u"gabooon": "gabon",
u"guinée": "guinea",
u"madgascar": "madagascar",
u"madeira": "portugal",
u"namaibia": "namibia",
u"nambia": "namibia",
u"nambia (southwest africa)": "namibia",
u"republic of cape verde": "cabo verde",
u"rhodesia": "zimbabwe",
u"sao tome and principe": "sao tome and principe",
u"sao tomé and príncipe": "sao tome and principe",
u"são tomé and príncipe": "sao tome and principe",
u"sao tome & principe": "sao tome and principe",
u"são tomé & príncipe": "sao tome and principe",
u"soalia": "somalia",
u"somaliland": "somalia",
u"somililand": "somalia",
u"spanish overseas territory": "spain",
u"south rhodesia": "zimbabwe",
u"syria.": "syria",
u"taganyika": "tanzania",
u"tangananyika": "tanzania",
u"tanganyika": "tanzania",
u"(tanganyika)": "tanzania",
u"tanganyikat": "tanzania",
u"tanganyika terr.": "tanzania",
u"tansania": "tanzania",
u"transvaal province": "south africa",
u"zanzibar": "tanzania",
u"cameroun": "cameroon",
u"cape verde": "cabo verde",
u"german east africa": "tanzania",
u"german east africa [=tanganyika territory]": "tanzania",
u"u.s. virgin islands": "virgin islands, u.s.",
u"virgin islands of the united states": "virgin islands, u.s.",
u"virgin islands (united states of america)": "virgin islands, u.s.",
u"virgin islands, us": "virgin islands, u.s.",
u"virgin islands (u.k.)": "virgin islands, british",
u"[american samoa]": "american samoa",
u"american somoa/w. somoa": "american samoa",
u"united republic of tanganyika and zanzibar": "tanzania",
u"philiippines": "philippines",
u"philipine islands": "philippines",
u"philipines": "philippines",
u"philipines.": "philippines",
u"philipinnes": "philippines",
u"philippine id.": "philippines",
u"philippine ids": "philippines",
u"philippine is": "philippines",
u"philippine is.": "philippines",
u"philippine island": "philippines",
u"philippine island/s": "philippines",
u"philippine": "philippines",
u"philippine. islands": "philippines",
u"philipplines": "philippines",
u"phillipine ids.": "philippines",
u"phillipine islands": "philippines",
u"phillipines id.": "philippines",
u"phillipines": "philippines",
u"phillippine islands": "philippines",
u"phillippines": "philippines",
u"philppines": "philippines",
u"phippines": "philippines",
u"phlippines": "philippines",
u"pierto rico": "puerto rico",
u"porto rico": "puerto rico",
u"poto rico": "puerto rico",
u"puert rico": "puerto rico",
u"puerto rica": "puerto rico",
u"puertorico": "puerto rico",
u"caanda": "canada",
u"canaada": "canada",
u"cananda": "canada",
u"canda": "canada",
u"candada": "canada",
u"viet nam": "vietnam",
u"vatican city": "holy see",
u"laos ?": "laos",
u"laos / vietnam": "laos",
u"lao people's democratic republic" : "laos",
},
"stateprovince": {
u"(virginia )": "virginia",
u"(wy)": "wyoming",
u"[alaska]": "alaska",
u"[california]": "california",
u"[idaho or washington]": "idaho",
u"[illinois]": "illinois",
u"[indiana]": "indiana",
u"[louisiana]": "louisiana",
u"[ma]": "massachusetts",
u"[me]": "maine",
u"[mt]": "montana",
u"[nd]": "north dakota",
u"[nebraska]": "nebraska",
u"[north carolina]": "north carolina",
u"[oregon]": "oregon",
u"[pa]": "pennsylvania",
u"[south] dakota": "south dakota",
u"[tennessee/ virginia]": "tennessee",
u"[tennessee]": "tennessee",
u"[tx]": "texas",
u"[wa]": "washington",
u"[wy]": "wyoming",
u"\"virginia,\"": "virginia",
u"\"wisconsin,\"": "wisconsin",
u"ak": "alaska",
u"al": "alabama",
u"alabama & mississippi": "alabama",
u"alabama, florida": "alabama",
u"alabama, mississippi": "alabama",
u"alabama, tennessee": "alabama",
u"alabama-mississippi state line": "alabama",
u"alabama/arkansas": "alabama",
u"alabama/florida": "alabama",
u"alabama/florida/georgia/louisiana": "alabama",
u"alabama/florida/georgia/pennsylvania/tennessee/virginia": "alabama",
u"alabama/florida/georgia/texas/virginia": "alabama",
u"alabama/georgia": "alabama",
u"alabama/georgia/louisiana/mississippi": "alabama",
u"alabama/georgia/mississippi/north carolina/tennessee/virginia": "alabama",
u"alabama/georgia/north carolina/south carolina": "alabama",
u"alabama/tennessee": "alabama",
u"alaksa": "alaska",
u"alamaba": "alabama",
u"alambama": "alabama",
u"alasa": "alaska",
u"alasaka": "alaska",
u"alaska, yukon": "alaska",
u"alaska/oregon/washington": "alaska",
u"alaskaa": "alaska",
u"albama": "alabama",
u"aleutian islands": "alaska",
u"ar": "arkansas",
u"ariona": "arizona",
u"ariozona": "arizona",
u"arizon": "arizona",
u"arizona & california": "arizona",
u"arizona or utah": "arizona",
u"arizona, new mexico": "arizona",
u"arizona, utah": "arizona",
u"arizona/california": "arizona",
u"arizona/colorado/montana/nevada/wyoming": "arizona",
u"arizona/new mexico": "arizona",
u"arizona/new mexico/texas": "arizona",
u"arizona/new mexico/utah": "arizona",
u"arizona/utah": "arizona",
u"arkansa": "arkansas",
u"arkansas, mississippi": "arkansas",
u"arkansas, oklahoma": "arkansas",
u"arkansas/illinois/iowa/kansas/nebraska": "arkansas",
u"arkansas/indiana/michigan/missouri": "arkansas",
u"arkansas/texas/new mexico": "arkansas",
u"arziona": "arizona",
u"arzona": "arizona",
u"az": "arizona",
u"bergen co., new jersey": "new jersey",
u"ca [=az]": "arizona",
u"ca": "california",
u"cal": "california",
u"calafornia": "california",
u"calfornia": "california",
u"califo": "california",
u"califonia": "california",
u"califonria": "california",
u"califonrnia": "california",
u"califorani": "california",
u"califoria": "california",
u"califorina": "california",
u"califorinia": "california",
u"californa": "california",
u"californai": "california",
u"california and washington": "california",
u"california or nevada": "california",
u"california": "california",
u"california, nevada": "california",
u"california, oregon": "california",
u"california/colorado/idaho/new mexico/ utah/washington/saskatchewan": "california",
u"california/colorado/oregon/washington/wyoming/british columbia": "california",
u"california/florida": "california",
u"california/oregon": "california",
u"califronia": "california",
u"calilf": "california",
u"calilfornia": "california",
u"calilifornia": "california",
u"caliornia": "california",
u"callifornia": "california",
u"cnnecticut": "connecticut",
u"co []": "colorado",
u"co": "colorado",
u"col": "colorado",
u"colarado": "colorado",
u"colorad": "colorado",
u"colorad0": "colorado",
u"coloradao": "colorado",
u"colorade": "colorado",
u"colorado - new mexico - oklahoma": "colorado",
u"colorado, kansas": "colorado",
u"colorado, utah": "colorado",
u"colorado/mississippi/missouri/texas": "colorado",
u"colorado/montana/north dakota": "colorado",
u"colorado/texas": "colorado",
u"colorado/wyoming/montana": "colorado",
u"coloradoo": "colorado",
u"colroado": "colorado",
u"conn": "connecticut",
u"connecticut, massaschusetts": "connecticut",
u"connecticut, new jersey": "connecticut",
u"connecticut/florida": "connecticut",
u"connecticut/maine/massachusetts/new hampshire/vermont": "connecticut",
u"connecticut/maine/new hampshire/vermont": "connecticut",
u"connecticut/maryland/new hampshire/new jersey/new york/virginia/wash dc": "connecticut",
u"connecticut/maryland/new jersey/new york/pennsylania": "connecticut",
u"connecticut/massachusetts/new york/pennsylvania": "connecticut",
u"connecticut/new jersey/new york": "connecticut",
u"connecticut/ohio/massachusetts": "connecticut",
u"connecticut/pennsylvania/new york/rhode island pennsylvania": "connecticut",
u"connecticutt": "connecticut",
u"conneticut": "connecticut",
u"coorado": "colorado",
u"ct": "connecticut",
u"d.c.": "washington, d.c.",
u"dc": "washington, d.c.",
u"delaware/maryland": "delaware",
u"delaware/maryland/new jersey/new york": "delaware",
u"deleware": "delaware",
u"delware": "delaware",
u"dist. of columbia": "washington, d.c.",
u"district of colombia": "washington, d.c.",
u"district of colubia": "washington, d.c.",
u"district of columbia": "washington, d.c.",
u"distrit of columbia": "washington, d.c.",
u"douth dakota": "south dakota",
u"eastern north carolina": "north carolina",
u"fl ()": "florida",
u"fl": "florida",
u"floida": "florida",
u"flor": "florida",
u"florida or georgia": "florida",
u"florida": "florida",
u"florida-georgia": "florida",
u"florida/georgia border": "florida",
u"florida/georgia": "florida",
u"florida/georgia/louisiana/mississippi": "florida",
u"florida/louisiana/north carolina": "florida",
u"florida/mississippi": "florida",
u"florida/mississippi/alabama": "florida",
u"florida/pennsylvania": "florida",
u"florida/south carolina": "florida",
u"florida/tennessee": "florida",
u"floriida": "florida",
u"florioda": "florida",
u"floroda": "florida",
u"ga": "georgia",
u"geogia": "georgia",
u"geor": "georgia",
u"georgia or florida": "georgia",
u"georgia/ south carolina": "georgia",
u"georgia/alabama": "georgia",
u"georgia/louisiana": "georgia",
u"georgia/mississippi": "georgia",
u"georgia/tennessee/west virginia": "georgia",
u"georgia/texas": "georgia",
u"gerogia": "georgia",
u"gerorgia": "georgia",
u"ha": "hawaii",
u"haw": "hawaii",
u"haw. ids.": "hawaii",
u"hawaii": "hawaii",
u"hawaii, oahu island": "hawaii",
u"hawaii0003268": "hawaii",
u"hawaiian islands": "hawaii",
u"hawaiian": "hawaii",
u"hi": "hawaii",
u"ia": "iowa",
u"id": "idaho",
u"ida": "idaho",
u"idado": "idaho",
u"idaho, montana": "idaho",
u"idaho, washington": "idaho",
u"idaho,/illinois/indiana/iowa/michigan/utah": "idaho",
u"idaho/oregon/washington": "idaho",
u"idaho/washington/british columbia": "idaho",
u"idaho/washington/vancouver": "idaho",
u"iillinois": "illinois",
u"il": "illinois",
u"ilinois": "illinois",
u"illiinois": "illinois",
u"illiniois": "illinois",
u"illinios": "illinois",
u"illinois": "illinois",
u"illinois, new york": "illinois",
u"illinois-indiana": "illinois",
u"illinois/indiana/minnesota/wisconsin": "illinois",
u"illinois/indiana/missouri/ohio": "illinois",
u"illinois/iowa/michigan": "illinois",
u"illinois/michigan/wisconsin": "illinois",
u"illinois`": "illinois",
u"illinoise": "illinois",
u"illlinois": "illinois",
u"illnois": "illinois",
u"in": "indiana",
u"indiana/iowa/minnesota/ohio/wisconsin": "indiana",
u"indiana/kentucky": "indiana",
u"indiana/kentucky/maryland/ohio/washington d.c./west virginia": "indiana",
u"iowa co.": "iowa",
u"iowa/kansas/minnesota/oklahoma/missouri": "iowa",
u"iowa/kansas/oklahoma": "iowa",
u"iowa/michigan/missouri/ohio/wisconsin/ontario": "iowa",
u"iowa/missouri/nebraska/ohio/pennsylvania/south dakota/west virginia": "iowa",
u"iowas": "iowa",
u"kansas/missouri": "kansas",
u"kansas/texas": "kansas",
u"kansasashington": "kansas",
u"kentuck": "kentucky",
u"kentucky or tennessee": "kentucky",
u"kentucky/indiana": "kentucky",
u"kentucky/maryland/north carolina/tennessee/virginia/west virginia": "kentucky",
u"kentucky/maryland/virginia/west virginia": "kentucky",
u"kentucky/new york": "kentucky",
u"kentucky/north carolina/tennessee/virginia/west virginia": "kentucky",
u"kentuncky": "kentucky",
u"keutucky": "kentucky",
u"la": "louisiana",
u"lllinois": "illinois",
u"louisana": "louisiana",
u"louisiana*": "louisiana",
u"louisiana, mississippi": "louisiana",
u"louisiana/miss.": "louisiana",
u"louisiana/mississippi": "louisiana",
u"louisianna": "louisiana",
u"lousiana & alabama": "louisiana",
u"lousiana": "louisiana",
u"lousisiana": "louisiana",
u"ma ()": "massachusetts",
u"ma": "massachusetts",
u"ma, me, vt, ri, cn": "massachusetts",
u"main": "maine",
u"maine/massachusetts/michigan/wisconisn/ottawa": "maine",
u"maine/massachusetts/new hampshire/vermont": "maine",
u"maine/massachusetts/vermont": "maine",
u"maine/new brunswick": "maine",
u"maine/new hampshire/vermont": "maine",
u"maine/vermont": "maine",
u"maine/vermont/new hampshire/rhode island/connecticut": "maine",
u"marland": "maryland",
u"marlyland": "maryland",
u"maryland, new jersey": "maryland",
u"maryland/virginia": "maryland",
u"maryland/virginia/washington d.c.": "maryland",
u"maryland/virginia/west virginia": "maryland",
u"masachusetts": "massachusetts",
u"mass": "massachusetts",
u"massacheusetts": "massachusetts",
u"massachuetts": "massachusetts",
u"massachusets": "massachusetts",
u"massachusettes": "massachusetts",
u"massachusetts": "massachusetts",
u"massachusetts/ connecticut": "massachusetts",
u"massachusetts/maine": "massachusetts",
u"massachusetts/michigan/new hampshire/wisconsin": "massachusetts",
u"massachusetts/new hampshire": "massachusetts",
u"massachusetts/rhode island": "massachusetts",
u"massachussets": "massachusetts",
u"massachussettes": "massachusetts",
u"massachussetts": "massachusetts",
u"massachusstts": "massachusetts",
u"masschusetts": "massachusetts",
u"masssachusetts": "massachusetts",
u"md": "maryland",
u"me": "maine",
u"me. or ma.": "maine",
u"mi": "michigan",
u"mich": "michigan",
u"michgan": "michigan",
u"michigan": "michigan",
u"michigan, ohio": "michigan",
u"michigan, ontario": "michigan",
u"michigan, wisconsin": "michigan",
u"michigan/minnesota/iowa/kansas/texas/wisconsin": "michigan",
u"michigan/tennessee": "michigan",
u"michigan/wisconsin/illinois/indiana": "michigan",
u"michigan/wisconsin/minnesota": "michigan",
u"mighican": "michigan",
u"mighigan": "michigan",
u"minneosta": "minnesota",
u"minnesota, new york": "minnesota",
u"minnesota/ iowa": "minnesota",
u"minnesota/wisconsin": "minnesota",
u"minnesotra": "minnesota",
u"minnestoa": "minnesota",
u"minnestota": "minnesota",
u"misouri": "missouri",
u"mississipi": "mississippi",
u"mississippi [=louisiana]": "louisiana",
u"mississippi/louisiana/texas/arizona": "mississippi",
u"mississipppi": "mississippi",
u"mississppi": "mississippi",
u"misso": "missouri",
u"missorui": "missouri",
u"missouri or arkansas": "missouri",
u"mn ()": "minnesota",
u"mn": "minnesota",
u"mon tana": "montana",
u"monatana": "montana",
u"montana": "montana",
u"montana, north dakota": "montana",
u"montana, wyoming": "montana",
u"montana/nebraska/south dakota": "montana",
u"montana/south dakota/wyoming": "montana",
u"montana/wyoming": "montana",
u"montanta": "montana",
u"ms": "mississippi",
u"nc": "north carolina",
u"nc/tn": "north carolina",
u"nd": "north dakota",
u"ne/ca": "nebraska",
u"nebaska": "nebraska",
u"nebraska or dakota": "nebraska",
u"nebraska/wyoming": "nebraska",
u"neew york": "new york",
u"nevada/arizona": "nevada",
u"new york": "new york",
u"new hamphire": "new hampshire",
u"new hampshie": "new hampshire",
u"new hampshire or new york": "new hampshire",
u"new hampshire/maine": "new hampshire",
u"new hampshire/wisconsin/idaho": "new hampshire",
u"new hampshirt": "new hampshire",
u"new hampshre": "new hampshire",
u"new hampsire": "new hampshire",
u"new hamshire": "new hampshire",
u"new hamsphire": "new hampshire",
u"new jersery": "new jersey",
u"new jersey and new york": "new jersey",
u"new jersey and ohio": "new jersey",
u"new jersey and pennsylvania": "new jersey",
u"new jersey to florida": "new jersey",
u"new jersey, new york": "new jersey",
u"new jersey, new york, colorado": "new jersey",
u"new jersey/delaware": "new jersey",
u"new jersey/new york": "new jersey",
u"new meico": "new mexico",
u"new mexico and texas": "new mexico",
u"new mexico/california": "new mexico",
u"new mexico/colorado": "new mexico",
u"new mexico/texas": "new mexico",
u"new york & vermont": "new york",
u"new york or penna": "new york",
u"new york, pennsylvania": "new york",
u"new york/florida": "new york",
u"new york/louisiana": "new york",
u"new york/massachusetts": "new york",
u"new york/new jersey": "new york",
u"new york/pennsylvania": "new york",
u"new york/vermont": "new york",
u"new york/washington d.c.": "new york",
u"new_mexico": "new mexico",
u"new_york": "new york",
u"newæhampshire": "new hampshire",
u"newæjersey": "new jersey",
u"newæyork": "new york",
u"nh [=me]": "maine",
u"nh": "new hampshire",
u"nj": "new jersey",
u"nm": "new mexico",
u"north ca": "north carolina",
u"north carlolina": "north carolina",
u"north carolina - tennessee": "north carolina",
u"north carolina and tennessee": "north carolina",
u"north carolina or tennessee": "north carolina",
u"north carolina": "north carolina",
u"north carolina*": "north carolina",
u"north carolina, south carolina": "north carolina",
u"north carolina, tennessee": "north carolina",
u"north carolina-tennessee": "north carolina",
u"north carolina-virginia": "north carolina",
u"north carolina/massachusetts": "north carolina",
u"north carolina/south carolina/tennessee": "north carolina",
u"north carolina/tennessee": "north carolina",
u"north caroline": "north carolina",
u"north caronlina": "north carolina",
u"north dakota, south dakota": "north dakota",
u"north dakota, wyoming": "north dakota",
u"northæcarolina": "north carolina",
u"noth carolina": "north carolina",
u"nv": "nevada",
u"ny": "new york",
u"off virginia": "virginia",
u"oh": "ohio",
u"ohiio": "ohio",
u"ohio*": "ohio",
u"ohio/maryland/massachusetts/rhode island": "ohio",
u"ohio/massachusetts": "ohio",
u"ohio/pennsylvania": "ohio",
u"ohio/tennessee": "ohio",
u"ohio/west virginia": "ohio",
u"ok": "oklahoma",
u"ok, ut, tx": "oklahoma",
u"oklaahoma": "oklahoma",
u"oklaholma": "oklahoma",
u"oklahom": "oklahoma",
u"oklahoma-texas": "oklahoma",
u"oklahoma/nebraska/arkansas/texas": "oklahoma",
u"oklahoma/texas": "oklahoma",
u"or": "oregon",
u"oregan": "oregon",
u"oregeon": "oregon",
u"oregon, washington": "oregon",
u"oregon, washinton": "oregon",
u"oregon/california": "oregon",
u"oregon/washington/alaska": "oregon",
u"oregon/washington/wyoming/utah/texas": "oregon",
u"organ": "oregon",
u"orgegon": "oregon",
u"pa": "pennsylvania",
u"pennsilvania": "pennsylvania",
u"pennslyvania": "pennsylvania",
u"pennsylania": "pennsylvania",
u"pennsylvaina": "pennsylvania",
u"pennsylvania and ohio": "pennsylvania",
u"pennsylvania/kentucky": "pennsylvania",
u"pennsylvanis": "pennsylvania",
u"pennsylvannia": "pennsylvania",
u"pennsynvania": "pennsylvania",
u"pennyslvania": "pennsylvania",
u"penssylvania": "pennsylvania",
u"philadelphia": "pennsylvania",
# u"puerto rico": "puerto rico",
u"rhode island": "rhode island",
u"ri": "rhode island",
u"s. w. virginia": "virginia",
u"sacramento": "california",
u"sc": "south carolina",
u"sd": "south dakota",
u"slabama": "alabama",
u"so dakota/nebraska": "south dakota",
u"soiuth dakota": "south dakota",
u"sonora, texas": "texas",
u"sorth carolina": "south carolina",
u"south arizona": "arizona",
u"south carolin": "south carolina",
u"south carolina": "south carolina",
u"south carolina/georgia": "south carolina",
u"south caroline": "south carolina",
u"south dakota, wyoming": "south dakota",
u"south dakota/wyoming": "south dakota",
u"south datoka": "south dakota",
u"south sakota": "south dakota",
u"south-west virginia": "virginia",
u"southædakota": "south dakota",
u"teaxs": "texas",
u"tenessee": "tennessee",
u"tennesee": "tennessee",
u"tennesse": "tennessee",
u"tennesse/north carolina": "tennessee",
u"tennessee /north carolina": "tennessee",
u"tennessee and north carolina": "tennessee",
u"tennessee": "tennessee",
u"tennessee-north carolina state line": "tennessee",
u"tennessee-north carolina": "tennessee",
u"tennessee/kentucky": "tennessee",
u"tennessee/north carolina": "tennessee",
u"tennessee/oregon": "tennessee",
u"tennesseee": "tennessee",
u"tennesssee": "tennessee",
u"texa": "texas",
u"texas": "texas",
u"texas/arkansas": "texas",
u"texas/florida": "texas",
u"texas/louisiana": "texas",
u"texas/louisiana/mississippi": "texas",
u"texas/oklahoma": "texas",
u"texas/oklahoma/kansas": "texas",
u"texas/pennsylvania": "texas",
u"texs": "texas",
u"tn": "tennessee",
u"tn-n.c.": "tennessee",
u"tx": "texas",
u"uniontown, d.c.": "washington, d.c.",
u"uta": "utah",
u"utah/new mexico": "utah",
u"utha": "utah",
u"va [=md]": "maryland",
u"va": "virginia",
u"vemont": "vermont",
u"ver": "vermont",
u"vermong": "vermont",
u"vermont ()": "vermont",
u"vermont/new hampshire": "vermont",
u"vermont/new york": "vermont",
u"vermont/new york/quebec": "vermont",
u"viginia": "virginia",
u"virgina": "virginia",
u"virginia, west virginia": "virginia",
u"virginia/tennessee": "virginia",
u"viriginia": "virginia",
u"vt": "vermont",
u"vt-nh": "vermont",
u"vt/nh": "vermont",
u"w. t. [=wa]": "washington",
u"w.t. [=wa]": "washington",
u"wa": "washington",
u"wash": "washington",
u"washing": "washington",
u"washingon": "washington",
u"washingotn territory [=wa]": "washington",
u"washingotn": "washington",
u"washingrton": "washington",
u"washington (state)": "washington",
u"washington d. c.": "washington, d.c.",
u"washington d.c.": "washington, d.c.",
u"washington d.c., florida": "washington, d.c.",
u"washington dc": "washington, d.c.",
u"washington state": "washington",
u"washington territory": "washington",
u"washington, d. c.": "washington, d.c.",
u"washington, d.c.": "washington, d.c.",
u"washington, district of columbia": "washington, d.c.",
u"washington.": "washington",
u"washington/vancouver": "washington",
u"washinton": "washington",
u"washngton": "washington",
u"wasington": "washington",
u"west virgiana": "west virginia",
u"west virgina": "west virginia",
u"west virginia- ohio": "west virginia",
u"west viriginia": "west virginia",
u"western north carolina": "north carolina",
u"westævirginia": "west virginia",
u"wi, mi or mn": "wisconsin",
u"wiconsin": "wisconsin",
u"wilsconsin": "wisconsin",
u"winconsin": "wisconsin",
u"wisc": "wisconsin",
u"wisconsin, other": "wisconsin",
u"wisconsin/illinois": "wisconsin",
u"wisconsin/michigan": "wisconsin",
u"wisconsin/minnesota": "wisconsin",
u"wiscosin": "wisconsin",
u"wisonsin": "wisconsin",
u"wisonsion": "wisconsin",
u"wv": "west virginia",
u"wy": "wyoming",
u"wy,grand teton national park": "wyoming",
u"wyomin": "wyoming",
u"wyoming (most likely)": "wyoming",
u"wyoming/colorado": "wyoming",
u"wyoming/south dakota": "wyoming",
u"wyoming/texas/new mexico/utah/arizona/arkansas/california": "wyoming",
u"wyomiong": "wyoming",
u"wyomning": "wyoming",
}
}
# Merge the shared "unknown" spellings into each canonicalization table.
# kl_none (defined above) presumably maps spellings meaning "unknown/none"
# to a sentinel value -- confirm against its definition.
kl["country"].update(kl_none)
kl["continent"].update(kl_none)
kl["stateprovince"].update(kl_none)
# Canonical continent values.  Major oceans/seas (bare and full names) are
# OR-ed into this set a few lines below, so fields validated against it also
# accept bodies of water.
real_continents = {
    "europe",
    "asia",
    "australia",
    "africa",
    "north america",
    "south america",
    "antarctica",
    "oceania"
}
# Bare names of the major oceans and seas accepted in place of a continent.
major_bodies_of_water = {
    "atlantic",
    "arctic",
    "antarctic",
    "pacific",
    "indian",
    "caribbean",
    "mediterranean",
    "bering"
}
# Type suffix ("ocean" or "sea") for each body of water above; used to build
# the full names such as "atlantic ocean" / "bering sea".
major_bodies_of_water_type = {
    "atlantic": "ocean",
    "arctic": "ocean",
    "antarctic": "ocean",
    "pacific": "ocean",
    "indian": "ocean",
    "caribbean": "sea",
    "mediterranean": "sea",
    "bering": "sea",
}
# Full names such as "atlantic ocean" / "bering sea".  A set comprehension
# over the type table is equivalent to the old set([...]) list-comprehension
# form, since major_bodies_of_water and major_bodies_of_water_type share
# exactly the same keys.
mbw_complete = {name + " " + kind for name, kind in major_bodies_of_water_type.items()}
# Accept oceans/seas (bare or full names) anywhere a continent is expected.
real_continents |= mbw_complete | major_bodies_of_water
# Canonical (lower-case) country name -> ISO 3166-1 alpha-3 code.
string_to_iso_code = {
    u"aruba": "abw",
    u"afghanistan": "afg",
    u"angola": "ago",
    u"anguilla": "aia",
    u"åland islands": "ala",
    u"albania": "alb",
    u"andorra": "and",
    u"united arab emirates": "are",
    u"argentina": "arg",
    u"armenia": "arm",
    u"american samoa": "asm",
    u"antarctica": "ata",
    u"french southern territories": "atf",
    u"antigua and barbuda": "atg",
    u"australia": "aus",
    u"austria": "aut",
    u"azerbaijan": "aze",
    u"burundi": "bdi",
    u"belgium": "bel",
    u"benin": "ben",
    u"bonaire, sint eustatius and saba": "bes",
    u"burkina faso": "bfa",
    u"bangladesh": "bgd",
    u"bulgaria": "bgr",
    u"bahrain": "bhr",
    u"bahamas": "bhs",
    u"bosnia and herzegovina": "bih",
    u"saint barthélemy": "blm",
    u"belarus": "blr",
    u"belize": "blz",
    u"bermuda": "bmu",
    u"bolivia": "bol",
    u"brazil": "bra",
    u"barbados": "brb",
    u"brunei darussalam": "brn",
    u"bhutan": "btn",
    u"bouvet island": "bvt",
    u"botswana": "bwa",
    u"central african republic": "caf",
    u"canada": "can",
    u"cocos (keeling) islands": "cck",
    u"switzerland": "che",
    u"chile": "chl",
    u"china": "chn",
    u"côte d'ivoire": "civ",
    u"cameroon": "cmr",
    u"democratic republic of the congo": "cod",
    u"cook islands": "cok",
    u"colombia": "col",
    u"comoros": "com",
    u"cabo verde": "cpv",
    u"costa rica": "cri",
    u"cuba": "cub",
    u"curaçao": "cuw",
    u"christmas island": "cxr",
    u"cayman islands": "cym",
    u"cyprus": "cyp",
    u"czech republic": "cze",
    u"germany": "deu",
    u"djibouti": "dji",
    u"dominica": "dma",
    u"denmark": "dnk",
    u"dominican republic": "dom",
    u"algeria": "dza",
    u"ecuador": "ecu",
    u"egypt": "egy",
    u"eritrea": "eri",
    u"western sahara": "esh",
    u"spain": "esp",
    u"estonia": "est",
    u"ethiopia": "eth",
    u"finland": "fin",
    u"fiji": "fji",
    u"falkland islands": "flk",
    u"france": "fra",
    u"faroe islands": "fro",
    u"federated states of micronesia": "fsm",
    u"gabon": "gab",
    u"united kingdom": "gbr",
    u"georgia": "geo",
    u"guernsey": "ggy",
    u"ghana": "gha",
    u"gibraltar": "gib",
    u"guadeloupe": "glp",
    u"gambia": "gmb",
    u"guinea-bissau": "gnb",
    u"equatorial guinea": "gnq",
    u"greece": "grc",
    u"grenada": "grd",
    u"greenland": "grl",
    u"guatemala": "gtm",
    u"french guiana": "guf",
    u"guam": "gum",
    u"guyana": "guy",
    u"hong kong": "hkg",
    u"heard island and mcdonald islands": "hmd",
    u"honduras": "hnd",
    u"croatia": "hrv",
    u"haiti": "hti",
    u"hungary": "hun",
    u"indonesia": "idn",
    u"isle of man": "imn",
    u"india": "ind",
    u"british indian ocean territory": "iot",
    u"ireland": "irl",
    u"iran": "irn",
    u"iraq": "irq",
    u"iceland": "isl",
    u"israel": "isr",
    u"italy": "ita",
    u"jamaica": "jam",
    u"jersey": "jey",
    u"jordan": "jor",
    u"japan": "jpn",
    u"kazakhstan": "kaz",
    u"kenya": "ken",
    u"kyrgyzstan": "kgz",
    u"cambodia": "khm",
    u"kiribati": "kir",
    u"saint kitts and nevis": "kna",
    u"south korea": "kor",
    u"kuwait": "kwt",
    u"laos": "lao",
    u"lebanon": "lbn",
    u"liberia": "lbr",
    u"libya": "lby",
    u"saint lucia": "lca",
    u"liechtenstein": "lie",
    u"sri lanka": "lka",
    u"lesotho": "lso",
    u"lithuania": "ltu",
    u"luxembourg": "lux",
    u"latvia": "lva",
    u"macao": "mac",
    u"saint martin (french part)": "maf",
    u"morocco": "mar",
    u"monaco": "mco",
    u"moldova, republic of": "mda",
    u"madagascar": "mdg",
    u"maldives": "mdv",
    u"mexico": "mex",
    u"marshall islands": "mhl",
    u"macedonia": "mkd",
    u"malta": "mlt",
    u"myanmar": "mmr",
    u"montenegro": "mne",
    u"mongolia": "mng",
    u"northern mariana islands": "mnp",
    u"mozambique": "moz",
    u"mauritania": "mrt",
    u"montserrat": "msr",
    u"martinique": "mtq",
    u"mauritius": "mus",
    u"malawi": "mwi",
    u"malaysia": "mys",
    u"mayotte": "myt",
    u"namibia": "nam",
    u"new caledonia": "ncl",
    u"niger": "ner",
    u"norfolk island": "nfk",
    u"nigeria": "nga",
    u"nicaragua": "nic",
    u"niue": "niu",
    u"netherlands": "nld",
    u"norway": "nor",
    u"nepal": "npl",
    u"nauru": "nru",
    u"new zealand": "nzl",
    u"pakistan": "pak",
    u"panama": "pan",
    u"pitcairn": "pcn",
    u"peru": "per",
    u"philippines": "phl",
    u"palau": "plw",
    u"papua new guinea": "png",
    u"poland": "pol",
    u"puerto rico": "pri",
    u"north korea": "prk",
    u"portugal": "prt",
    u"paraguay": "pry",
    u"palestine, state of": "pse",
    u"french polynesia": "pyf",
    u"qatar": "qat",
    u"réunion": "reu",
    u"romania": "rou",
    u"russia": "rus",
    u"rwanda": "rwa",
    u"saudi arabia": "sau",
    u"sudan": "sdn",
    u"senegal": "sen",
    u"singapore": "sgp",
    u"south georgia and south sandwich islands": "sgs",
    u"saint helena, ascension and tristan da cunha": "shn",
    u"svalbard and jan mayen": "sjm",
    u"solomon islands": "slb",
    u"sierra leone": "sle",
    u"el salvador": "slv",
    u"san marino": "smr",
    u"somalia": "som",
    u"saint pierre and miquelon": "spm",
    u"serbia": "srb",
    u"south sudan": "ssd",
    u"sao tome and principe": "stp",
    u"suriname": "sur",
    u"slovakia": "svk",
    u"slovenia": "svn",
    u"sweden": "swe",
    u"swaziland": "swz",
    u"sint maarten (dutch part)": "sxm",
    u"seychelles": "syc",
    u"syria": "syr",
    u"turks and caicos islands": "tca",
    u"chad": "tcd",
    u"togo": "tgo",
    u"thailand": "tha",
    u"tajikistan": "tjk",
    u"tokelau": "tkl",
    u"turkmenistan": "tkm",
    u"timor-leste": "tls",
    u"tonga": "ton",
    u"trinidad and tobago": "tto",
    u"tunisia": "tun",
    u"turkey": "tur",
    u"tuvalu": "tuv",
    u"taiwan": "twn",
    u"tanzania": "tza",
    u"uganda": "uga",
    u"ukraine": "ukr",
    u"united states minor outlying islands": "umi",
    u"uruguay": "ury",
    u"united states": "usa",
    u"uzbekistan": "uzb",
    u"holy see": "vat",
    u"saint vincent and the grenadines": "vct",
    u"venezuela": "ven",
    u"virgin islands, british": "vgb",
    u"virgin islands, u.s.": "vir",
    u"vietnam": "vnm",
    u"vanuatu": "vut",
    u"wallis and futuna islands": "wlf",
    u"yemen": "yem",
    u"south africa": "zaf",
    u"zambia": "zmb",
    u"zimbabwe": "zwe",
    # Things in other things
    # NOTE(review): these names are substrings of other country names above
    # (e.g. "oman" appears inside "romania", "guinea" inside
    # "equatorial guinea"), which is presumably why they are grouped last --
    # confirm against whatever matching code consumes this table.
    u"oman": "omn",
    u"samoa": "wsm",
    u"mali": "mli",
    u"guinea": "gin",
    u"congo": "cog",
}
iso_countries = set(string_to_iso_code.keys())
# For each place name, the broader regions it implies.  Values are
# [continent, country] for US states/districts and [continent] for
# countries; the "continent" table is empty because continents have no
# parent region.
implied_parent = {
    "stateprovince": {
        "alabama": ["north america","united states"],
        "alaska": ["north america","united states"],
        "arizona": ["north america","united states"],
        "arkansas": ["north america","united states"],
        "california": ["north america","united states"],
        "colorado": ["north america","united states"],
        "connecticut": ["north america","united states"],
        "delaware": ["north america","united states"],
        "florida": ["north america","united states"],
        "georgia": ["north america","united states"],
        "hawaii": ["oceania", "united states"],
        "idaho": ["north america","united states"],
        "illinois": ["north america","united states"],
        "indiana": ["north america","united states"],
        "iowa": ["north america","united states"],
        "kansas": ["north america","united states"],
        "kentucky": ["north america","united states"],
        "louisiana": ["north america","united states"],
        "maine": ["north america","united states"],
        "maryland": ["north america","united states"],
        "massachusetts": ["north america","united states"],
        "michigan": ["north america","united states"],
        "minnesota": ["north america","united states"],
        "mississippi": ["north america","united states"],
        "missouri": ["north america","united states"],
        "montana": ["north america","united states"],
        "nebraska": ["north america","united states"],
        "nevada": ["north america","united states"],
        "new hampshire": ["north america","united states"],
        "new jersey": ["north america","united states"],
        "new mexico": ["north america","united states"],
        "new york": ["north america","united states"],
        "north carolina": ["north america","united states"],
        "north dakota": ["north america","united states"],
        "ohio": ["north america","united states"],
        "oklahoma": ["north america","united states"],
        "oregon": ["north america","united states"],
        "pennsylvania": ["north america","united states"],
        "rhode island": ["north america","united states"],
        "south carolina": ["north america","united states"],
        "south dakota": ["north america","united states"],
        "tennessee": ["north america","united states"],
        "texas": ["north america","united states"],
        "utah": ["north america","united states"],
        "vermont": ["north america","united states"],
        "virginia": ["north america","united states"],
        "washington": ["north america","united states"],
        "west virginia": ["north america","united states"],
        "wisconsin": ["north america","united states"],
        "wyoming": ["north america","united states"],
        "washington, d.c.": ["north america", "united states"],
    },
    "country": {
        u"afghanistan": ["asia"],
        u"albania": ["europe"],
        u"algeria": ["africa"],
        u"american samoa": ["oceania"],
        u"andorra": ["europe"],
        u"angola": ["africa"],
        u"anguilla": ["north america"],
        u"antarctica": ["antarctica"],
        u"antigua and barbuda": ["north america"],
        u"argentina": ["south america"],
        u"armenia": ["europe"],
        u"aruba": ["south america"],
        u"australia": ["australia"],
        u"austria": ["europe"],
        u"azerbaijan": ["europe"],
        u"bahamas": ["north america"],
        u"bahrain": ["asia"],
        u"bangladesh": ["asia"],
        u"barbados": ["north america"],
        u"belarus": ["europe"],
        u"belgium": ["europe"],
        u"belize": ["north america"],
        u"benin": ["africa"],
        u"bhutan": ["asia"],
        u"bolivia": ["south america"],
        u"bonaire, sint eustatius and saba": ["south america"],
        u"bosnia and herzegovina": ["europe"],
        u"botswana": ["africa"],
        u"brazil": ["south america"],
        u"british indian ocean territory": ["asia"],
        u"brunei darussalam": ["asia"],
        u"bulgaria": ["europe"],
        u"burkina faso": ["africa"],
        u"burundi": ["africa"],
        u"cabo verde": ["africa"],
        u"cambodia": ["asia"],
        u"cameroon": ["africa"],
        u"canada": ["north america"],
        u"cayman islands": ["north america"],
        u"central african republic": ["africa"],
        u"chad": ["africa"],
        u"chile": ["south america"],
        u"china": ["asia"],
        u"christmas island": ["asia"],
        u"cocos (keeling) islands": ["asia"],
        u"colombia": ["south america"],
        u"comoros": ["africa"],
        u"congo": ["africa"],
        u"democratic republic of the congo": ["africa"],
        u"cook islands": ["oceania"],
        u"costa rica": ["north america"],
        u"croatia": ["europe"],
        u"cuba": ["north america"],
        u"curaçao": ["south america"],
        u"cyprus": ["europe"],
        u"czech republic": ["europe"],
        u"côte d'ivoire": ["africa"],
        u"denmark": ["europe"],
        u"djibouti": ["africa"],
        u"dominica": ["north america"],
        u"dominican republic": ["north america"],
        u"ecuador": ["south america"],
        u"egypt": ["africa"],
        u"el salvador": ["north america"],
        u"equatorial guinea": ["africa"],
        u"eritrea": ["africa"],
        u"estonia": ["europe"],
        u"ethiopia": ["africa"],
        u"falkland islands": ["south america"],
        u"faroe islands": ["europe"],
        u"fiji": ["oceania"],
        u"finland": ["europe"],
        u"france": ["europe"],
        u"french guiana": ["south america"],
        u"french polynesia": ["oceania"],
        u"gabon": ["africa"],
        u"gambia": ["africa"],
        u"georgia": ["europe"],
        u"germany": ["europe"],
        u"ghana": ["africa"],
        u"gibraltar": ["europe"],
        u"greece": ["europe"],
        u"greenland": ["north america"],
        u"grenada": ["north america"],
        u"guadeloupe": ["north america"],
        u"guam": ["oceania"],
        u"guatemala": ["north america"],
        u"guernsey": ["europe"],
        u"guinea": ["africa"],
        u"guinea-bissau": ["africa"],
        u"guyana": ["south america"],
        u"haiti": ["north america"],
        u"holy see": ["europe"],
        u"honduras": ["north america"],
        u"hong kong": ["asia"],
        u"hungary": ["europe"],
        u"iceland": ["europe"],
        u"india": ["asia"],
        u"indonesia": ["asia"],
        u"iran": ["asia"],
        u"iraq": ["asia"],
        u"ireland": ["europe"],
        u"isle of man": ["europe"],
        u"israel": ["asia"],
        u"italy": ["europe"],
        u"jamaica": ["north america"],
        u"japan": ["asia"],
        u"jersey": ["europe"],
        u"jordan": ["asia"],
        u"kazakhstan": ["asia"],
        u"kenya": ["africa"],
        # NOTE(review): Kiribati is in Oceania, not Europe -- looks like a
        # data error; confirm before changing, downstream data may depend
        # on the current value.
        u"kiribati": ["europe"],
        u"north korea": ["asia"],
        u"south korea": ["asia"],
        u"kuwait": ["asia"],
        u"kyrgyzstan": ["asia"],
        u"laos": ["asia"],
        u"latvia": ["europe"],
        u"lebanon": ["asia"],
        u"lesotho": ["africa"],
        u"liberia": ["africa"],
        u"libya": ["africa"],
        u"liechtenstein": ["europe"],
        u"lithuania": ["europe"],
        u"luxembourg": ["europe"],
        u"macao": ["asia"],
        u"macedonia": ["europe"],
        u"madagascar": ["africa"],
        u"malawi": ["africa"],
        u"malaysia": ["asia"],
        u"maldives": ["asia"],
        u"mali": ["africa"],
        u"malta": ["europe"],
        # NOTE(review): the Marshall Islands are in Oceania, not Europe --
        # likely a data error; confirm before fixing.
        u"marshall islands": ["europe"],
        u"martinique": ["north america"],
        u"mauritania": ["africa"],
        u"mauritius": ["africa"],
        u"mayotte": ["africa"],
        u"mexico": ["north america"],
        u"federated states of micronesia": ["oceania"],
        u"moldova, republic of": ["europe"],
        u"monaco": ["europe"],
        u"mongolia": ["asia"],
        u"montenegro": ["europe"],
        u"montserrat": ["north america"],
        u"morocco": ["africa"],
        u"mozambique": ["africa"],
        u"myanmar": ["asia"],
        u"namibia": ["africa"],
        u"nauru": ["oceania"],
        u"nepal": ["asia"],
        u"netherlands": ["europe"],
        u"new caledonia": ["oceania"],
        u"new zealand": ["oceania"],
        u"nicaragua": ["north america"],
        u"niger": ["africa"],
        u"nigeria": ["africa"],
        u"niue": ["oceania"],
        u"norfolk island": ["oceania"],
        u"northern mariana islands": ["oceania"],
        u"norway": ["europe"],
        u"oman": ["asia"],
        u"pakistan": ["asia"],
        # NOTE(review): Palau is usually classed as Oceania -- confirm.
        u"palau": ["asia"],
        u"palestine, state of": ["asia"],
        u"panama": ["north america"],
        u"papua new guinea": ["oceania"],
        u"paraguay": ["south america"],
        u"peru": ["south america"],
        u"philippines": ["asia"],
        u"pitcairn": ["oceania"],
        u"poland": ["europe"],
        u"portugal": ["europe"],
        u"puerto rico": ["north america"],
        u"qatar": ["asia"],
        u"romania": ["europe"],
        u"rwanda": ["africa"],
        u"réunion": ["africa"],
        u"saint barthélemy": ["north america"],
        u"saint kitts and nevis": ["north america"],
        u"saint lucia": ["north america"],
        u"saint martin (french part)": ["north america"],
        u"saint pierre and miquelon": ["north america"],
        u"saint vincent and the grenadines": ["north america"],
        u"samoa": ["oceania"],
        u"san marino": ["europe"],
        u"sao tome and principe": ["africa"],
        u"saudi arabia": ["asia"],
        u"senegal": ["africa"],
        u"serbia": ["europe"],
        u"seychelles": ["africa"],
        u"sierra leone": ["africa"],
        u"singapore": ["asia"],
        u"sint maarten (dutch part)": ["north america"],
        u"slovakia": ["europe"],
        u"slovenia": ["europe"],
        u"solomon islands": ["oceania"],
        u"somalia": ["africa"],
        u"south africa": ["africa"],
        u"south sudan": ["africa"],
        u"sri lanka": ["asia"],
        u"sudan": ["africa"],
        u"suriname": ["south america"],
        u"svalbard and jan mayen": ["europe"],
        u"swaziland": ["africa"],
        u"sweden": ["europe"],
        u"switzerland": ["europe"],
        u"syria": ["asia"],
        u"taiwan": ["asia"],
        u"tajikistan": ["asia"],
        u"tanzania": ["africa"],
        u"thailand": ["asia"],
        u"timor-leste": ["asia"],
        u"togo": ["africa"],
        u"tokelau": ["oceania"],
        u"tonga": ["oceania"],
        u"trinidad and tobago": ["north america"],
        u"tunisia": ["africa"],
        u"turkmenistan": ["asia"],
        u"turks and caicos islands": ["north america"],
        u"tuvalu": ["oceania"],
        u"uganda": ["africa"],
        u"ukraine": ["europe"],
        u"united arab emirates": ["asia"],
        u"united kingdom": ["europe"],
        u"united states minor outlying islands": ["oceania"],
        u"uruguay": ["south america"],
        u"uzbekistan": ["asia"],
        u"vanuatu": ["oceania"],
        u"venezuela": ["south america"],
        u"vietnam": ["asia"],
        u"virgin islands, british": ["north america"],
        u"virgin islands, u.s.": ["north america"],
        u"wallis and futuna islands": ["oceania"],
        u"western sahara": ["africa"],
        u"yemen": ["asia"],
        u"zambia": ["africa"],
        u"zimbabwe": ["africa"],
        u"åland islands": ["europe"],
    },
    "continent":{}
}
def main():
    """Pretty-print every public set/dict defined in this module."""
    # TODO: Consider making this into a test.
    # for k, v in kl["continent"].items():
    #     if v not in real_continents and v != "None":
    #         print(k, v)
    # for k, v in kl["country"].items():
    #     if v not in iso_countries and v != "None":
    #         print(k, v)
    printer = pprint.PrettyPrinter()
    for name, value in globals().items():
        if name.startswith("_"):
            continue
        if isinstance(value, (set, dict)):
            # Banner line "*name*" (same output as name.join("**")).
            print("*" + name + "*")
            printer.pprint(value)
            print()
| iDigBio/idb-backend | idb/data_tables/locality_data.py | Python | gpl-3.0 | 93,190 | [
"BWA"
] | 9f85d25cf57ce8bed98efd797b4da970afe4f3922da5072584d2941a4c8cdb06 |
#!/usr/bin/env python
"""Module for making tests on small molecules in GPAW.
One molecule test to rule them all
One molecule test to run them
One molecule test to save them all
And on the webpage plot them (implementation pending)
"""
from gpaw import GPAW, ConvergenceError
from ase.structure import molecule
from ase.data.g2_1 import atom_names as atoms
from ase.data.g2_1 import molecule_names as g1
from ase.utils.molecule_test import MoleculeTest, EnergyTest, BondLengthTest,\
BatchTest
class GPAWMoleculeTest(MoleculeTest):
    """Molecule test driver that builds GPAW calculators.

    Extends MoleculeTest with GPAW-specific settings: grid spacing ``h``,
    ``xc`` functional, PAW ``setups``, calculation ``mode`` and an LCAO
    ``basis`` dictionary.
    """

    def __init__(self, name='gpaw', vacuum=6.0, h=0.17, xc='LDA',
                 setups='paw', mode='fd', basis=None,
                 exceptions=(RuntimeError, ConvergenceError)):
        MoleculeTest.__init__(self, name=name, vacuum=vacuum,
                              exceptions=exceptions)
        self.basis = {} if basis is None else basis
        self.mode = mode
        self.setups = setups
        self.h = h
        self.xc = xc
        # Molecules known to behave badly on the default grid; they get
        # shifted off the grid planes in setup_calculator().
        self.bad_formulas = ['NO', 'ClO', 'CH']

    def setup_calculator(self, system, formula):
        """Return a GPAW calculator configured for *system*."""
        lone_atom = (len(system) == 1)  # apply Hund's rule to single atoms
        spacing = self.h
        # Snap the cell to a multiple of 4*h so the grid divides evenly.
        system.set_cell((system.get_cell() / (4 * spacing)).round() * 4 * spacing)
        system.center()
        calc = GPAW(xc=self.xc,
                    h=spacing,
                    hund=lone_atom,
                    fixmom=True,
                    setups=self.setups,
                    txt=self.get_filename(formula, extension='txt'),
                    mode=self.mode,
                    basis=self.basis)
        # Special cases.
        if formula == 'BeH':
            # BeH needs the sanity checks relaxed to converge.
            calc.set(idiotproof=False)
        if formula in self.bad_formulas:
            # Nudge the molecule off a grid plane.
            system.positions[:, 1] += spacing * 1.5
        return calc
class GPAWEnergyTest(EnergyTest, GPAWMoleculeTest):
    """Energy test driven by the GPAW calculator setup."""
    pass
class GPAWBondLengthTest(BondLengthTest, GPAWMoleculeTest):
    """Bond-length test driven by the GPAW calculator setup."""
    pass
def main():
    """Run LCAO/dzp energy tests on all G2-1 systems and bond-length
    tests on the diatomics."""
    formulas = g1 + atoms
    dimers = [name for name in g1 if len(molecule(name)) == 2]
    common = dict(vacuum=3.0,
                  mode='lcao',
                  basis='dzp')
    energy_test = BatchTest(GPAWEnergyTest('test/energy', **common))
    bond_test = BatchTest(GPAWBondLengthTest('test/bonds', **common))
    energy_test.run(formulas)
    bond_test.run(dimers)
# Run the full molecule test suite when executed as a script.
if __name__ == '__main__':
    main()
| ajylee/gpaw-rtxs | gpaw/testing/molecule_test.py | Python | gpl-3.0 | 2,509 | [
"ASE",
"GPAW"
] | 4986436f0212a46c87a73da1f8e6a4beb169a027aa380efb5e7f79b5c66441dc |
#!/usr/bin/env python
"""
tide.py
Methods for working with tidal forcing files in ROMS
Written by Brian Powell on 04/05/16
Copyright (c)2017 University of Hawaii under the MIT-License.
"""
import numpy as np
import netCDF4
import seapy
import datetime
from warnings import warn
def create_forcing(filename, tide, title="Tidal Forcing", epoch=seapy.default_epoch):
    """
    Create a tidal forcing file from the given tidal values.

    Parameters
    ----------
    filename: string,
        File name of the tidal forcing to create
    tide: dict,
        Dictionary of the tidal forcing containing the following keys:
            Eamp : SSH amplitude
            Ephase : SSH phase (radians)
            Cmajor : velocity major ellipse
            Cminor : velocity minor ellipse
            Cphase : velocity ellipse phase (radians)
            Cangle : velocity ellipse angle (radians)
            tide_start : datetime of the tide reference
            tides : list of the tides
    title: string, optional,
        NetCDF title string to use
    epoch: datetime, optional,
        Epoch date for time reference

    Returns
    -------
    None
    """
    # Create the tide forcing file.  Eamp is (ntides, eta_rho, xi_rho).
    ntides, eta_rho, xi_rho = tide['Eamp'].shape
    if ntides != len(tide['tides']):
        raise ValueError(
            "The number of tidal data are different than the tides.")
    tideout = seapy.roms.ncgen.create_tide(filename, eta_rho=eta_rho,
                                           xi_rho=xi_rho,
                                           reftime=epoch,
                                           ntides=ntides,
                                           clobber=True, title=title)
    # Set the tide periods and attributes.  Period is the reciprocal of
    # each constituent's frequency (units per seapy.tide.frequency --
    # confirm there).
    tideout.variables['tide_period'][:] = 1.0 / \
        seapy.tide.frequency(tide['tides'])
    tideout.tidal_constituents = ", ".join(tide['tides'])
    # Record the tidal reference time both as days since the epoch and as
    # a "days since ..." base-date string; load_forcing parses the latter.
    tideout.tide_start = "Day {:5.1f} ({:s})".format((tide['tide_start'] -
                                                      epoch).total_seconds() / 86400,
                                                     str(tide['tide_start']))
    tideout.base_date = "days since {:s}".format(str(tide['tide_start']))
    # Phases/angles are kept in radians in memory but stored in degrees.
    tideout.variables['tide_Eamp'][:] = tide['Eamp']
    tideout.variables['tide_Ephase'][:] = np.degrees(tide['Ephase'])
    tideout.variables['tide_Cmax'][:] = tide['Cmajor']
    tideout.variables['tide_Cmin'][:] = tide['Cminor']
    tideout.variables['tide_Cphase'][:] = np.degrees(tide['Cphase'])
    tideout.variables['tide_Cangle'][:] = np.degrees(tide['Cangle'])
    tideout.close()
def load_forcing(filename):
    """
    Load a tidal forcing file into a dictionary

    Parameters
    ----------
    filename: string
        File name of the tidal forcing file to load

    Returns
    -------
    dict:
        Dictionary of the tidal forcing information with keys:
            Eamp : SSH amplitude
            Ephase : SSH phase (radians)
            Cmajor : velocity major ellipse
            Cminor : velocity minor ellipse
            Cphase : velocity ellipse phase (radians)
            Cangle : velocity ellipse angle (radians)
            tide_start : datetime of the tide reference (None if the file
                         carries no parseable reference)
            tides : list of the tides
    """
    import re

    nc = seapy.netcdf(filename)
    frc = {}
    frc['Eamp'] = nc.variables['tide_Eamp'][:]
    # Phases/angles are stored in degrees on disk, radians in memory.
    frc['Ephase'] = np.radians(nc.variables['tide_Ephase'][:])
    frc['Cmajor'] = nc.variables['tide_Cmax'][:]
    frc['Cminor'] = nc.variables['tide_Cmin'][:]
    frc['Cphase'] = np.radians(nc.variables['tide_Cphase'][:])
    frc['Cangle'] = np.radians(nc.variables['tide_Cangle'][:])
    # Different generators name these attributes differently; accept both.
    start_str = getattr(nc, 'tide_start', None) or \
        getattr(nc, 'base_date', None)
    tides = getattr(nc, 'tidal_constituents', None) or \
        getattr(nc, 'tides', None)
    frc['tides'] = tides.upper().split(", ")
    frc['tide_start'] = None
    nc.close()
    if start_str:
        try:
            # Strip any leading "days since " prefix before parsing.
            # (Raw string: "\s" in a plain literal is an invalid escape
            # and raises a SyntaxWarning on modern Python.)
            frc['tide_start'] = datetime.datetime.strptime(
                re.sub(r'^.*since\s*', '', start_str),
                "%Y-%m-%d %H:%M:%S")
        except ValueError:
            # Unparseable reference: leave tide_start as None (best effort).
            pass
    return frc
def tide_error(his_file, tide_file, grid=None):
    """
    Calculates the tidal error for each point given a model history and the
    tidal file used

    Parameters
    ----------
    his_file : string,
        String of history file location. Can be multiple files using wildcard
    tide_file: string,
        String of tidal file location
    grid : string or grid, optional,
        If specified, use this grid. Default is to build a grid from the history
        file.

    Returns
    -------
    tide_error : masked_array,
        Array containing the tidal error at each point, with land points masked
    """
    if grid:
        grid = seapy.model.asgrid(grid)
    else:
        grid = seapy.model.asgrid(his_file)
    # Load tidal file data
    frc = load_forcing(tide_file)
    # Calculate tidal error for each point.  Start from zeros with land
    # (mask_rho == 0) masked out.
    nc = seapy.netcdf(his_file)
    times = seapy.roms.get_time(nc)
    tide_error = np.ma.masked_where(
        grid.mask_rho == 0, np.zeros((grid.mask_rho.shape)))
    zeta = nc.variables['zeta'][:]
    nc.close()
    for i in seapy.progressbar.progress(range(grid.ln)):
        for j in range(grid.lm):
            if not tide_error.mask[i, j]:
                # Sea-surface height time series at this water point.
                z = zeta[:, i, j]
                # Forcing amplitude/phase per constituent at this point.
                t_ap = seapy.tide.pack_amp_phase(frc['tides'],
                                                 frc['Eamp'][:, i, j], frc['Ephase'][:, i, j])
                # Harmonic fit of the model SSH to the same constituents.
                mout = seapy.tide.fit(times, z, tides=frc['tides'],
                                      lat=grid.lat_rho[i, j], tide_start=frc['tide_start'])
                for c in t_ap:
                    m = mout['major'][c]
                    t = t_ap[c]
                    # Time-mean squared difference of two sinusoids with
                    # amplitudes m.amp, t.amp and phase offset
                    # (m.phase - t.phase):
                    #   0.5*(m^2 + t^2) - m*t*cos(dphi)
                    tide_error[i, j] += 0.5 * (m.amp**2 + t.amp**2) - \
                        m.amp * t.amp * np.cos(m.phase - t.phase)
                # RMS misfit accumulated over all constituents.
                tide_error[i, j] = np.sqrt(tide_error[i, j])
    return tide_error
| ocefpaf/seapy | seapy/roms/tide.py | Python | mit | 5,936 | [
"Brian",
"NetCDF"
] | 5e0e9e21bd593018271df12ba46fda6b027490a0c5461179b9ccd2acab7b873f |
# -*- coding: utf-8 -*-
import os, re, sqlite3, sys; sys.path.insert(0, os.path.join("..", ".."))
import string
from datetime import datetime
from pattern.web import Spider, DEPTH, BREADTH, FIFO, LIFO, URL,plaintext,DOM
from django.utils.encoding import smart_str, smart_unicode
from whooshHelper import *
def encode(s):
    """Strip *s* down to printable ASCII (codepoints 32-126)."""
    return "".join(c for c in s if 32 <= ord(c) <= 126)
class HuffingtonSpider(Spider):
    # Crawls huffingtonpost.co.uk and indexes dated article pages into the
    # Whoosh index passed to the constructor.  Python 2 code (print
    # statements, `unicode`, pattern.web Spider API).
    def __init__(self, whoosh):
        # Start at the front page; never leave the site's domain.
        Spider.__init__(self, links=["http://www.huffingtonpost.co.uk/"], domains=["huffingtonpost.co.uk"], delay=0.0)
        # whooshHelper wrapper used to store articles.
        self.whoosh=whoosh
    def htmlParser(self,link):
        # Return the article body as plain text.  Only <p> elements with no
        # nested links/images/spans are kept (those tend to be boilerplate),
        # and the "share your comment:" prompt is dropped.
        html = URL(link).download()
        result = ''
        body = DOM(html).body
        for e in body.by_tag('p'):
            a = e.by_tag('a')
            img = e.by_tag('img')
            span = e.by_tag('span')
            if a == [] and img == [] and span == []:
                plainText = plaintext(encode(e.content),linebreaks=2, indentation = True)
                content = encode(plainText)
                filterContent = content.strip().lower()
                if filterContent != 'share your comment:':
                    result = result + plainText + '\n '
        pretty = unicode(result.strip())
        return pretty
    def getTitle(self, link):
        # The headline lives in the element with class "title-news".
        html = URL(link).download()
        body = DOM(html).body
        title = body.by_class("title-news")[0].content.strip()
        return title
    def visit(self, link, source=None):
        # Article URLs embed the publication date: .../YYYY/MM/DD/...
        # NOTE(review): regex should be a raw string (r"...") -- "\d" is an
        # invalid escape in modern Python.
        match = re.search("huffingtonpost.co.uk/\d{4}/\d{2}/\d{2}/", link.url)
        if match:
            # http://www.huffingtonpost.co.uk/YYYY/MM/DD/... splits so that
            # parts 3, 4 and 5 are year, month and day.
            splitted_url = link.url.split('/')
            article_date = datetime.datetime(int(splitted_url[3]), int(splitted_url[4]), int(splitted_url[5]))
            title = self.getTitle(link.url)
            encodedContent = self.htmlParser(link.url)
            self.whoosh.addDocument(title, link.url, article_date, encodedContent)
            print "Date:", article_date, "\nTitle:", str(encode(title)), "\nUrl:", link.url, "\n\n"
            print "----------------------------------------------------------------------------------------------"
    def fail(self, link):
        # Error hook for links that could not be downloaded.
        print "failed:", encode(link.url),"\n"
    def priority(self, link, method=DEPTH):
        # Only dated article URLs are worth following; everything else is
        # given priority 0.0.
        match = re.search("huffingtonpost.co.uk/\d{4}/\d{2}/\d{2}/", link.url)
        if match:
            return Spider.priority(self, link, method)
        else:
            return 0.0
class GuardianSpider(Spider):
    # Crawls theguardian.com and indexes dated articles into the Whoosh
    # index passed to the constructor.  Python 2 code.
    # Month abbreviation -> month number, for parsing dates out of URLs.
    dic = {'jan':1,'feb':2,'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12}
    def __init__(self, whoosh):
        Spider.__init__(self, links=["http://www.theguardian.com/"], domains=["www.theguardian.com"], delay=0.0)
        self.whoosh=whoosh
    def htmlParser(self,link):
        # Plain-text article body from the element with id "content";
        # empty string when the page has no such element.
        html = URL(link).download()
        body = DOM(html).body
        content = body.by_id("content")
        if content:
            plaincontent = plaintext(content.content, linebreaks=2, indentation = True)
            pretty = unicode(plaincontent.strip())
        else:
            pretty=''
        return pretty
    def getTitle(self, link):
        # Headline is the second child of the "main-article-info" element;
        # empty string when the element is missing.
        html = URL(link).download()
        body = DOM(html).body
        node = body.by_id("main-article-info")
        if node:
            title = node.children[1].content.strip()
        else:
            title = ''
        return title
    def visit(self, link, source=None):
        # Article URLs embed the date as /YYYY/mon/DD/ (month abbreviated).
        # NOTE(review): regexes should be raw strings (r"...").
        match = re.search("/\d{4}/\w{3}/\d{2}/", link.url)
        if match:
            is_video = re.search("video", link.url)
            if not is_video:
                splitted_url = link.url.split('/')
                # match.group(0) == "/YYYY/mon/DD/" -> parts 1, 2, 3.
                splitted_date = match.group(0).split('/')
                article_date = datetime.datetime(int(splitted_date[1]), self.dic[splitted_date[2]], int(splitted_date[3]))
                encodedContent = self.htmlParser(link.url)
                if encodedContent:
                    title = self.getTitle(link.url)
                    if title:
                        self.whoosh.addDocument(title, link.url, article_date, encodedContent)
                        print "Date:", article_date, "\nTitle:", str(encode(title)), "\nUrl:", link.url, "\n\n"
                        print "-----------------------------------------------------"
                    else:
                        print "Not a news article."
                        print link.url
                        print "-----------------------------------------------------"
            else:
                print "Its a video."
                print "-----------------------------------------------------"
    def fail(self, link):
        # Error hook for links that could not be downloaded.
        print "failed:", encode(link.url),"\n"
    def priority(self, link, method=DEPTH):
        # Dated, non-"media" URLs inherit the base priority; all others 0.0.
        match = re.search("/\d{4}/\w{3}/\d{2}/", link.url)
        if match:
            if re.search("media", link.url):
                res = 0.0
            else:
                res = Spider.priority(self, link, method)
        else:
            res= 0.0
        return res
class ReutersSpider(Spider):
    # Crawls in.reuters.com and indexes dated articles into the Whoosh
    # index passed to the constructor.  Python 2 code.
    def __init__(self, whoosh):
        Spider.__init__(self, links=["http://in.reuters.com/"], domains=["in.reuters.com"], delay=0.0)
        self.whoosh=whoosh
    def htmlParser(self,link):
        # Concatenate every <p> inside the main article column and return
        # the printable-ASCII plain text.
        html = URL(link).download()
        result = ''  # NOTE(review): unused; kept for byte-compatibility.
        body = DOM(html).body.by_class('column2 gridPanel grid8')[0]
        paragraphs = body('p')
        article = ''
        for p in paragraphs:
            article+=str(p)
        plainText = plaintext(encode(article),linebreaks=2, indentation = True)
        content = encode(plainText)
        pretty = unicode(content.strip())
        return pretty
    def getTitle(self, link):
        # Headline is the first <h1> of the main article column.
        html = URL(link).download()
        body = DOM(html).body.by_class('column2 gridPanel grid8')[0]
        title = body('h1')[0].content
        return title
    def visit(self, link, source=None):
        # Article URLs embed the date: in.reuters.com/article/YYYY/MM/DD/...
        # -> split('/') parts 4, 5 and 6 are year, month and day.
        # NOTE(review): regex should be a raw string (r"...").
        match = re.search("in.reuters.com/article/\d{4}/\d{2}/\d{2}/", link.url)
        if match:
            splitted_url = link.url.split('/')
            article_date = datetime.datetime(int(splitted_url[4]), int(splitted_url[5]), int(splitted_url[6]))
            title = self.getTitle(link.url)
            encodedContent = self.htmlParser(link.url)
            self.whoosh.addDocument(title, link.url, article_date, encodedContent)
            print "Date:", article_date, "\nTitle:", str(encode(title)), "\nUrl:", link.url, "\n\n"
            print "----------------------------------------------------------------------------------------------"
    def fail(self, link):
        # Error hook for links that could not be downloaded.
        print "failed:", encode(link.url),"\n"
def priority(self, link, method=DEPTH):
match = re.search("in.reuters.com/article/\d{4}/\d{2}/\d{2}/", link.url)
if match:
return Spider.priority(self, link, method)
else:
return 0.0 | Carlosmr/WhooshSearcher | modules/spiders.py | Python | mit | 6,999 | [
"VisIt"
] | e63a907173ff5d4ed0b3a52acd9e7bf1edd6e18add2a8aa5fef364e89536e4b7 |
"""Support for control of ElkM1 sensors."""
from elkm1_lib.const import (
SettingFormat,
ZoneLogicalStatus,
ZonePhysicalStatus,
ZoneType,
)
from elkm1_lib.util import pretty_const, username
import voluptuous as vol
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import VOLT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
from . import ElkAttachedEntity, create_elk_entities
from .const import ATTR_VALUE, DOMAIN, ELK_USER_CODE_SERVICE_SCHEMA
# Entity-service names registered by async_setup_entry below.
SERVICE_SENSOR_COUNTER_REFRESH = "sensor_counter_refresh"
SERVICE_SENSOR_COUNTER_SET = "sensor_counter_set"
SERVICE_SENSOR_ZONE_BYPASS = "sensor_zone_bypass"
SERVICE_SENSOR_ZONE_TRIGGER = "sensor_zone_trigger"
# Sentinel the panel reports when no temperature reading is available.
# NOTE(review): name is misspelled ("TEMPATURE"); kept as-is because other
# code in this module may reference it -- confirm before renaming.
UNDEFINED_TEMPATURE = -40
# Schema for the counter-set service: value must fit in an unsigned 16-bit int.
ELK_SET_COUNTER_SERVICE_SCHEMA = {
    vol.Required(ATTR_VALUE): vol.All(vol.Coerce(int), vol.Range(0, 65535))
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Create the Elk-M1 sensor platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
entities = []
elk = elk_data["elk"]
create_elk_entities(elk_data, elk.counters, "counter", ElkCounter, entities)
create_elk_entities(elk_data, elk.keypads, "keypad", ElkKeypad, entities)
create_elk_entities(elk_data, [elk.panel], "panel", ElkPanel, entities)
create_elk_entities(elk_data, elk.settings, "setting", ElkSetting, entities)
create_elk_entities(elk_data, elk.zones, "zone", ElkZone, entities)
async_add_entities(entities, True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SENSOR_COUNTER_REFRESH,
{},
"async_counter_refresh",
)
platform.async_register_entity_service(
SERVICE_SENSOR_COUNTER_SET,
ELK_SET_COUNTER_SERVICE_SCHEMA,
"async_counter_set",
)
platform.async_register_entity_service(
SERVICE_SENSOR_ZONE_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_zone_bypass",
)
platform.async_register_entity_service(
SERVICE_SENSOR_ZONE_TRIGGER,
{},
"async_zone_trigger",
)
def temperature_to_state(temperature, undefined_temperature):
"""Convert temperature to a state."""
return temperature if temperature > undefined_temperature else None
class ElkSensor(ElkAttachedEntity, SensorEntity):
"""Base representation of Elk-M1 sensor."""
def __init__(self, element, elk, elk_data):
"""Initialize the base of all Elk sensors."""
super().__init__(element, elk, elk_data)
self._state = None
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_counter_refresh(self):
"""Refresh the value of a counter from the panel."""
if not isinstance(self, ElkCounter):
raise HomeAssistantError("supported only on ElkM1 Counter sensors")
self._element.get()
async def async_counter_set(self, value=None):
"""Set the value of a counter on the panel."""
if not isinstance(self, ElkCounter):
raise HomeAssistantError("supported only on ElkM1 Counter sensors")
self._element.set(value)
async def async_zone_bypass(self, code=None):
"""Bypass zone."""
if not isinstance(self, ElkZone):
raise HomeAssistantError("supported only on ElkM1 Zone sensors")
self._element.bypass(code)
async def async_zone_trigger(self):
"""Trigger zone."""
if not isinstance(self, ElkZone):
raise HomeAssistantError("supported only on ElkM1 Zone sensors")
self._element.trigger()
class ElkCounter(ElkSensor):
"""Representation of an Elk-M1 Counter."""
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:numeric"
def _element_changed(self, element, changeset):
self._state = self._element.value
class ElkKeypad(ElkSensor):
"""Representation of an Elk-M1 Keypad."""
@property
def temperature_unit(self):
"""Return the temperature unit."""
return self._temperature_unit
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._temperature_unit
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:thermometer-lines"
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["area"] = self._element.area + 1
attrs["temperature"] = self._state
attrs["last_user_time"] = self._element.last_user_time.isoformat()
attrs["last_user"] = self._element.last_user + 1
attrs["code"] = self._element.code
attrs["last_user_name"] = username(self._elk, self._element.last_user)
attrs["last_keypress"] = self._element.last_keypress
return attrs
def _element_changed(self, element, changeset):
self._state = temperature_to_state(
self._element.temperature, UNDEFINED_TEMPATURE
)
class ElkPanel(ElkSensor):
"""Representation of an Elk-M1 Panel."""
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:home"
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["system_trouble_status"] = self._element.system_trouble_status
return attrs
def _element_changed(self, element, changeset):
if self._elk.is_connected():
self._state = (
"Paused" if self._element.remote_programming_status else "Connected"
)
else:
self._state = "Disconnected"
class ElkSetting(ElkSensor):
"""Representation of an Elk-M1 Setting."""
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:numeric"
def _element_changed(self, element, changeset):
self._state = self._element.value
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["value_format"] = SettingFormat(self._element.value_format).name.lower()
return attrs
class ElkZone(ElkSensor):
"""Representation of an Elk-M1 Zone."""
@property
def icon(self):
"""Icon to use in the frontend."""
zone_icons = {
ZoneType.FIRE_ALARM.value: "fire",
ZoneType.FIRE_VERIFIED.value: "fire",
ZoneType.FIRE_SUPERVISORY.value: "fire",
ZoneType.KEYFOB.value: "key",
ZoneType.NON_ALARM.value: "alarm-off",
ZoneType.MEDICAL_ALARM.value: "medical-bag",
ZoneType.POLICE_ALARM.value: "alarm-light",
ZoneType.POLICE_NO_INDICATION.value: "alarm-light",
ZoneType.KEY_MOMENTARY_ARM_DISARM.value: "power",
ZoneType.KEY_MOMENTARY_ARM_AWAY.value: "power",
ZoneType.KEY_MOMENTARY_ARM_STAY.value: "power",
ZoneType.KEY_MOMENTARY_DISARM.value: "power",
ZoneType.KEY_ON_OFF.value: "toggle-switch",
ZoneType.MUTE_AUDIBLES.value: "volume-mute",
ZoneType.POWER_SUPERVISORY.value: "power-plug",
ZoneType.TEMPERATURE.value: "thermometer-lines",
ZoneType.ANALOG_ZONE.value: "speedometer",
ZoneType.PHONE_KEY.value: "phone-classic",
ZoneType.INTERCOM_KEY.value: "deskphone",
}
return f"mdi:{zone_icons.get(self._element.definition, 'alarm-bell')}"
@property
def extra_state_attributes(self):
"""Attributes of the sensor."""
attrs = self.initial_attrs()
attrs["physical_status"] = ZonePhysicalStatus(
self._element.physical_status
).name.lower()
attrs["logical_status"] = ZoneLogicalStatus(
self._element.logical_status
).name.lower()
attrs["definition"] = ZoneType(self._element.definition).name.lower()
attrs["area"] = self._element.area + 1
attrs["triggered_alarm"] = self._element.triggered_alarm
return attrs
@property
def temperature_unit(self):
"""Return the temperature unit."""
if self._element.definition == ZoneType.TEMPERATURE.value:
return self._temperature_unit
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
if self._element.definition == ZoneType.TEMPERATURE.value:
return self._temperature_unit
if self._element.definition == ZoneType.ANALOG_ZONE.value:
return VOLT
return None
def _element_changed(self, element, changeset):
if self._element.definition == ZoneType.TEMPERATURE.value:
self._state = temperature_to_state(
self._element.temperature, UNDEFINED_TEMPATURE
)
elif self._element.definition == ZoneType.ANALOG_ZONE.value:
self._state = self._element.voltage
else:
self._state = pretty_const(
ZoneLogicalStatus(self._element.logical_status).name
)
| w1ll1am23/home-assistant | homeassistant/components/elkm1/sensor.py | Python | apache-2.0 | 9,302 | [
"Elk"
] | ea5f38078b090f9da6ae4740b1f5c6d96eb3f5a2276fe2d005fa52196662fef5 |
# -*- coding: utf-8 -*-
# Copyright 2010-2014 Will Barton.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import csv
from collections import OrderedDict, namedtuple
import unittest
from utils import Size, EquatorialCoordinate
### Object Types
# The standardized object types for Observation Charts
OBJECT_TYPES = [
'Star', # 0
'Double Star', # 1
'Triple Star', # 2
'Galaxy', # 3
'Open Cluster', # 4
'Globular Cluster', # 5
'Planetary Nebula', # 6
'Bright Nebula', # 7
'Milky Way', # 8
'Not Used', # 9
]
# Abbrev Description Example
# -----------------------------------------------------
# * Single Star
# ** Double Star
# *** Triple Star
# Ast Asterism
# Gxy Galaxy
# GxyCld Bright cloud/knot in a galaxy
# GC Globular Cluster
# HIIRgn HII Region
# Neb Nebula (emission or reflection)
# NF Not Found
# OC Open Cluster
# PN Planetary Nebula
# SNR Supernova Remnant
# MWSC Milky Way Star Cloud
### Objects and Catalogs
#### CelestialObject
# The base class for everything in the sky.
class CelestialObject(object):
# Internally used to map alias:catalog
__aliases = None
# `type`, the type of the object.
type = None
# `ra`, the mean right ascention of the object
ra = None
# `dec`, the mean declination of the object
dec = None
# `size`, the apparent size of the object in the sky
size = None
# `magnitude`, the apparent magnitude of the object in the sky
magnitude = None
# `catalog`, the primary source catalog for this object
catalog = None
# The object's identifier in its primary catalog
identifier = None
# The object's positional angle (pretty much unique to galaxies)
angle = None
def __init__(self, identifier, catalog, type=None, ra=None,
dec=None, magnitude=None, size=None, aliases=None):
self.identifier = identifier
self.catalog = catalog
self.type = type
self.ra = ra
self.dec = dec
self.magnitude = magnitude
self.size = size
def __repr__(self):
return "{cls}(catalog={catalog}, identifier={identifier} type={type} magnitude={magnitude})".format(
cls=self.__class__.__name__,
catalog=self.catalog,
identifier=self.identifier,
magnitude=self.magnitude,
type=OBJECT_TYPES[self.type])
# `id`, a string of self.catalog + self.identifier, i.e. `HIP27989`
# for Betelgeuse or NGC1976 for the Orion Nebula.
@property
def id(self):
return ''.join((self.catalog, self.identifier))
# `catalogs`, a list of the catalogs the object appears in. Catalogs
# must be added using the `add_alias` method, because a catalog must
# have a corrosponding identifier within that catalog. The object's
# `catalog` and `identifier` will always appear first.
@property
def catalogs(self):
# Make sure we return the primary catalog
if self.__aliases is None:
self.__aliases = OrderedDict([(self.identifier, self.catalog),])
return [v for v in self.__aliases.values() if v is not None]
# `aliases_dict`, a list of all the identifiers for this object,
# corrosponding to the list of catalogs.
# The index of aliases *with* catalogs will corrospond to the
# `catalogs` list. Aliases *without* catalogs will be placed at the
# end of the aliases list in the order they were added, from the
# `len(catalogs)` position onward.
@property
def aliases_dict(self):
# Make sure we return the primary identifier
if self.__aliases is None:
self.__aliases = OrderedDict([(self.identifier, self.catalog),])
return list(self.__aliases.keys())
# `aliases` is a list of all the `{catalog}{alias}` strings
# for each alias that belongs to a catalog, or just `{alias}` if
# the alias has no catalogs.
@property
def aliases(self):
# Make sure we return the primary identifier
if self.__aliases is None:
self.__aliases = OrderedDict([(self.identifier, self.catalog),])
return ["".join(filter(None, reversed(pair)))
for pair in self.__aliases.items()]
# Common API for adding aliases. Aliases *can* have catalogs, but
# are not required. Example would be the Orion Nebula: It is
# NGC1976, M42, LBN974, and 'The Orion Nebula'. Three of those have
# catalogs (NGC, M, LBN), but one does not. 'The Orion Nebula' will
# always appear at the end of the list. The object's `catalog` and
# `identifier` will
#
# This is also the only way to add a catalog.
def add_alias(self, alias, catalog=None):
# Make sure the primary identifier is the first item.
if self.__aliases is None:
self.__aliases = OrderedDict([(self.identifier, self.catalog),])
self.__aliases[alias] = catalog
class TestCelestialObject(unittest.TestCase):
# Aliases are the primary functionality of the CelestialObject. It
# isn't exactly abstract, it can be used to an actual object, but
# objects are likely to come from a catalog, and it would be better
# to use a catalog-specific class to handle any catalog-specific
# quirks.
def test_aliases_dict(self):
c = CelestialObject('1976', 'NGC')
self.assertEqual(c.aliases_dict, ['1976',])
def test_catalogs(self):
c = CelestialObject('1976', 'NGC')
self.assertEqual(c.catalogs, ['NGC',])
def test_add_alias(self):
c = CelestialObject('1976', 'NGC')
c.add_alias('42', 'M')
self.assertEqual(c.aliases_dicts, ['1976', '42'])
self.assertEqual(c.catalogs, ['NGC', 'M'])
c.add_alias('The Orion Nebula')
self.assertEqual(c.aliases_dicts, ['1976', '42', 'The Orion Nebula'])
self.assertEqual(c.catalogs, ['NGC', 'M'])
#### NGCObject
# This regular expression matches the object size format of the HCNGC
# catalog. Size is given major diameter x minor diameter in arc minutes.
ngc_size_re = re.compile(r'([0-9\.]+)[\'`"] ?([xX] ?([0-9\.]+)[\'`"])?')
# A mapping of NGC types to the Observation Charts types
ngc_object_types = {
'*': 0,
'**': 1,
'***': 2,
'Ast': 9,
'Gxy': 3,
'GxyCld': 3,
'GC': 5,
'HIIRgn': 7,
'Neb': 7,
'NF': 9,
'OC': 4,
'PN': 6,
'SNR': 7,
'MWSC': 8,
'OC+Neb': 4,
'Neb?': 7
}
# Object from the New General Catalog of deep sky objects.
class NGCObject(CelestialObject):
# Initialization is meant to take a csv.DictReader row as keyword
# args.
def __init__(self, **kwargs):
self.__NGCNo = kwargs['NGCNo']
self.__RA_2000 = kwargs['RA_2000']
self.__DEC_2000 = kwargs['DEC_2000']
self.__Const = kwargs['Const']
self.__ObjectType = kwargs['ObjectType']
self.__Size = kwargs['Size']
self.__Bmag = kwargs['Bmag']
self.__Vmag = kwargs['Vmag']
self.__AlsoCatalogedAs = kwargs['AlsoCatalogedAs']
self.__PA = kwargs['PA']
self.ra = EquatorialCoordinate(self.__RA_2000)
self.dec = EquatorialCoordinate(self.__DEC_2000)
# parse the size
size_match = ngc_size_re.match(self.__Size)
if size_match:
major = size_match.groups()[0]
minor = size_match.groups()[2]
self.size = Size(major=float(major),
minor=(float(minor) if minor is not None else 0))
else:
self.size = Size(major=0, minor=0)
# Some objects don't have a Vmag... I'm not sure why.
try:
self.magnitude = float(self.__Vmag)
except ValueError as e:
try:
self.magnitude = float(self.__Bmag)
except Exception as e:
self.magnitude = 20
try:
self.angle = float(self.__PA)
except ValueError:
self.angle = None
self.identifier = self.__NGCNo
self.catalog = 'NGC'
# Lookup the NGC type and map to one of our types
self.type = ngc_object_types[self.__ObjectType]
# The HCNGC Catalog gives us some aliases. Add them.
# This is imperfect because the catalog doesn't use a consistent
# seperator between catalog and identifier.
aliases = [re.split('[ -]', a.strip(), 1)
for a in self.__AlsoCatalogedAs.split(',')]
for pair in aliases:
self.add_alias(*reversed(pair))
class TestNGCObject(unittest.TestCase):
def setUp(self):
self.ngc_object_dict = {
'ObjectType': 'OC+Neb',
'L': '1',
'HistoricalNotes': 'H.C.',
'NGCNo': '1976',
'GC': '1179',
'VSfcBrt': '…',
'Const': 'Ori',
'ObjectClassif': '3:02:03',
'GSCSmallRegionNr': '4774',
'DEC_2000': '-05º 23\' 27"',
'ICEquiv': '…',
'TelescopeType': 'Refractor',
'PA': '…',
'Size': "90'X60'",
'JH': '360',
'Diam_inch': '-',
'Bmag': '4',
'NGCEquiv': '…',
'RA_2000': '05h 35m 17.2s',
'AlsoCatalogedAs': 'M 42, LBN 974, Sh2-281',
'POSS RedPlateNr': '1477',
'HeraldBobroffASTROATLAS': 'C-53,D-24',
'Year': '1610',
'POSSBluePlateNr': '1477',
'Vmag': '…',
'WH': '…',
'Uranometria2000': '225,226,270,271',
'ObservingNotes': 'S.G.',
'SourcesUsed': 'N,O,S,l,s,6,8,0,M,D,n',
'OriginalNGCSummaryDescription': '!!! Theta Orionis and the great neb',
'Discoverer': 'Nicolas Peiresc'}
def test_size(self):
ngc_object = NGCObject(**self.ngc_object_dict)
self.assertEqual(ngc_object.identifier, '1976')
#### The NGC Catalog.
# This class simply inherits from OrderedDict. It takes a file (or
# stream), parses it, and populates the dict.
class NGCCatalog(OrderedDict):
def __init__(self, stream):
super().__init__()
reader = csv.DictReader(stream)
for row in reader:
ngc_object = NGCObject(**row)
self[ngc_object.identifier] = ngc_object
class TestNGCCatalog(unittest.TestCase):
def test_init(self):
import io
ngc_orion = '''NGCNo,L,GC,JH,WH,RA_2000,DEC_2000,Const,OriginalNGCSummaryDescription,Discoverer,Year,TelescopeType,Diam_inch,ObjectType,ObjectClassif,Size,PA,Vmag,Bmag,VSfcBrt,NGCEquiv,ICEquiv,AlsoCatalogedAs,HistoricalNotes,ObservingNotes,Uranometria2000,HeraldBobroffASTROATLAS,GSCSmallRegionNr,POSSBluePlateNr,POSS RedPlateNr,SourcesUsed
5194,1,3572,1622,…,13h 29m 52.1s,"+47º 11' 43""",CVn,"!!!, Great Spiral neb",Charles Messier,1773,Refractor,3.3,Gxy,Sc I,11'X7.8',163,8.5,9.1,13.1,…,…,"M 51A, UGC 8493, ARP 85, MCG+08-25-012, CGCG 246.008, VV 403, IRAS 13277+4727, PGC 47404",H.C.,S.G.,76,"C-11,C-29",3460,1593,1593,"N,O,S,U,1,Z,m,0,6,8,D,n"
1976,1,1179,360,…,05h 35m 17.2s,"-05º 23' 27""",Ori,!!! Theta Orionis and the great neb,Nicolas Peiresc,1610,Refractor,-,OC+Neb,3:02:03,90'X60',…,…,4,…,…,…,"M 42, LBN 974, Sh2-281",H.C.,S.G.,"225,226,270,271","C-53,D-24",4774,1477,1477,"N,O,S,l,s,6,8,0,M,D,n"'''
stream = io.StringIO(initial_value=ngc_orion)
ngc_catalog = NGCCatalog(stream)
self.assertTrue('1976' in ngc_catalog)
#### HYGStar
# A star from the HYG catalog. The star's HR number is prefered as
# the identifier here.
class HYGStar(CelestialObject):
# Initialization is meant to take a csv.DictReader row as keyword
# args.
def __init__(self, **kwargs):
self.__StarID = kwargs['StarID']
self.__HIP = kwargs['HIP']
self.__HD = kwargs['HD']
self.__HR = kwargs['HR']
self.__BayerFlamsteed = kwargs['BayerFlamsteed']
self.__ProperName = kwargs['ProperName']
self.__RA = kwargs['RA']
self.__Dec = kwargs['Dec']
self.__Mag = kwargs['Mag']
self.__AbsMag = kwargs['AbsMag']
self.__Spectrum = kwargs['Spectrum']
self.__ColorIndex = kwargs['ColorIndex']
# The HYG catalog contains HIP, HD, and HR identifiers the
# `catalog` property will corrospond to the one we prefer for
# the `identifier`.
self.identifier = self.__HIP
self.catalog = 'HIP'
self.type = 0
self.size = Size(-1,-1)
# NOTE: HYG RA is in degrees.
self.ra = EquatorialCoordinate(self.__RA, hours=True)
self.dec = EquatorialCoordinate(self.__Dec, degrees=True)
self.magnitude = float(self.__Mag)
# HYG gives us a lot of aliases. Add them.
self.add_alias(self.__HD, 'HD')
self.add_alias(self.__HR, 'HR')
self.add_alias(self.__ProperName)
# Parse out the Flamsteed and Bayer components and add
# seperately
# self.add_alias(self.__BayerFlamsteed, )
class TestHYGStar(unittest.TestCase):
def setUp(self):
self.hyg_object_dict = {
'VX': '-1.693e-05',
'Distance': '131.061598951507',
'X': '2.738',
'StarID': '27919',
'HD': '39801',
'Z': '16.89611',
'VZ': '9.611e-06',
'BayerFlamsteed': '58Alp Ori',
'VY': '2.0769e-05',
'PMRA': '27.33',
'Mag': '0.45',
'HR': '2061',
'Y': '129.93909',
'Spectrum': 'M2Ib',
'ColorIndex': '1.500',
'RA': '5.91952477',
'HIP': '27989',
'AbsMag': '-5.1373773102256',
'Dec': '07.40703634',
'PMDec': '10.86',
'ProperName': 'Betelgeuse',
'Gliese': '',
'RV': '21'}
def test_init(self):
# Betelgeuse, HIP 27989
hyg_object = HYGStar(**self.hyg_object_dict)
self.assertEqual(hyg_object.identifier, '27989')
self.assertEqual(hyg_object.ra.hours, 5.91952477)
self.assertEqual(hyg_object.ra.degrees, 88.79287155)
self.assertEqual(hyg_object.dec.degrees, 7.40703634)
#### The HYG Catalog
# This class simply inherits from OrderedDict. It takes a file (or
# stream), parses it, and populates the dict.
class HYGStarCatalog(OrderedDict):
def __init__(self, stream):
super().__init__()
reader = csv.DictReader(stream)
for row in reader:
hyg_star = HYGStar(**row)
self[hyg_star.identifier] = hyg_star
class TestHYGStarCatalog(unittest.TestCase):
def test_init(self):
import io
hyg_betelgeuse = '''StarID,HIP,HD,HR,Gliese,BayerFlamsteed,ProperName,RA,Dec,Distance,PMRA,PMDec,RV,Mag,AbsMag,Spectrum,ColorIndex,X,Y,Z,VX,VY,VZ
27919,27989,39801,2061,,58Alp Ori,Betelgeuse,5.91952477,07.40703634,131.061598951507,27.33,10.86,21,0.45,-5.1373773102256,M2Ib,1.500,2.738,129.93909,16.89611,-1.693e-05,2.0769e-05,9.611e-06'''
stream = io.StringIO(initial_value=hyg_betelgeuse)
hyg_catalog = HYGStarCatalog(stream)
self.assertTrue('27989' in hyg_catalog)
if __name__ == "__main__":
unittest.main()
| willbarton/observation-charts | src/observation/catalogs/objects.py | Python | bsd-3-clause | 17,122 | [
"Galaxy"
] | 513784231edc722ce7d801229c88fcddb81400ffaebc2455a525e4d478687acc |
#!/usr/bin/python
work_dir = ''
import numpy as np
from scipy.io import FortranFile as ufmt
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# See GALAXY 14.50 Manual, Sec. 9.2, P54
header_dtype = [('n1', '<i4'), ('n2', '<i4'), ('n3', '<i4'),
('ncoor', '<i4'), ('np', '<i4'), ('time', '<f4'),
('pm', '<f4'), ('pertbn', '<i4')]
def save_snap_galaxy_pcs(filename, snap):
# unpack snapshot
cps = snap['cps']
# Get ptcl number
n1, n2, n3 = 0, 0, 0
if(cps.has_key('C1')): n1 = cps['C1']['N_pcs']
if(cps.has_key('C2')): n2 = cps['C2']['N_pcs']
if(cps.has_key('C3')): n3 = cps['C3']['N_pcs']
N_pcs = n1 + n2 + n3
# Make array
pcs = np.empty(shape = (N_pcs, 6), dtype = 'f4')
if n1 != 0: pcs[:n1] = cps['C1']['pcs']
if n2 != 0: pcs[n1: n1 + n2] = cps['C2']['pcs']
if n3 != 0: pcs[n1 + n2: n1 + n2 + n3] = cps['C1']['pcs']
# prepare header,
header = np.empty(1, dtype = header_dtype)
header[0]['n1'] = n1
header[0]['n2'] = n2
header[0]['n3'] = n3
header[0]['ncoor'] = 6
header[0]['np'] = 5000
header[0]['time'] = snap['time']
header[0]['pm'] = snap['pm']
header[0]['pertbn'] = 0
# open a file, write the header
pcs_fs = ufmt(filename, 'w')
pcs_fs.write_record(header)
# write pcs array in batches of 5k ptcls
N_put, chunk_size = 0, 5000 * 6
pcs = pcs.reshape((-1,)) # into 1d array
while N_put < N_pcs * 6:
chunk_t = pcs[N_put: N_put + chunk_size]
pcs_fs.write_record(chunk_t)
N_put += chunk_t.size
pcs_fs.close()
return 0
def read_snap_galaxy_pcs(filename):
pcs_ds = ufmt(filename, 'r')
header = pcs_ds.read_record(dtype = header_dtype)[0]
# read header info / GALAXY 14.50 Manual, 9.2
n1, n2, n3 = header['n1'], header['n2'], header['n3']
N_pcs = n1 + n2 + n3
chunk_size = header['ncoor'] * header['np']
# assume 3D problem with equal-mass particles for each component
assert header['ncoor'] == 6
# read ptcls in batches
N_get = 0
pcs = np.empty(N_pcs * 6, dtype = 'f4')
while N_get < N_pcs * 6:
chunk_t = pcs_ds.read_reals(dtype = 'f4')
pcs[N_get: N_get + chunk_size] = chunk_t
N_get += chunk_t.size
pcs = pcs.reshape((-1, 6))
pcs_ds.close()
# Make them into components
snap = {'cps' : {},
'pm' : header['pm'],
'time': header['time']}
if n1 != 0: # component 1 has mtcls
snap['cps']['C1'] = {'N_pcs': n1,
'pm' : header['pm'],
'pcs' : pcs[:n1]}
if n2 != 0: # component 2 has ptcls
snap['cps']['C2'] = {'N_pcs': n2,
'pm' : header['pm'],
'pcs' : pcs[n1: n1 + n2]}
if n3 != 0: # component 3 has ptcls
snap['cps']['C3'] = {'N_pcs': n3,
'pm' : header['pm'],
'pcs' : pcs[n1 + n2: n1 + n2 + n3]}
return snap
# diff test
if False:
import os # for diff
dic = read_snap_galaxy_pcs('run999.pcs0')
save_snap_galaxy_pcs('test.pcs0', dic)
df = os.system('diff run999.pcs0 test.pcs0')
if(df): print "diff test failed."
else: print "diff test passed."
| shiaki/iterative-modelling | src/pcs_snap.py | Python | bsd-3-clause | 3,204 | [
"Galaxy"
] | ac0f003396f871e895f974c18824d2fe1b2cfdda376060abfe744665f7736b9d |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import tempfile
import mdtraj as md
import numpy as np
from mdtraj.utils.six.moves import cPickle
from mdtraj.utils import import_
from mdtraj.testing import get_fn, eq, skipif, assert_raises
try:
from simtk.openmm import app
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
try:
import pandas as pd
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@skipif(not HAVE_OPENMM)
def test_topology_openmm():
topology = md.load(get_fn('1bpi.pdb')).topology
# the openmm trajectory doesn't have the distinction
# between resSeq and index, so if they're out of whack
# in the openmm version, that cant be preserved
for residue in topology.residues:
residue.resSeq = residue.index
mm = topology.to_openmm()
assert isinstance(mm, app.Topology)
topology2 = md.Topology.from_openmm(mm)
eq(topology, topology2)
@skipif(not HAVE_OPENMM)
def test_topology_openmm_boxes():
u = import_('simtk.unit')
traj = md.load(get_fn('1vii_sustiva_water.pdb'))
mmtop = traj.topology.to_openmm(traj=traj)
box = mmtop.getUnitCellDimensions() / u.nanometer
@skipif(not HAVE_PANDAS)
def test_topology_pandas():
topology = md.load(get_fn('native.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
topology3 = md.Topology.from_dataframe(atoms) # Make sure you default arguement of None works, see issue #774
@skipif(not HAVE_PANDAS)
def test_topology_pandas_TIP4PEW():
topology = md.load(get_fn('GG-tip4pew.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_numbers():
topology = md.load(get_fn('1bpi.pdb')).topology
assert len(list(topology.atoms)) == topology.n_atoms
assert len(list(topology.residues)) == topology.n_residues
assert all([topology.atom(i).index == i for i in range(topology.n_atoms)])
@skipif(not HAVE_PANDAS)
def test_topology_unique_elements_bpti():
traj = md.load(get_fn('bpti.pdb'))
top, bonds = traj.top.to_dataframe()
atoms = np.unique(["C", "O", "N", "H", "S"])
eq(atoms, np.unique(top.element.values))
def test_chain():
top = md.load(get_fn('bpti.pdb')).topology
chain = top.chain(0)
assert chain.n_residues == len(list(chain.residues))
atoms = list(chain.atoms)
assert chain.n_atoms == len(atoms)
for i in range(chain.n_atoms):
assert atoms[i] == chain.atom(i)
def test_residue():
top = md.load(get_fn('bpti.pdb')).topology
residue = top.residue(0)
assert len(list(residue.atoms)) == residue.n_atoms
atoms = list(residue.atoms)
for i in range(residue.n_atoms):
assert residue.atom(i) == atoms[i]
def test_nonconsective_resSeq():
t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
yield lambda : eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
df1 = t.top.to_dataframe()
df2 = md.Topology.from_dataframe(*df1).to_dataframe()
yield lambda : eq(df1[0], df2[0])
# round-trip through a PDB load/save loop
fd, fname = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
t.save(fname)
t2 = md.load(fname)
yield lambda : eq(df1[0], t2.top.to_dataframe()[0])
os.unlink(fname)
def test_pickle():
# test pickling of topology (bug #391)
cPickle.loads(cPickle.dumps(md.load(get_fn('bpti.pdb')).topology))
def test_atoms_by_name():
top = md.load(get_fn('bpti.pdb')).topology
atoms = list(top.atoms)
for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
assert atom1 == atom2
assert atom1 in atoms
assert atom1.name == 'CA'
assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
assert_raises(KeyError, lambda: top.residue(15).atom('sdfsdsdf'))
def test_select_atom_indices():
top = md.load(get_fn('native.pdb')).topology
yield lambda: eq(top.select_atom_indices('alpha'), np.array([8]))
yield lambda: eq(top.select_atom_indices('minimal'),
np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
assert_raises(ValueError, lambda: top.select_atom_indices('sdfsdfsdf'))
@skipif(not HAVE_OPENMM)
def test_top_dataframe_openmm_roundtrip():
t = md.load(get_fn('2EQQ.pdb'))
top, bonds = t.top.to_dataframe()
t.topology = md.Topology.from_dataframe(top, bonds)
omm_top = t.top.to_openmm()
def test_n_bonds():
t = md.load(get_fn('2EQQ.pdb'))
for atom in t.top.atoms:
if atom.element.symbol == 'H':
assert atom.n_bonds == 1
elif atom.element.symbol == 'C':
assert atom.n_bonds in [3, 4]
elif atom.element.symbol == 'O':
assert atom.n_bonds in [1, 2]
def test_load_unknown_topology():
try:
md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
except IOError as e:
# we want to make sure there's a nice error message than includes
# a list of the supported topology formats.
assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
else:
assert False # fail
def test_select_pairs_args():
traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
assert len(traj.top.select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
assert (eq(traj.top.select_pairs(selection1="(name O) or (name =~ 'H.*')", selection2="(name O) or (name =~ 'H.*')").sort(),
traj.top.select_pairs(selection1='all', selection2='all').sort()))
assert (eq(traj.top.select_pairs(selection1="name O", selection2="name H1").sort(),
traj.top.select_pairs(selection1="name H1", selection2="name O").sort()))
assert (eq(traj.top.select_pairs(selection1=range(traj.n_atoms), selection2="(name O) or (name =~ 'H.*')").sort(),
traj.top.select_pairs(selection1='all', selection2='all').sort()))
def test_to_fasta():
t = md.load(get_fn('2EQQ.pdb'))
assert t.topology.to_fasta(0) == "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
| hainm/mdtraj | mdtraj/tests/test_topology.py | Python | lgpl-2.1 | 7,339 | [
"MDTraj",
"OpenMM"
] | 80293481198b6145b697195e061ac7c10fee20ab40b7ea46db45c992e166f0bc |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
from . import run_context
from . import submit
| google/lecam-gan | third_party/dnnlib/submission/__init__.py | Python | apache-2.0 | 274 | [
"VisIt"
] | e53c22c853c105651d0bc238126c8ed0136f9b2107221857b533c7ffef9db5fc |
# -*- coding: utf-8 -*-
# Part of the psychopy.iohub library.
# Copyright (C) 2012-2016 iSolver Software Solutions
# Distributed under the terms of the GNU General Public License (GPL).
"""iohub wintab util objects / functions for stylus, position traces,
and validation process psychopy graphics.
"""
from __future__ import division, absolute_import
import math
from collections import OrderedDict
import numpy as np
from psychopy import visual, core
from psychopy.visual.basevisual import MinimalStim
class PenPositionStim(MinimalStim):
    """Draws the current pen x,y position with graphics that represent the
    pressure, z axis, and tilt data for the wintab sample used.

    Visuals: a gaussian blob whose colour reflects touch state, whose size
    reflects pen pressure, and whose opacity reflects hover height (z),
    plus a line indicating pen tilt.
    """

    def __init__(self, win, name=None, autoLog=None, depth=-10000):
        # :param win: psychopy Window the graphics are drawn to
        # :param depth: draw depth, kept for MinimalStim compatibility
        self.win = win
        self.depth = depth
        super(PenPositionStim, self).__init__(name, autoLog)

        # Pen Hovering Related
        # Opacity is changed based on pen's z axis if data for z axis
        # is available. Opacity of min_opacity is used when pen is at the
        # furthest hover distance (z value) supported by the device.
        # Opacity of 1.0 is used when z value == 0, meaning pen is touching
        # digitizer surface.
        self.min_opacity = 0.0
        # If z axis is supported, hover_color specifies the color of the pen
        # position dot when z val > 0.
        self.hover_color = 'red'

        # Pen Pressure Related
        # Smallest radius (in norm units) that the pen position gaussian blob
        # will have, which occurs when pen pressure value is 0
        self.min_size = 0.033
        # As pen pressure value increases, so does position gaussian blob
        # radius (in norm units). Max radius is reached when pressure is
        # at max device pressure value, and is equal to min_size+size_range
        self.size_range = 0.1666
        # Color of pen position blob when pressure > 0.
        self.touching_color = 'green'

        # Pen tilt Related
        # Color of line graphic used to represent the pens tilt relative to
        # the digitizer surface.
        self.tiltline_color = (1, 1, 0)

        # Create a Gaussian blob stim to use for pen position graphic
        self.pen_guass = visual.PatchStim(win, units='norm', tex='none',
                                          mask='gauss', pos=(0, 0),
                                          size=(self.min_size, self.min_size),
                                          color=self.hover_color,
                                          autoLog=False,
                                          opacity=0.0)

        # Create a line stim to use for pen tilt graphic
        self.pen_tilt_line = visual.Line(win, units='norm', start=[0, 0],
                                         end=[0.5, 0.5],
                                         lineColor=self.tiltline_color,
                                         opacity=0.0)

    def updateFromEvent(self, evt):
        """Update the pen position and tilt graphics based on the data from
        a wintab sample event.

        :param evt: iohub wintab sample event
        :return: None
        """
        # update the pen position stim based on
        # the last tablet event's data
        if evt.pressure > 0:
            # pen is touching tablet surface
            self.pen_guass.color = self.touching_color
        else:
            # pen is hovering just above tablet surface
            self.pen_guass.color = self.hover_color

        if evt.device.axis['pressure']['supported']:
            # change size of pen position blob based on the sample's
            # pressure value, normalized to the device's pressure range
            pnorm = evt.pressure / evt.device.axis['pressure']['range']
            self.pen_guass.size = self.min_size + pnorm * self.size_range

        # set the position of the gauss blob to be the pen x,y value converted
        # to norm screen coords.
        self.pen_guass.pos = evt.getNormPos()

        # if supported, update all graphics' opacity based on the sample's
        # z value; otherwise opacity is always 1.0
        if evt.device.axis['z']['supported']:
            z = evt.device.axis['z']['range'] - evt.z
            znorm = z / evt.device.axis['z']['range']
            sopacity = self.min_opacity + znorm * (1.0 - self.min_opacity)
            self.pen_guass.opacity = self.pen_tilt_line.opacity = sopacity
        else:
            self.pen_guass.opacity = self.pen_tilt_line.opacity = 1.0

        # Change the tilt line start position to == pen position
        self.pen_tilt_line.start = self.pen_guass.pos

        # Change the tilt line end position based on the sample's tilt value.
        # If tilt is not supported, it will always return 0,0
        # so no line is drawn.
        t1, t2 = evt.tilt
        pen_tilt_xy = 0, 0
        if t1 != t2 != 0:
            pen_tilt_xy = t1 * math.sin(t2), t1 * math.cos(t2)
        pen_pos = self.pen_guass.pos
        tiltend = (pen_pos[0] + pen_tilt_xy[0], pen_pos[1] + pen_tilt_xy[1])
        self.pen_tilt_line.end = tiltend

    def draw(self):
        """Draw the PenPositionStim to the opengl back buffer. This needs
        to be called prior to calling win.flip() if the stim is to be
        displayed.

        :return: None
        """
        self.pen_guass.draw()
        self.pen_tilt_line.draw()

    def clear(self):
        """Hide the graphics on the screen, even if they are drawn, by
        setting opacity to 0.

        :return: None
        """
        self.pen_guass.opacity = 0.0
        self.pen_tilt_line.opacity = 0.0

    def __del__(self):
        # Break the reference to the window to help garbage collection.
        self.win = None
class PenTracesStim(MinimalStim):
    """Graphics representing where the pen has been moved on the digitizer
    surface. Positions where sample pressure > 0 are included.

    Implemented as a list of visual.ShapeStim, each representing a
    single pen trace/segment (series of pen samples with pressure >
    0). For improved performance, a single pen trace can have
    max_trace_len points before a new ShapeStim is created and made
    the 'current' pen trace.
    """

    def __init__(self, win, maxlen=256, name=None, autoLog=None, depth=-1000):
        self.depth = depth
        self.win = win
        super(PenTracesStim, self).__init__(name, autoLog)
        # A single pen trace can have at most max_trace_len points.
        self.max_trace_len = maxlen
        # The list of ShapeStim representing pen traces
        self.pentracestim = []
        # The ShapeStim that new / upcoming position points will be added to.
        self.current_pentrace = None
        # A list representation of the current_pentrace.vertices
        self.current_points = []
        # The last pen position added to a pen trace.
        self.last_pos = [0, 0]

    @property
    def traces(self):
        """List of np arrays, each np array is the set of vertices for one
        pen trace.

        :return: list
        """
        return [pts.vertices for pts in self.pentracestim]

    def updateFromEvents(self, sample_events):
        """Update the stim graphics based on 0 - n pen sample events.

        :param sample_events: iterable of iohub wintab sample events
        :return: None
        """
        for pevt in sample_events:
            # A fresh hover entry ends any in-progress trace.
            if 'FIRST_ENTER' in pevt.status:
                self.end()
            if pevt.pressure > 0:
                lpx, lpy = self.last_pos
                px, py = pevt.getPixPos(self.win)
                # Only record a point when the pen actually moved.
                if lpx != px or lpy != py:
                    if len(self.current_points) >= self.max_trace_len:
                        # Current trace is full; start a new one, seeding it
                        # with the previous point so segments join visually.
                        self.end()
                        self.append((lpx, lpy))
                    self.last_pos = (px, py)
                    self.append(self.last_pos)
            else:
                # Pen lifted: close the current trace.
                self.end()

    def draw(self):
        """Draws each pen trace ShapeStim to the opengl back buffer. This
        method must be called prior to calling win.flip() if it is to
        appear on the screen.

        :return: None
        """
        for pts in self.pentracestim:
            pts.draw()

    def start(self, first_point):
        """Start a new pen trace, by creating a new ShapeStim, adding it to
        the pentracestim list, and making it the current_pentrace.

        :param first_point: the first point in the ShapeStim being created.
        :return: None
        """
        self.end()
        self.current_points.append(first_point)
        self.current_pentrace = visual.ShapeStim(self.win,
                                                 units='pix',
                                                 lineWidth=2,
                                                 lineColor=(-1, -1, -1),
                                                 lineColorSpace='rgb',
                                                 vertices=self.current_points,
                                                 closeShape=False,
                                                 pos=(0, 0),
                                                 size=1,
                                                 ori=0.0,
                                                 opacity=1.0,
                                                 interpolate=True)
        self.pentracestim.append(self.current_pentrace)

    def end(self):
        """Stop using the current_pentrace ShapeStim. Next time a pen
        sample position is added to the PenTracesStim instance, a new
        ShapeStim will be created and added to the pentracestim list.

        :return: None
        """
        self.current_pentrace = None
        self.current_points = []
        self.last_pos = [0, 0]

    def append(self, pos):
        """Add a pen position (in pix coords) to the current_pentrace
        ShapeStim vertices.

        :param pos: (x,y) tuple
        :return: None
        """
        if self.current_pentrace is None:
            self.start(pos)
        else:
            self.current_points.append(pos)
            self.current_pentrace.vertices = self.current_points

    def clear(self):
        """Remove all ShapeStim being used. Next time this stim is drawn,
        no pen traces will exist.

        :return:
        """
        self.end()
        del self.pentracestim[:]

    def __del__(self):
        self.clear()
        self.win = None
#
# Pen position validation process code
#
class ScreenPositionValidation(object):
    # Samples (pressure > 0, inside the target) required to validate a point.
    NUM_VALID_SAMPLES_PER_TARG = 100
    # Seconds to wait at one target position before marking it failed.
    TARGET_TIMEOUT = 10.0

    def __init__(self, win, io, target_stim=None, pos_grid=None,
                 display_pen_pos=True, force_quit=True, intro_title=None,
                 intro_text1=None, intro_text2=None, intro_target_pos=None):
        """ScreenPositionValidation is used to perform a pen position
        accuracy test for an iohub wintab device.

        NOTE(review): `basestring` below is Python-2 only; on Python 3 these
        isinstance checks raise NameError. The attribute name
        `_finsihedScreenGraphics` ('finsihed') is misspelled but used
        consistently throughout this class.

        :param win: psychopy Window instance to use for the validation graphics
        :param io: iohub connection instance
        :param target_stim: None to use default, or psychopy.iohub.util.targetpositionsequence.TargetStim instance
        :param pos_grid: None to use default, or psychopy.iohub.util.targetpositionsequence.PositionGrid instance
        :param display_pen_pos: True to add calculated pen position graphic
        :param force_quit: Not Used
        :param intro_title: None to use default, str or unicode to set the text used for the introduction screen title, or an instance of psychopy.visual.TextStim
        :param intro_text1: None to use default, str or unicode to set the text used for the introduction text part 1, or an instance of psychopy.visual.TextStim
        :param intro_text2: None to use default, str or unicode to set the text used for the introduction text part 2, or an instance of psychopy.visual.TextStim
        :param intro_target_pos: None to use default, or (x,y) position to place the target graphic on the introduction screen. (x,y) position must be specified in 'norm' coordinate space.
        :return:
        """
        from psychopy.iohub.util.targetpositionsequence import TargetStim, PositionGrid
        self.win = win
        self.io = io
        self._lastPenSample = None
        self._targetStim = target_stim
        self._positionGrid = pos_grid
        self._forceQuit = force_quit
        self._displayPenPosition = display_pen_pos

        # IntroScreen Graphics
        intro_graphics = self._introScreenGraphics = OrderedDict()

        # Title Text (replaced / overridden by intro_title when provided)
        title_stim = visual.TextStim(self.win, units='norm',
                                     pos=(0, .9),
                                     height=0.1,
                                     text='Pen Position Validation')
        if isinstance(intro_title, basestring):
            title_stim.setText(intro_title)
        elif isinstance(intro_title, visual.TextStim):
            title_stim = intro_title
        intro_graphics['title'] = title_stim

        # Intro Text part 1
        text1_stim = visual.TextStim(self.win, units='norm',
                                     pos=(0, .65),
                                     height=0.05,
                                     text='On the following screen, '
                                          'press the pen on the target '
                                          'graphic when it appears, '
                                          'as accurately as '
                                          'possible, until the target '
                                          'moves to a different '
                                          'location. Then press at the '
                                          'next target location. '
                                          'Hold the stylus in exactly '
                                          'the same way as you would '
                                          'hold a pen for normal '
                                          'handwriting.',
                                     wrapWidth=1.25
                                     )
        if isinstance(intro_text1, basestring):
            text1_stim.setText(intro_text1)
        elif isinstance(intro_text1, visual.TextStim):
            text1_stim = intro_text1
        intro_graphics['text1'] = text1_stim

        # Intro Text part 2
        text2_stim = visual.TextStim(self.win, units='norm',
                                     pos=(0, -0.2),
                                     height=0.066,
                                     color='green',
                                     text='Press the pen on the above '
                                          'target to start the '
                                          'validation, or the ESC key '
                                          'to skip the procedure.')
        if isinstance(intro_text2, basestring):
            text2_stim.setText(intro_text2)
        elif isinstance(intro_text2, visual.TextStim):
            text2_stim = intro_text2
        intro_graphics['text2'] = text2_stim

        self._penStim = None
        if self._displayPenPosition:
            # Validation Screen Graphics: small dot following the pen.
            self._penStim = visual.Circle(self.win,
                                          radius=4,
                                          fillColor=[255, 0, 0],
                                          lineColor=[255, 0, 0],
                                          lineWidth=0,
                                          edges=8,  # int(np.pi*radius),
                                          units='pix',
                                          lineColorSpace='rgb255',
                                          fillColorSpace='rgb255',
                                          opacity=0.9,
                                          contrast=1,
                                          interpolate=True,
                                          autoLog=False)

        if self._targetStim is None:
            # Default target: small grey circle with a white center dot.
            self._targetStim = TargetStim(win,
                                          radius=16,
                                          fillcolor=[64, 64, 64],
                                          edgecolor=[192, 192, 192],
                                          edgewidth=1,
                                          dotcolor=[255, 255, 255],
                                          dotradius=3,
                                          units='pix',
                                          colorspace='rgb255',
                                          opacity=1.0,
                                          contrast=1.0
                                          )
        if intro_target_pos:
            self._targetStim.setPos(intro_target_pos)
        intro_graphics['target'] = self._targetStim

        if self._positionGrid is None:
            # Default: 3x3 grid covering 90% of the window.
            self._positionGrid = PositionGrid(
                winSize=win.monitor.getSizePix(),
                shape=[
                    3,
                    3],
                scale=0.9,
                posList=None,
                noiseStd=None,
                firstposindex=0,
                repeatfirstpos=True)

        # Finished-screen graphics (shown after the validation sequence).
        finished_graphics = self._finsihedScreenGraphics = OrderedDict()
        finished_graphics['title'] = visual.TextStim(
            self.win, units='norm', pos=(
                0, .9), height=0.1, text='Validation Complete')
        finished_graphics['result_status'] = visual.TextStim(
            self.win, units='norm', pos=(
                0, .7), height=0.07, color='blue', text='Result: {}')
        finished_graphics['result_stats'] = visual.TextStim(self.win, units='norm', pos=(
            0, .6), height=0.05, text='{}/{} Points Validated. Min, Max, Mean Errors: {}, {}, {}')
        finished_graphics['exit_text'] = visual.TextStim(
            self.win, units='norm', pos=(
                0, .5), height=0.05, text='Press any key to continue...')

    @property
    def targetStim(self):
        # The TargetStim shown at each validation position.
        return self._targetStim

    @targetStim.setter
    def targetStim(self, ts):
        self._targetStim = ts

    @property
    def positionGrid(self):
        # The PositionGrid providing validation target positions.
        return self._positionGrid

    @positionGrid.setter
    def positionGrid(self, ts):
        self._positionGrid = ts

    def _enterIntroScreen(self):
        """Show the intro screen until the user presses the pen on the
        target (to start) or hits ESC (to skip).

        :return: True to continue with validation, False if skipped.
        """
        kb = self.io.devices.keyboard
        pen = self.io.devices.tablet

        exit_screen = False
        hitcount = 0
        pen.reporting = True
        kb.getPresses()
        while exit_screen is False:
            for ig in self._introScreenGraphics.values():
                ig.draw()
            samples = pen.getSamples()
            if samples:
                self._drawPenStim(samples[-1])
                spos = samples[-1].getPixPos(self.win)
                # Require ~10 consecutive in-target pressing samples so a
                # stray touch does not start the validation.
                if samples[-1].pressure > 0 and \
                        self._introScreenGraphics['target'].contains(spos):
                    if hitcount > 10:
                        exit_screen = True
                    hitcount = hitcount + 1
                else:
                    hitcount = 0
            self.win.flip()
            if 'escape' in kb.getPresses():
                exit_screen = True
                pen.reporting = False
                return False
        pen.reporting = False
        return True

    def _enterValidationSequence(self):
        """Run the target sequence and collect pen accuracy statistics.

        :return: dict with per-target stats and overall min/max/avg errors.
        """
        val_results = dict(target_data=dict(), avg_err=0, min_err=1000,
                           max_err=-1000, status='PASSED', point_count=0,
                           ok_point_count=0)

        self._lastPenSample = None

        kb = self.io.devices.keyboard
        pen = self.io.devices.tablet

        self._positionGrid.randomize()

        pen.reporting = True

        for tp in self._positionGrid:
            self._targetStim.setPos(tp)
            self._targetStim.draw()
            targ_onset_time = self.win.flip()
            pen.clearEvents()

            val_sample_list = []
            while len(val_sample_list) < self.NUM_VALID_SAMPLES_PER_TARG:
                # Give up on this target after TARGET_TIMEOUT seconds.
                if core.getTime() - targ_onset_time > self.TARGET_TIMEOUT:
                    break
                self._targetStim.draw()

                samples = pen.getSamples()
                for s in samples:
                    spos = s.getPixPos(self.win)
                    if s.pressure > 0 and self.targetStim.contains(spos):
                        # Euclidean distance from target center, in pixels.
                        dx = math.fabs(tp[0] - spos[0])
                        dy = math.fabs(tp[1] - spos[1])
                        perr = math.sqrt(dx * dx + dy * dy)
                        val_sample_list.append((spos[0], spos[1], perr))
                    else:
                        # Any off-target / no-pressure sample resets the
                        # collection for this target.
                        val_sample_list = []
                if samples:
                    self._drawPenStim(samples[-1])
                    self._lastPenSample = samples[-1]
                elif self._lastPenSample:
                    self._drawPenStim(self._lastPenSample)
                self.win.flip()

            tp = int(tp[0]), int(tp[1])
            val_results['target_data'][tp] = None
            val_results['point_count'] = val_results['point_count'] + 1

            if val_sample_list:
                pos_acc_array = np.asarray(val_sample_list)
                serr_array = pos_acc_array[:, 2]

                targ_err_stats = val_results['target_data'][tp] = dict()
                targ_err_stats['samples'] = pos_acc_array
                targ_err_stats['count'] = len(val_sample_list)
                targ_err_stats['min'] = serr_array.min()
                targ_err_stats['max'] = serr_array.max()
                targ_err_stats['mean'] = serr_array.mean()
                targ_err_stats['median'] = np.median(serr_array)
                targ_err_stats['stdev'] = serr_array.std()

                val_results['min_err'] = min(
                    val_results['min_err'], targ_err_stats['min'])
                val_results['max_err'] = max(
                    val_results['max_err'], targ_err_stats['max'])

                val_results['avg_err'] = val_results[
                    'avg_err'] + targ_err_stats['mean']
                val_results['ok_point_count'] = val_results[
                    'ok_point_count'] + 1
            else:
                # Timed out without enough valid samples for this target.
                val_results['status'] = 'FAILED'

            self._lastPenSample = None

        if val_results['ok_point_count'] > 0:
            # avg_err accumulated per-target means; convert to grand mean.
            val_results['avg_err'] = val_results[
                'avg_err'] / val_results['ok_point_count']

        pen.reporting = False

        return val_results

    def _enterFinishedScreen(self, results):
        """Display the validation result summary until any key is pressed.

        :param results: dict returned by _enterValidationSequence
        :return: None
        """
        kb = self.io.devices.keyboard
        status = results['status']
        ok_point_count = results['ok_point_count']
        min_err = results['min_err']
        max_err = results['max_err']
        avg_err = results['avg_err']
        point_count = results['point_count']
        self._finsihedScreenGraphics['result_status'].setText(
            'Result: {}'.format(status))
        self._finsihedScreenGraphics['result_stats'].setText(
            '%d/%d '
            'Points Validated.'
            'Min, Max, Mean '
            'Errors: '
            '%.3f, %.3f, %.3f'
            '' %
            (ok_point_count, point_count, min_err, max_err, avg_err))
        for ig in self._finsihedScreenGraphics.values():
            ig.draw()
        self.win.flip()
        kb.clearEvents()
        while not kb.getPresses():
            for ig in self._finsihedScreenGraphics.values():
                ig.draw()
            self.win.flip()

    def _drawPenStim(self, s):
        """Draw the pen-position dot for sample `s` (red when hovering,
        blue when pressing), if display_pen_pos was enabled."""
        if self._displayPenPosition:
            spos = s.getPixPos(self.win)
            if spos:
                self._penStim.setPos(spos)
                if s.pressure == 0:
                    self._penStim.setFillColor([255, 0, 0])
                    self._penStim.setLineColor([255, 0, 0])
                else:
                    self._penStim.setFillColor([0, 0, 255])
                    self._penStim.setLineColor([0, 0, 255])
                self._penStim.draw()

    def run(self):
        """Starts the validation process. This function will not return
        until the validation is complete. The validation results are
        returned in dict format, or None if the user skipped the
        procedure on the intro screen.

        :return: dict containing validation results.
        """
        continue_val = self._enterIntroScreen()

        if continue_val is False:
            return None

        # delay about 0.5 sec before starting validation
        ftime = self.win.flip()
        while core.getTime() - ftime < 0.5:
            self.win.flip()
        self.io.clearEvents()

        val_results = self._enterValidationSequence()

        # delay about 0.5 sec before showing validation end screen
        ftime = self.win.flip()
        while core.getTime() - ftime < 0.5:
            self.win.flip()
        self.io.clearEvents()

        self._enterFinishedScreen(val_results)
        self.io.clearEvents()
        self.win.flip()
        return val_results
        # returning None indicates to experiment that the validation process
        # was terminated by the user
        # return None

    def free(self):
        """Release references held by this object so the window and iohub
        connection can be garbage collected."""
        self.win = None
        self.io = None
        self._finsihedScreenGraphics.clear()
        self._introScreenGraphics.clear()
        self._targetStim = None
        self._penStim = None

    def __del__(self):
        self.free()
| isolver/OpenHandWrite | distribution/getwrite/experiments/Builder_AV_Example/wintabgraphics.py | Python | gpl-3.0 | 25,146 | [
"Gaussian"
] | 3bab5073b90330394763dd1cf1751a0ee0e8db83f8dae39f6e269eec4d765850 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# models.py
#
# Copyright 2014 Gary Dalton <gary@ggis.biz>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
Module models.py documentation
Sets up the models for Twenty47
"""
import datetime
from twenty47 import db, app, subscription_updated
from flask.ext.security import Security, MongoEngineUserDatastore, \
UserMixin, RoleMixin
from flask_security.forms import RegisterForm, Required, TextField
class Subscriber(db.EmbeddedDocument):
    """Embedded notification settings for a User.

    Records which delivery methods are selected, the delivery endpoints,
    whether delivery is enabled, and the approval status.
    """
    # Valid values for the `status` field.
    SUBSCRIBER_STATUS = ("NONE", "DENIED", "PENDING", "APPROVED")
    # (stored value, display label) choices for `methods`.
    METHOD = (('None', 'None'), ('Email', 'Email'), ('SMS Phone', 'SMS Phone'), ('Both', 'Both'))
    methods = db.StringField(max_length=100, choices=METHOD, default='None')
    email = db.EmailField()
    email_arn = db.StringField(max_length=250)  # presumably an SNS subscription ARN -- confirm
    smsPhone = db.StringField(max_length=100)
    sms_arn = db.StringField(max_length=250)  # presumably an SNS subscription ARN -- confirm
    enabled = db.BooleanField(default=False)
    status = db.StringField(max_length=50, default="NONE", choices=SUBSCRIBER_STATUS)
class Role(db.Document, RoleMixin):
    """
    Role class sets up the collection in mongoengine
    """
    name = db.StringField(max_length=80, unique=True)
    description = db.StringField()
    # Pass the callable, not its result: ``datetime.datetime.now()`` would be
    # evaluated once at import time, giving every Role the same timestamp.
    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    modified_at = db.DateTimeField()
class User(db.Document, UserMixin):
    """
    User class sets up the collection in mongoengine
    """
    firstName = db.StringField(max_length=200, required=True)
    lastName = db.StringField(max_length=200, required=True)
    password = db.StringField(max_length=255, required=True)
    email = db.EmailField(required=True, unique=True)
    comments = db.StringField()
    # Pass the callable, not its result: ``datetime.datetime.now()`` would be
    # evaluated once at import time, giving every User the same timestamp.
    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    modified_at = db.DateTimeField()
    active = db.BooleanField(default=False)
    confirmed_at = db.DateTimeField()
    last_login_at = db.DateTimeField()
    current_login_at = db.DateTimeField()
    last_login_ip = db.StringField(max_length=200)
    current_login_ip = db.StringField(max_length=200)
    login_count = db.IntField()
    # Use the ``list`` factory instead of a shared mutable ``[]`` literal.
    roles = db.ListField(db.ReferenceField(Role), default=list)
    subscription = db.EmbeddedDocumentField(Subscriber)
# Setup Flask-Security
class ExtendedRegisterForm(RegisterForm):
    """
    Extends the Flask-Security registration form to also collect the
    user's first and last name (both required).
    """
    firstName = TextField('First Name', [Required()])
    lastName = TextField('Last Name', [Required()])
class ExtendMEUserDatastore(MongoEngineUserDatastore):
    """MongoEngineUserDatastore variant that persists activation changes
    and broadcasts a `subscription_updated` signal whenever a user's
    active flag actually flips.
    """

    def activate_user(self, user):
        """Activate *user*; return True if the flag changed, else False.

        :param user: The user to activate
        """
        if user.active:
            return False
        user.active = True
        self.put(user)
        # Notify listeners that this user's subscription state changed.
        subscription_updated.send(app, user=user)
        return True

    def deactivate_user(self, user):
        """Deactivate *user*; return True if the flag changed, else False.

        :param user: The user to deactivate
        """
        if not user.active:
            return False
        user.active = False
        self.put(user)
        # Notify listeners that this user's subscription state changed.
        subscription_updated.send(app, user=user)
        return True
# Wire Flask-Security to MongoEngine using the extended datastore and
# registration form defined above.
user_datastore = ExtendMEUserDatastore(db, User, Role)
security = Security(app, user_datastore,
    confirm_register_form=ExtendedRegisterForm )
'''
Setup the mongo data needed for 247
'''
class IncidentType(db.Document):
    """Lookup collection of incident categories selectable on a Dispatch."""
    name = db.StringField(max_length=200, required=True)
    shortCode = db.StringField(max_length=30, required=True)
    order = db.IntField()  # presumably display/sort order -- confirm
class UnitsImpacted(db.Document):
    """Lookup collection of impacted-unit options selectable on a Dispatch."""
    name = db.StringField(max_length=200, required=True)
    shortCode = db.StringField(max_length=30, required=True)
    order = db.IntField()  # presumably display/sort order -- confirm
class AssistanceRequested(db.Document):
    """Lookup collection of assistance-request options selectable on a Dispatch."""
    name = db.StringField(max_length=200, required=True)
    shortCode = db.StringField(max_length=30, required=True)
    order = db.IntField()  # presumably display/sort order -- confirm
class Dispatch(db.Document):
    """A single dispatch/incident record entered by an operator."""
    operator = db.StringField(max_length=200, required=True)
    # Pass the callables, not their results: ``datetime.datetime.now()``
    # would be evaluated once at import time, so every Dispatch would share
    # the same incident/dispatch/created timestamps.
    incidentTime = db.DateTimeField(default=datetime.datetime.now, required=True)
    dispatchTime = db.DateTimeField(default=datetime.datetime.now, required=True)
    streetAddress = db.StringField(max_length=255, required=True)
    moreStreetAddress = db.StringField(max_length=255)
    city = db.StringField(max_length=200)
    state = db.StringField(max_length=2, default='WI', required=True)
    postalCode = db.StringField(max_length=20)
    county = db.StringField(max_length=200, required=True)
    incidentType = db.StringField(max_length=100)
    unitsImpacted = db.StringField(max_length=100)
    assistanceRequested = db.StringField(max_length=100)
    responderName = db.StringField(max_length=200)
    responderPhone = db.StringField(max_length=200)
    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
# Disabled example kept for reference: creates an initial user on the first
# request when uncommented (note the hard-coded credentials).
'''
# Create a user to test with
@app.before_first_request
def create_user():
user_datastore.create_user(firstName='Gary', lastName='Dalton', username='garyroot', password='garygoat', email='gary@ggis.biz')
'''
| gary-dalton/Twenty47 | twenty47/models.py | Python | mit | 6,040 | [
"Dalton"
] | c8aceb28b7de054610f0731219ec90a014c2797e45c7903271716c61b69d748d |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
import rl_data
import sym
import argparse
import logging
import os
import gym
from datetime import datetime
import time
# Command-line interface for the A3C example ("Traing" typo fixed in the
# user-facing description).
parser = argparse.ArgumentParser(description='Training A3C with OpenAI Gym')
parser.add_argument('--test', action='store_true', help='run testing', default=False)
parser.add_argument('--log-file', type=str, help='the name of log file')
parser.add_argument('--log-dir', type=str, default="./log", help='directory of the log file')
parser.add_argument('--model-prefix', type=str, help='the prefix of the model to load')
parser.add_argument('--save-model-prefix', type=str, help='the prefix of the model to save')
parser.add_argument('--load-epoch', type=int, help="load the model on an epoch using the model-prefix")
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
parser.add_argument('--gpus', type=str, help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--num-epochs', type=int, default=120, help='the number of training epochs')
parser.add_argument('--num-examples', type=int, default=1000000, help='the number of training examples')
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--input-length', type=int, default=4)
# Optimizer hyper-parameters.
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--wd', type=float, default=0)
# A3C rollout length, discount factor, and entropy-regularization weight.
parser.add_argument('--t-max', type=int, default=4)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--beta', type=float, default=0.08)
args = parser.parse_args()
def log_config(log_dir=None, log_file=None, prefix=None, rank=0):
    """Configure root logging for this worker.

    :param log_dir: directory for the log file; if falsy, log to console only
    :param log_file: log file name; auto-generated from `prefix` and the
        current date/time when None
    :param prefix: prefix for the auto-generated log file name
    :param rank: kvstore worker rank embedded in every log record
    """
    # NOTE(review): ``reload`` is a Python-2 builtin; on Python 3 this raises
    # NameError (importlib.reload would be needed). Reloading ``logging``
    # drops any handlers configured earlier in the process.
    reload(logging)
    head = '%(asctime)-15s Node[' + str(rank) + '] %(message)s'
    if log_dir:
        logging.basicConfig(level=logging.DEBUG, format=head)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        if not log_file:
            log_file = (prefix if prefix else '') + datetime.now().strftime('_%Y_%m_%d-%H_%M.log')
            # '/' may appear in the prefix; make the name filesystem-safe.
            log_file = log_file.replace('/', '-')
        else:
            log_file = log_file
        log_file_full_name = os.path.join(log_dir, log_file)
        handler = logging.FileHandler(log_file_full_name, mode='w')
        formatter = logging.Formatter(head)
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)
        logging.info('start with arguments %s', args)
    else:
        logging.basicConfig(level=logging.DEBUG, format=head)
        logging.info('start with arguments %s', args)
def train():
    """Train the A3C agent on Breakout-v0 using the parsed ``args``.

    Fixes relative to the original: integer (floor) division is used wherever
    the result feeds ``range()``, and the ``is 1`` identity comparison on an
    int literal is replaced with ``== 1``.
    """
    # kvstore
    kv = mx.kvstore.create(args.kv_store)

    model_prefix = args.model_prefix
    if model_prefix is not None:
        model_prefix += "-%d" % (kv.rank)
    save_model_prefix = args.save_model_prefix
    if save_model_prefix is None:
        save_model_prefix = model_prefix

    log_config(args.log_dir, args.log_file, save_model_prefix, kv.rank)

    devs = mx.cpu() if args.gpus is None else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]

    # Floor division: epoch_size must be an int (used in range() below).
    epoch_size = args.num_examples // args.batch_size

    if args.kv_store == 'dist_sync':
        epoch_size //= kv.num_workers

    # disable kvstore for single device
    if 'local' in kv.type and (
            args.gpus is None or len(args.gpus.split(',')) == 1):
        kv = None

    # module
    dataiter = rl_data.GymDataIter('Breakout-v0', args.batch_size, args.input_length, web_viz=True)
    net = sym.get_symbol_atari(dataiter.act_dim)
    module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)
    # grad_req='add' accumulates gradients across the t_max backward passes.
    module.bind(data_shapes=dataiter.provide_data,
                label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],
                grad_req='add')

    # load model
    if args.load_epoch is not None:
        assert model_prefix is not None
        _, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.load_epoch)
    else:
        arg_params = aux_params = None

    # Parameters are saved explicitly via module.save_params each epoch, so
    # no mx.callback.do_checkpoint callback is needed here.
    init = mx.init.Mixed(['fc_value_weight|fc_policy_weight', '.*'],
                         [mx.init.Uniform(0.001), mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)])
    module.init_params(initializer=init,
                       arg_params=arg_params, aux_params=aux_params)

    # optimizer
    module.init_optimizer(kvstore=kv, optimizer='adam',
                          optimizer_params={'learning_rate': args.lr, 'wd': args.wd, 'epsilon': 1e-3})

    # logging
    np.set_printoptions(precision=3, suppress=True)

    T = 0
    dataiter.reset()
    score = np.zeros((args.batch_size, 1))
    final_score = np.zeros((args.batch_size, 1))
    for epoch in range(args.num_epochs):
        if save_model_prefix:
            module.save_params('%s-%04d.params' % (save_model_prefix, epoch))

        for _ in range(epoch_size // args.t_max):
            tic = time.time()
            # clear accumulated gradients before the next rollout
            for exe in module._exec_group.grad_arrays:
                for g in exe:
                    g[:] = 0

            # Roll out t_max steps, recording states, actions, values,
            # rewards, and done flags (plus one extra value for bootstrap).
            S, A, V, r, D = [], [], [], [], []
            for t in range(args.t_max + 1):
                data = dataiter.data()
                module.forward(mx.io.DataBatch(data=data, label=None), is_train=False)
                act, _, val = module.get_outputs()
                V.append(val.asnumpy())
                if t < args.t_max:
                    act = act.asnumpy()
                    # Sample actions from the policy distribution.
                    act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]
                    reward, done = dataiter.act(act)
                    S.append(data)
                    A.append(act)
                    r.append(reward.reshape((-1, 1)))
                    D.append(done.reshape((-1, 1)))

            err = 0
            # Bootstrap the n-step return from the last value estimate and
            # walk the rollout backwards, applying policy + entropy grads.
            R = V[args.t_max]
            for i in reversed(range(args.t_max)):
                R = r[i] + args.gamma * (1 - D[i]) * R
                adv = np.tile(R - V[i], (1, dataiter.act_dim))

                batch = mx.io.DataBatch(data=S[i], label=[mx.nd.array(A[i]), mx.nd.array(R)])
                module.forward(batch, is_train=True)

                pi = module.get_outputs()[1]
                # Entropy bonus encourages exploration.
                h = -args.beta * (mx.nd.log(pi + 1e-7) * pi)
                out_acts = np.amax(pi.asnumpy(), 1)
                out_acts = np.reshape(out_acts, (-1, 1))
                out_acts_tile = np.tile(-np.log(out_acts + 1e-7), (1, dataiter.act_dim))
                module.backward([mx.nd.array(out_acts_tile * adv), h])
                print('pi', pi[0].asnumpy())
                print('h', h[0].asnumpy())
                err += (adv**2).mean()
                score += r[i]
                # Track per-episode scores: freeze into final_score on done.
                final_score *= (1 - D[i])
                final_score += score * D[i]
                score *= 1 - D[i]
                T += D[i].sum()
            module.update()
            logging.info('fps: %f err: %f score: %f final: %f T: %f' % (args.batch_size / (time.time() - tic), err / args.t_max, score.mean(), final_score.mean(), T))
            print(score.squeeze())
            print(final_score.squeeze())
def test():
    """Evaluate a saved model and log a running mean episode score.

    NOTE(review): references ``robo_data``, which is never imported in this
    file (only ``rl_data`` is) -- calling test() raises NameError as written.
    """
    log_config()

    devs = mx.cpu() if args.gpus is None else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]

    # module
    dataiter = robo_data.RobosimsDataIter('scenes', args.batch_size, args.input_length, web_viz=True)
    print(dataiter.provide_data)
    net = sym.get_symbol_thor(dataiter.act_dim)
    module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)
    module.bind(data_shapes=dataiter.provide_data,
                label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],
                for_training=False)

    # load model
    assert args.load_epoch is not None
    assert args.model_prefix is not None
    module.load_params('%s-%04d.params' % (args.model_prefix, args.load_epoch))

    # NOTE(review): on Python 3 this is true division (a float), but range()
    # below requires an int -- Python-2 era code.
    N = args.num_epochs * args.num_examples / args.batch_size

    R = 0
    # Tiny positive value avoids division by zero before any episode ends.
    T = 1e-20
    score = np.zeros((args.batch_size,))
    for t in range(N):
        dataiter.clear_history()
        data = dataiter.next()
        module.forward(data, is_train=False)
        act = module.get_outputs()[0].asnumpy()
        # Sample actions from the policy distribution.
        act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]
        dataiter.act(act)
        time.sleep(0.05)
        _, reward, _, done = dataiter.history[0]
        T += done.sum()
        score += reward
        # Accumulate completed-episode scores into R, then reset those slots.
        R += (done * score).sum()
        score *= (1 - done)
        if t % 100 == 0:
            logging.info('n %d score: %f T: %f' % (t, R / T, T))
if __name__ == '__main__':
    # Dispatch to evaluation or training based on the --test flag.
    entry_point = test if args.test else train
    entry_point()
| wangyum/mxnet | example/reinforcement-learning/a3c/a3c.py | Python | apache-2.0 | 9,642 | [
"Gaussian"
] | 15b341d9c0a7f779133548819465724bb222d11104a63c59d937991c6058e6ac |
# Orca
#
# Copyright 2016 Igalia, S.L.
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2016 Igalia, S.L."
__license__ = "LGPL"
from orca import speech_generator
class SpeechGenerator(speech_generator.SpeechGenerator):
    """Produces speech presentation for accessible objects in terminals."""

    def __init__(self, script):
        super().__init__(script)

    def _generateDescription(self, obj, **args):
        """Return an empty description list.

        The accessible description repeats the text already spoken for the
        page tab and is similar to (sometimes identical to) the prompt, so
        speaking it would be redundant.
        """
        return []
| GNOME/orca | src/orca/scripts/terminal/speech_generator.py | Python | lgpl-2.1 | 1,335 | [
"ORCA"
] | 5d746b615561ca4e4ba9eefb236c9f7c2d8a138695195539cc3031ea8c01248e |
# Time-stamp: <2019-09-25 10:15:20 taoliu>
"""Module Description
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import re
import logging
from argparse import ArgumentError
from subprocess import Popen, PIPE
from math import log
from MACS2.IO.Parser import BEDParser, ELANDResultParser, ELANDMultiParser, \
ELANDExportParser, SAMParser, BAMParser, BAMPEParser,\
BEDPEParser, BowtieParser, guess_parser
# ------------------------------------
# constants
# ------------------------------------
efgsize = {"hs":2.7e9,
"mm":1.87e9,
"ce":9e7,
"dm":1.2e8}
# ------------------------------------
# Misc functions
# ------------------------------------
def opt_validate ( options ):
    """Validate options from a OptParser object.

    Resolves the effective genome size and input-format parser, normalizes
    the cutoff settings, derives all output file paths, builds the textual
    argument summary and configures the logging aliases used by MACS2.

    Ret: Validated options object.
    """
    # gsize: accept a shortcut name (hs/mm/ce/dm) or an explicit number
    try:
        options.gsize = efgsize[options.gsize]
    except KeyError:
        try:
            options.gsize = float(options.gsize)
        except (ValueError, TypeError):
            logging.error("Error when interpreting --gsize option: %s" % options.gsize)
            logging.error("Available shortcuts of effective genome sizes are %s" % ",".join(list(efgsize.keys())))
            sys.exit(1)
    # format: choose the parser class; gzip_flag marks gzip-compressed input
    options.gzip_flag = False
    options.format = options.format.upper()
    if options.format == "ELAND":
        options.parser = ELANDResultParser
    elif options.format == "BED":
        options.parser = BEDParser
    elif options.format == "ELANDMULTI":
        options.parser = ELANDMultiParser
    elif options.format == "ELANDEXPORT":
        options.parser = ELANDExportParser
    elif options.format == "SAM":
        options.parser = SAMParser
    elif options.format == "BAM":
        options.parser = BAMParser
        options.gzip_flag = True
    elif options.format == "BAMPE":
        options.parser = BAMPEParser
        options.gzip_flag = True
        # paired-end input carries observed fragment sizes: skip model building
        options.nomodel = True
    elif options.format == "BEDPE":
        options.parser = BEDPEParser
        options.nomodel = True
    elif options.format == "BOWTIE":
        options.parser = BowtieParser
    elif options.format == "AUTO":
        options.parser = guess_parser
    else:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)
    # duplicate reads: 'auto', 'all' or a plain integer count
    if options.keepduplicates != "auto" and options.keepduplicates != "all":
        if not options.keepduplicates.isdigit():
            logging.error("--keep-dup should be 'auto', 'all' or an integer!")
            sys.exit(1)
    if options.extsize < 1 :
        logging.error("--extsize must >= 1!")
        sys.exit(1)
    # --broad cannot be combined with subpeak summit search
    if options.broad and options.call_summits:
        logging.error("--broad can't be combined with --call-summits!")
        sys.exit(1)
    if options.pvalue:
        # if set, ignore qvalue cutoff
        options.log_qvalue = None
        options.log_pvalue = log(options.pvalue,10)*-1
    else:
        options.log_qvalue = log(options.qvalue,10)*-1
        options.log_pvalue = None
    if options.broad:
        options.log_broadcutoff = log(options.broadcutoff,10)*-1
    # d_min is non-negative
    if options.d_min < 0:
        # bugfix: the message previously had no %d placeholder, so the "%"
        # formatting raised TypeError instead of reporting the problem
        logging.error("Minimum fragment size (%d) shouldn't be negative!" % options.d_min)
        sys.exit(1)
    # upper and lower mfold
    options.lmfold = options.mfold[0]
    options.umfold = options.mfold[1]
    if options.lmfold > options.umfold:
        # bugfix: same missing-placeholder problem as the d_min message above
        logging.error("Upper limit of mfold (%s) should be greater than lower limit!" % str(options.mfold))
        sys.exit(1)
    # output filenames
    options.peakxls = os.path.join( options.outdir, options.name+"_peaks.xls" )
    options.peakbed = os.path.join( options.outdir, options.name+"_peaks.bed" )
    options.peakNarrowPeak = os.path.join( options.outdir, options.name+"_peaks.narrowPeak" )
    options.peakBroadPeak = os.path.join( options.outdir, options.name+"_peaks.broadPeak" )
    options.peakGappedPeak = os.path.join( options.outdir, options.name+"_peaks.gappedPeak" )
    options.summitbed = os.path.join( options.outdir, options.name+"_summits.bed" )
    options.bdg_treat = os.path.join( options.outdir, options.name+"_treat_pileup.bdg" )
    options.bdg_control= os.path.join( options.outdir, options.name+"_control_lambda.bdg" )
    if options.cutoff_analysis:
        options.cutoff_analysis_file = os.path.join( options.outdir, options.name+"_cutoff_analysis.txt" )
    else:
        options.cutoff_analysis_file = "None"
    options.modelR = os.path.join( options.outdir, options.name+"_model.r" )
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    # human-readable argument summary written to the xls output
    options.argtxt = "\n".join((
        "# Command line: %s" % " ".join(sys.argv[1:]),\
        "# ARGUMENTS LIST:",\
        "# name = %s" % (options.name),\
        "# format = %s" % (options.format),\
        "# ChIP-seq file = %s" % (options.tfile),\
        "# control file = %s" % (options.cfile),\
        "# effective genome size = %.2e" % (options.gsize),\
        "# band width = %d" % (options.bw),\
        "# model fold = %s\n" % (options.mfold),\
        ))
    if options.pvalue:
        if options.broad:
            options.argtxt += "# pvalue cutoff for narrow/strong regions = %.2e\n" % (options.pvalue)
            options.argtxt += "# pvalue cutoff for broad/weak regions = %.2e\n" % (options.broadcutoff)
            options.argtxt += "# qvalue will not be calculated and reported as -1 in the final output.\n"
        else:
            options.argtxt += "# pvalue cutoff = %.2e\n" % (options.pvalue)
            options.argtxt += "# qvalue will not be calculated and reported as -1 in the final output.\n"
    else:
        if options.broad:
            options.argtxt += "# qvalue cutoff for narrow/strong regions = %.2e\n" % (options.qvalue)
            options.argtxt += "# qvalue cutoff for broad/weak regions = %.2e\n" % (options.broadcutoff)
        else:
            options.argtxt += "# qvalue cutoff = %.2e\n" % (options.qvalue)
    if options.maxgap:
        options.argtxt += "# The maximum gap between significant sites = %d\n" % options.maxgap
    else:
        options.argtxt += "# The maximum gap between significant sites is assigned as the read length/tag size.\n"
    if options.minlen:
        options.argtxt += "# The minimum length of peaks = %d\n" % options.minlen
    else:
        options.argtxt += "# The minimum length of peaks is assigned as the predicted fragment length \"d\".\n"
    if options.downsample:
        options.argtxt += "# Larger dataset will be randomly sampled towards smaller dataset.\n"
        if options.seed >= 0:
            options.argtxt += "# Random seed has been set as: %d\n" % options.seed
    else:
        if options.scaleto == "large":
            options.argtxt += "# Smaller dataset will be scaled towards larger dataset.\n"
        else:
            options.argtxt += "# Larger dataset will be scaled towards smaller dataset.\n"
        if options.ratio != 1.0:
            options.argtxt += "# Using a custom scaling factor: %.2e\n" % (options.ratio)
    if options.cfile:
        options.argtxt += "# Range for calculating regional lambda is: %d bps and %d bps\n" % (options.smalllocal,options.largelocal)
    else:
        options.argtxt += "# Range for calculating regional lambda is: %d bps\n" % (options.largelocal)
    if options.broad:
        options.argtxt += "# Broad region calling is on\n"
    else:
        options.argtxt += "# Broad region calling is off\n"
    if options.fecutoff != 1.0:
        options.argtxt += "# Additional cutoff on fold-enrichment is: %.2f\n" % (options.fecutoff)
    if options.format in ["BAMPE", "BEDPE"]:
        # paired-end: fragments are fully observed, so read shifting is moot
        options.shift = 0
        options.argtxt += "# Paired-End mode is on\n"
    else:
        options.argtxt += "# Paired-End mode is off\n"
    if options.call_summits:
        options.argtxt += "# Searching for subpeak summits is on\n"
    if options.do_SPMR and options.store_bdg:
        options.argtxt += "# MACS will save fragment pileup signal per million reads\n"
    return options
def diff_opt_validate ( options ):
    """Validate options from a OptParser object for `macs2 bdgdiff`.

    Normalizes the peak-calling and differential cutoffs (p- or q-value),
    derives output filenames, decides whether peaks must be (re)called,
    builds the textual argument summary and configures logging.

    Ret: Validated options object.
    """
    # format
    options.gzip_flag = False           # if the input is gzip file
    # options.format = options.format.upper()
    # fix this stuff
    # if True: pass
    # elif options.format == "AUTO":
    #     options.parser = guess_parser
    # else:
    #     logging.error("Format \"%s\" cannot be recognized!" % (options.format))
    #     sys.exit(1)
    # cutoff used when (re)calling peaks per condition
    if options.peaks_pvalue:
        # if set, ignore qvalue cutoff
        options.peaks_log_qvalue = None
        options.peaks_log_pvalue = log(options.peaks_pvalue,10)*-1
        options.track_score_method = 'p'
    else:
        options.peaks_log_qvalue = log(options.peaks_qvalue,10)*-1
        options.peaks_log_pvalue = None
        options.track_score_method = 'q'
    # cutoff used for the differential comparison itself
    if options.diff_pvalue:
        # if set, ignore qvalue cutoff
        options.log_qvalue = None
        options.log_pvalue = log(options.diff_pvalue,10)*-1
        options.score_method = 'p'
    else:
        options.log_qvalue = log(options.diff_qvalue,10)*-1
        options.log_pvalue = None
        options.score_method = 'q'
    # output filenames
    options.peakxls = options.name+"_diffpeaks.xls"
    options.peakbed = options.name+"_diffpeaks.bed"
    options.peak1xls = options.name+"_diffpeaks_by_peaks1.xls"
    options.peak2xls = options.name+"_diffpeaks_by_peaks2.xls"
    options.bdglogLR = options.name+"_logLR.bdg"
    # NOTE(review): bdgpvalue and bdglogFC reuse the "_logLR.bdg" suffix, so
    # all three attributes point at the same file -- looks like a copy/paste
    # slip; confirm the intended filenames before changing.
    options.bdgpvalue = options.name+"_logLR.bdg"
    options.bdglogFC = options.name+"_logLR.bdg"
    options.call_peaks = True
    # peaks are only taken as given when BOTH peak files were supplied
    if not (options.peaks1 == '' or options.peaks2 == ''):
        # NOTE(review): inside this guard neither peaks1 nor peaks2 can be
        # '', so both raise branches below are unreachable; ArgumentError is
        # also being constructed with a plain str where argparse expects an
        # action object -- confirm intent before touching.
        if options.peaks1 == '':
            raise ArgumentError('peaks1', 'Must specify both peaks1 and peaks2, or neither (to call peaks again)')
        elif options.peaks2 == '':
            raise ArgumentError('peaks2', 'Must specify both peaks1 and peaks2, or neither (to call peaks again)')
        options.call_peaks = False
        options.argtxt = "\n".join((
            "# ARGUMENTS LIST:",\
            "# name = %s" % (options.name),\
            # "# format = %s" % (options.format),\
            "# ChIP-seq file 1 = %s" % (options.t1bdg),\
            "# control file 1 = %s" % (options.c1bdg),\
            "# ChIP-seq file 2 = %s" % (options.t2bdg),\
            "# control file 2 = %s" % (options.c2bdg),\
            "# Peaks, condition 1 = %s" % (options.peaks1),\
            "# Peaks, condition 2 = %s" % (options.peaks2),\
            ""
            ))
    else:
        options.argtxt = "\n".join((
            "# ARGUMENTS LIST:",\
            "# name = %s" % (options.name),\
            # "# format = %s" % (options.format),\
            "# ChIP-seq file 1 = %s" % (options.t1bdg),\
            "# control file 1 = %s" % (options.c1bdg),\
            "# ChIP-seq file 2 = %s" % (options.t2bdg),\
            "# control file 2 = %s" % (options.c2bdg),\
            ""
            ))
    if options.peaks_pvalue:
        options.argtxt += "# treat/control -log10(pvalue) cutoff = %.2e\n" % (options.peaks_log_pvalue)
        options.argtxt += "# treat/control -log10(qvalue) will not be calculated and reported as -1 in the final output.\n"
    else:
        options.argtxt += "# treat/control -log10(qvalue) cutoff = %.2e\n" % (options.peaks_log_qvalue)
    if options.diff_pvalue:
        options.argtxt += "# differential pvalue cutoff = %.2e\n" % (options.log_pvalue)
        options.argtxt += "# differential qvalue will not be calculated and reported as -1 in the final output.\n"
    else:
        options.argtxt += "# differential qvalue cutoff = %.2e\n" % (options.log_qvalue)
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    return options
def opt_validate_filterdup ( options ):
    """Validate options from a OptParser object for `macs2 filterdup`.

    Resolves the genome size and the input-format parser, checks the
    --keep-dup argument and wires up the logging aliases.

    Ret: Validated options object.
    """
    # gsize: accept a shortcut name (hs/mm/ce/dm) or an explicit number
    try:
        options.gsize = efgsize[options.gsize]
    except KeyError:
        try:
            options.gsize = float(options.gsize)
        except (ValueError, TypeError):
            logging.error("Error when interpreting --gsize option: %s" % options.gsize)
            logging.error("Available shortcuts of effective genome sizes are %s" % ",".join(list(efgsize.keys())))
            sys.exit(1)
    # format: map each supported value to (parser class, gzip_flag).
    # bugfix: the previous if/elif chain listed "BEDPE" twice; the second
    # branch was unreachable dead code and has been dropped.
    format_parsers = {
        "ELAND":       (ELANDResultParser, False),
        "BED":         (BEDParser, False),
        "BEDPE":       (BEDPEParser, False),
        "ELANDMULTI":  (ELANDMultiParser, False),
        "ELANDEXPORT": (ELANDExportParser, False),
        "SAM":         (SAMParser, False),
        "BAM":         (BAMParser, True),
        "BOWTIE":      (BowtieParser, False),
        "BAMPE":       (BAMPEParser, True),
        "AUTO":        (guess_parser, False),
        }
    options.format = options.format.upper()
    try:
        options.parser, options.gzip_flag = format_parsers[options.format]
    except KeyError:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)
    # duplicate reads: 'auto', 'all' or a plain integer count
    if options.keepduplicates != "auto" and options.keepduplicates != "all":
        if not options.keepduplicates.isdigit():
            logging.error("--keep-dup should be 'auto', 'all' or an integer!")
            sys.exit(1)
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    return options
def opt_validate_randsample ( options ):
    """Validate options from a OptParser object for `macs2 randsample`.

    Resolves the input-format parser, sanity-checks the requested
    percentage/number of tags and sets up the logging aliases.

    Ret: Validated options object.
    """
    # map each supported format to (parser class, gzip-compressed input?)
    format_parsers = {
        "ELAND":       (ELANDResultParser, False),
        "BED":         (BEDParser, False),
        "ELANDMULTI":  (ELANDMultiParser, False),
        "ELANDEXPORT": (ELANDExportParser, False),
        "SAM":         (SAMParser, False),
        "BAM":         (BAMParser, True),
        "BOWTIE":      (BowtieParser, False),
        "BAMPE":       (BAMPEParser, True),
        "BEDPE":       (BEDPEParser, False),
        "AUTO":        (guess_parser, False),
        }
    options.format = options.format.upper()
    try:
        options.parser, options.gzip_flag = format_parsers[options.format]
    except KeyError:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)
    # exactly one of --percentage / --number drives sampling; validate it
    if options.percentage:
        if options.percentage > 100.0:
            logging.error("Percentage can't be bigger than 100.0. Please check your options and retry!")
            sys.exit(1)
    elif options.number:
        if options.number <= 0:
            logging.error("Number of tags can't be smaller than or equal to 0. Please check your options and retry!")
            sys.exit(1)
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    return options
def opt_validate_refinepeak ( options ):
    """Validate options from a OptParser object for `macs2 refinepeak`.

    Resolves the input-format parser and sets up the logging aliases.

    Ret: Validated options object.
    """
    # map each supported format to (parser class, gzip-compressed input?)
    format_parsers = {
        "ELAND":       (ELANDResultParser, False),
        "BED":         (BEDParser, False),
        "ELANDMULTI":  (ELANDMultiParser, False),
        "ELANDEXPORT": (ELANDExportParser, False),
        "SAM":         (SAMParser, False),
        "BAM":         (BAMParser, True),
        "BOWTIE":      (BowtieParser, False),
        "AUTO":        (guess_parser, False),
        }
    options.format = options.format.upper()
    try:
        options.parser, options.gzip_flag = format_parsers[options.format]
    except KeyError:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    return options
def opt_validate_predictd ( options ):
    """Validate options from a OptParser object for `macs2 predictd`.

    Resolves the genome size and input-format parser, checks the fragment
    size / mfold constraints and configures logging.

    Ret: Validated options object.
    """
    # gsize: accept a shortcut name (hs/mm/ce/dm) or an explicit number
    try:
        options.gsize = efgsize[options.gsize]
    except KeyError:
        try:
            options.gsize = float(options.gsize)
        except (ValueError, TypeError):
            logging.error("Error when interpreting --gsize option: %s" % options.gsize)
            logging.error("Available shortcuts of effective genome sizes are %s" % ",".join(list(efgsize.keys())))
            sys.exit(1)
    # format: choose the parser class; gzip_flag marks gzip-compressed input
    options.gzip_flag = False
    options.format = options.format.upper()
    if options.format == "ELAND":
        options.parser = ELANDResultParser
    elif options.format == "BED":
        options.parser = BEDParser
    elif options.format == "ELANDMULTI":
        options.parser = ELANDMultiParser
    elif options.format == "ELANDEXPORT":
        options.parser = ELANDExportParser
    elif options.format == "SAM":
        options.parser = SAMParser
    elif options.format == "BAM":
        options.parser = BAMParser
        options.gzip_flag = True
    elif options.format == "BAMPE":
        options.parser = BAMPEParser
        options.gzip_flag = True
        # paired-end input carries observed fragment sizes: skip model building
        options.nomodel = True
    elif options.format == "BEDPE":
        options.parser = BEDPEParser
        options.nomodel = True
    elif options.format == "BOWTIE":
        options.parser = BowtieParser
    elif options.format == "AUTO":
        options.parser = guess_parser
    else:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)
    # d_min is non-negative
    if options.d_min < 0:
        # bugfix: the message previously had no %d placeholder, so the "%"
        # formatting raised TypeError instead of reporting the problem
        logging.error("Minimum fragment size (%d) shouldn't be negative!" % options.d_min)
        sys.exit(1)
    # upper and lower mfold
    options.lmfold = options.mfold[0]
    options.umfold = options.mfold[1]
    if options.lmfold > options.umfold:
        # bugfix: same missing-placeholder problem as the d_min message above
        logging.error("Upper limit of mfold (%s) should be greater than lower limit!" % str(options.mfold))
        sys.exit(1)
    options.modelR = os.path.join( options.outdir, options.rfile )
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    return options
def opt_validate_pileup ( options ):
    """Validate options from a OptParser object for `macs2 pileup`.

    Resolves the input-format parser, configures logging and checks that
    --extsize is positive.

    Ret: Validated options object.
    """
    # map each supported format to (parser class, gzip-compressed input?);
    # pileup does not support AUTO detection
    format_parsers = {
        "ELAND":       (ELANDResultParser, False),
        "BED":         (BEDParser, False),
        "ELANDMULTI":  (ELANDMultiParser, False),
        "ELANDEXPORT": (ELANDExportParser, False),
        "SAM":         (SAMParser, False),
        "BAM":         (BAMParser, True),
        "BOWTIE":      (BowtieParser, False),
        "BAMPE":       (BAMPEParser, True),
        "BEDPE":       (BEDPEParser, False),
        }
    options.format = options.format.upper()
    try:
        options.parser, options.gzip_flag = format_parsers[options.format]
    except KeyError:
        logging.error("Format \"%s\" cannot be recognized!" % (options.format))
        sys.exit(1)
    # logging object
    logging.basicConfig(level=(4-options.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    # extsize must be positive
    if options.extsize <= 0 :
        logging.error("--extsize must > 0!")
        sys.exit(1)
    return options
def opt_validate_bdgcmp ( options ):
    """Validate options from a OptParser object for `macs2 bdgcmp`.

    Wires up the logging aliases, rejects unknown comparison methods and
    mismatched -m/--ofile counts.

    Ret: Validated options object.
    """
    # logging object
    logging.basicConfig(level=20,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    # every requested method must be one of the known scoring schemes
    valid_methods = ('ppois', 'qpois', 'subtract', 'logFE', 'FE', 'logLR', 'slogLR', 'max')
    for method in set(options.method):
        if method not in valid_methods:
            logging.error("Invalid method: %s" % method)
            sys.exit(1)
    # when explicit output names are given, one is needed per method
    if options.ofile and len(options.method) != len(options.ofile):
        logging.error("The number and the order of arguments for --ofile must be the same as for -m.")
        sys.exit(1)
    return options
def opt_validate_cmbreps ( options ):
    """Validate options from a OptParser object for `macs2 cmbreps`.

    Wires up the logging aliases, rejects unknown combination methods and
    requires at least two replicate input files.

    Ret: Validated options object.
    """
    # logging object
    logging.basicConfig(level=20,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    # only these strategies for combining replicate tracks are implemented
    if options.method not in ('fisher', 'max', 'mean'):
        logging.error("Invalid method: %s" % options.method)
        sys.exit(1)
    # combining fewer than two tracks is meaningless
    if len(options.ifile) < 2:
        logging.error("Combining replicates needs at least two replicates!")
        sys.exit(1)
    return options
def opt_validate_bdgopt ( options ):
    """Validate options from a OptParser object for `macs2 bdgopt`.

    Wires up the logging aliases, rejects unknown operations and requires
    an extra parameter for the binary operations (multiply/add).

    Ret: Validated options object.
    """
    # logging object
    logging.basicConfig(level=20,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    options.error = logging.critical        # function alias
    options.warn = logging.warning
    options.debug = logging.debug
    options.info = logging.info
    # method names are accepted case-insensitively
    method = options.method.lower()
    if method not in ('multiply', 'add', 'p2q', 'max', 'min'):
        logging.error("Invalid method: %s" % options.method)
        sys.exit(1)
    # multiply/add need the operand supplied via EXTRAPARAM
    if method in ('multiply', 'add') and not options.extraparam:
        logging.error("Need EXTRAPARAM for method multiply or add!")
        sys.exit(1)
    return options
| taoliu/MACS | MACS2/OptValidator.py | Python | bsd-3-clause | 28,454 | [
"Bowtie"
] | 56da4bd230c4b8a6a58f5f8bbd44e85fcbbaa754ef28d42c9ea59d6c233b144a |
"""Support integration with Illumina sequencer machines.
"""
import glob
import json
import os
import operator
import subprocess
from xml.etree.ElementTree import ElementTree
import logbook
import requests
import yaml
from bcbio import utils
from bcbio.log import setup_local_logging
from bcbio.illumina import demultiplex, samplesheet, transfer
from bcbio.galaxy import nglims
# ## bcbio-nextgen integration
def check_and_postprocess(args):
    """Check for newly dumped sequencer output, post-processing and transferring.

    Reads the YAML process configuration named on the command line, then for
    each finished-but-unreported flowcell dump directory: fetches run details
    from Galaxy/nglims, demultiplexes, prepares a bcbio configuration, copies
    the flowcell to its processing location and starts the analysis.
    """
    with open(args.process_config) as in_handle:
        config = yaml.safe_load(in_handle)
    setup_local_logging(config)
    for dname in _find_unprocessed(config):
        lane_details = nglims.get_runinfo(config["galaxy_url"], config["galaxy_apikey"], dname,
                                          utils.get_in(config, ("process", "storedir")))
        if isinstance(lane_details, dict) and "error" in lane_details:
            # nglims signals failure with an {"error": ...} dict rather than
            # raising; the directory stays unreported and is retried next poll
            print "Flowcell not found in Galaxy: %s" % lane_details
        else:
            lane_details = _tweak_lane(lane_details, dname)
            fcid_ss = samplesheet.from_flowcell(dname, lane_details)
            # record the flowcell as handled *before* the long-running
            # demultiplex so a concurrent poll does not start it twice
            _update_reported(config["msg_db"], dname)
            fastq_dir = demultiplex.run_bcl2fastq(dname, fcid_ss, config)
            bcbio_config, ready_fastq_dir = nglims.prep_samples_and_config(dname, lane_details, fastq_dir, config)
            transfer.copy_flowcell(dname, ready_fastq_dir, bcbio_config, config)
            _start_processing(dname, bcbio_config, config)
def _tweak_lane(lane_details, dname):
"""Potentially tweak lane information to handle custom processing, reading a lane_config.yaml file.
"""
tweak_config_file = os.path.join(dname, "lane_config.yaml")
if os.path.exists(tweak_config_file):
with open(tweak_config_file) as in_handle:
tweak_config = yaml.safe_load(in_handle)
if tweak_config.get("uniquify_lanes"):
out = []
for ld in lane_details:
ld["name"] = "%s-%s" % (ld["name"], ld["lane"])
out.append(ld)
return out
return lane_details
def _remap_dirname(local, remote):
"""Remap directory names from local to remote.
"""
def do(x):
return x.replace(local, remote, 1)
return do
def _start_processing(dname, sample_file, config):
    """Initiate processing: on a remote server or locally on a cluster.

    dname -- local flowcell dump directory
    sample_file -- bcbio sample configuration file for this flowcell
    config -- process configuration; config["process"] selects either a
              REST endpoint ("server") or a scheduler submission
              ("submit_cmd" plus a "bcbio_batch" script template).
    """
    # rewrite local paths into their location on the processing machine
    to_remote = _remap_dirname(dname, os.path.join(utils.get_in(config, ("process", "dir")),
                                                   os.path.basename(dname)))
    args = {"work_dir": to_remote(os.path.join(dname, "analysis")),
            "run_config": to_remote(sample_file),
            "fc_dir": to_remote(dname)}
    # call a remote server
    if utils.get_in(config, ("process", "server")):
        print "%s/run?args=%s" % (utils.get_in(config, ("process", "server")), json.dumps(args))
        requests.get(url="%s/run" % utils.get_in(config, ("process", "server")),
                     params={"args": json.dumps(args)})
    # submit to a cluster scheduler: write a batch script from the
    # configured template, then hand it to the configured submit command
    elif "submit_cmd" in config["process"] and "bcbio_batch" in config["process"]:
        with utils.chdir(utils.safe_makedir(args["work_dir"])):
            batch_script = "submit_bcbio.sh"
            with open(batch_script, "w") as out_handle:
                out_handle.write(config["process"]["bcbio_batch"].format(fcdir=args["fc_dir"],
                                                                         run_config=args["run_config"]))
            submit_cmd = utils.get_in(config, ("process", "submit_cmd"))
            subprocess.check_call(submit_cmd.format(batch_script=batch_script), shell=True)
    else:
        raise ValueError("Unexpected processing approach: %s" % config["process"])
def add_subparser(subparsers):
    """Register the 'sequencer' subcommand and return its parser."""
    sub = subparsers.add_parser("sequencer", help="Post process results from a sequencer.")
    sub.add_argument("process_config", help="YAML file specifying sequencer details for post-processing.")
    return sub
# ## Dump directory processing
def _find_unprocessed(config):
    """Yield finished dump directories that are not yet in the message db."""
    already_seen = _read_reported(config["msg_db"])
    for dname in _get_directories(config):
        if dname in already_seen or not os.path.isdir(dname):
            continue
        if _is_finished_dumping(dname):
            yield dname
def _get_directories(config):
for directory in config["dump_directories"]:
for dname in sorted(glob.glob(os.path.join(directory, "*[Aa]*[Xx][Xx]"))):
if os.path.isdir(dname):
yield dname
def _is_finished_dumping(directory):
    """Determine if the sequencing directory has all files.

    The final checkpoint file differs between single and paired end runs
    (and between GAII and HiSeq), so accept any of the known
    basecalling-complete marker files.
    """
    run_info = os.path.join(directory, "RunInfo.xml")
    hi_seq_checkpoint = "Basecalling_Netcopy_complete_Read%s.txt" % \
                        _expected_reads(run_info)
    to_check = ["Basecalling_Netcopy_complete_SINGLEREAD.txt",
                "Basecalling_Netcopy_complete_READ2.txt",
                hi_seq_checkpoint]
    # bugfix: reduce() is a builtin only on Python 2 (functools.reduce on 3)
    # and evaluated every exists() eagerly; any() is equivalent for boolean
    # or-reduction, short-circuits, and runs on both interpreters.
    return any(os.path.exists(os.path.join(directory, f)) for f in to_check)
def _is_finished_dumping_checkpoint(directory):
"""Recent versions of RTA (1.10 or better), write the complete file.
This is the most straightforward source but as of 1.10 still does not
work correctly as the file will be created at the end of Read 1 even
if there are multiple reads.
"""
check_file = os.path.join(directory, "Basecalling_Netcopy_complete.txt")
check_v1, check_v2 = (1, 10)
if os.path.exists(check_file):
with open(check_file) as in_handle:
line = in_handle.readline().strip()
if line:
version = line.split()[-1]
v1, v2 = [float(v) for v in version.split(".")[:2]]
if ((v1 > check_v1) or (v1 == check_v1 and v2 >= check_v2)):
return True
def _expected_reads(run_info_file):
"""Parse the number of expected reads from the RunInfo.xml file.
"""
reads = []
if os.path.exists(run_info_file):
tree = ElementTree()
tree.parse(run_info_file)
read_elem = tree.find("Run/Reads")
reads = read_elem.findall("Read")
return len(reads)
# ## Flat file of processed directories
def _read_reported(msg_db):
"""Retrieve a list of directories previous reported.
"""
reported = []
if os.path.exists(msg_db):
with open(msg_db) as in_handle:
for line in in_handle:
reported.append(line.strip())
return reported
def _update_reported(msg_db, new_dname):
"""Add a new directory to the database of reported messages.
"""
with open(msg_db, "a") as out_handle:
out_handle.write("%s\n" % new_dname)
| Cyberbio-Lab/bcbio-nextgen | bcbio/illumina/machine.py | Python | mit | 7,179 | [
"Galaxy"
] | 15ad1afdc958734fdc1b4ad81a54715840de756bd749c4dbe9991b6099a8b72e |
#Twitter Profiler app. This is a simple script to configure the Twitter API
import tweepy
import time #https://github.com/tweepy/tweepy
# Twitter API credentials. Get yours from apps.twitter.com. Twitter acct rquired
# If you need help, visit https://dev.twitter.com/oauth/overview
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
# this function collects a twitter profile request and returns a Twitter object
def get_profile(screen_name):
    """Fetch the Twitter profile for *screen_name* via the REST API.

    Returns the tweepy User object on success, or the sentinel string
    "broken" when the lookup fails (unknown handle, auth problems, rate
    limiting).
    """
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    try:
        # https://dev.twitter.com/rest/reference/get/users/show describes get_user
        user_profile = api.get_user(screen_name)
    except Exception:
        # bugfix: a bare "except:" also swallowed KeyboardInterrupt and
        # SystemExit; keep the "broken" sentinel the calling code expects
        user_profile = "broken"
    return user_profile
# Demo: query a Twitter user and print a few profile fields.
# Try your own handle in place of "cd_conrad".
# NOTE(review): get_profile() returns the string "broken" on failure; the
# attribute accesses below would then raise AttributeError -- consider
# checking for that sentinel before printing.
s = get_profile("cd_conrad")
print "Name: " + s.name
print "Location: " + s.location
print "Description: " + s.description
| cdconrad/shiftkey-py | week1/src/twitter-profiler.py | Python | mit | 1,022 | [
"VisIt"
] | c59b24208f5487499a298b1b531374abf5bd24853e61a930d9ce3f52b4e319b3 |
import numpy as np
import shtns
import sphTrans as sph
import matplotlib.pyplot as plt
import time
import AdamsBashforth
nlons = 256 # number of longitudes
ntrunc = int(nlons/3) # spectral truncation (for alias-free computations)
nlats = int(nlons/2) # for gaussian grid.
dt = 150 # time step in seconds
itmax = 6*int(86400/dt) # integration length in days
# parameters for test
rsphere = 6.37122e6 # earth radius
omega = 7.292e-5 # rotation rate
grav = 9.80616 # gravity
hbar = 10.e3 # resting depth
umax = 80. # jet speed
phi0 = np.pi/7.; phi1 = 0.5*np.pi - phi0; phi2 = 0.25*np.pi
en = np.exp(-4.0/(phi1-phi0)**2)
alpha = 1./3.; beta = 1./15.
hamp = 120. # amplitude of height perturbation to zonal jet
efold = 3.*3600. # efolding timescale at ntrunc for hyperdiffusion
ndiss = 8 # order for hyperdiffusion
# setup up spherical harmonic instance, set lats/lons of grid
x = sph.Spharmt(nlons,nlats,ntrunc,rsphere,gridtype='gaussian')
lons,lats = np.meshgrid(x.lons, x.lats)
f = 2.*omega*np.sin(lats) # coriolis
# zonal jet.
vg = np.zeros((nlats,nlons),np.float)
u1 = (umax/en)*np.exp(1./((x.lats-phi0)*(x.lats-phi1)))
ug = np.zeros((nlats),np.float)
ug = np.where(np.logical_and(x.lats < phi1, x.lats > phi0), u1, ug)
ug.shape = (nlats,1)
ug = ug*np.ones((nlats,nlons),dtype=np.float) # broadcast to shape (nlats,nlonss)
# height perturbation.
hbump = hamp*np.cos(lats)*np.exp(-(lons/alpha)**2)*np.exp(-(phi2-lats)**2/beta)
# initial vorticity, divergence in spectral space
vrtspec, divspec = x.getvrtdivspec(ug,vg)
vrtg = x.spectogrd(vrtspec)
divg = x.spectogrd(divspec)
# create hyperdiffusion factor
hyperdiff_fact = np.exp((-dt/efold)*(x.lap/x.lap[-1])**(ndiss/2))
# solve nonlinear balance eqn to get initial zonal geopotential,
# add localized bump (not balanced).
vrtg = x.spectogrd(vrtspec)
tmpg1 = ug*(vrtg+f); tmpg2 = vg*(vrtg+f)
tmpspec1, tmpspec2 = x.getvrtdivspec(tmpg1,tmpg2)
tmpspec2 = x.grdtospec(0.5*(ug**2+vg**2))
phispec = x.invlap*tmpspec1 - tmpspec2
phig = grav*(hbar + hbump) + x.spectogrd(phispec)
phispec = x.grdtospec(phig)
vrtsp = vrtspec
divsp = divspec
phisp = phispec
u = ug
v = vg
def dfdt(t,fn,args=None):
    """Shallow-water tendencies in spectral space for the time integrator.

    Parameters
    ----------
    t : current model time; unused, kept for the integrator's f(t, y) signature
    fn : [vrtsp, divsp, phisp] spectral vorticity, divergence, geopotential
    args : unused placeholder

    Returns [dvrtsp, ddivsp, dphisp], the spectral tendencies.
    Relies on the module-level transform object ``x`` and Coriolis field ``f``.
    """
    [vrtsp, divsp, phisp] = fn
    # transform back to grid space to form the nonlinear products
    vrt = x.spectogrd(vrtsp)
    u,v = x.getuv(vrtsp,divsp)
    phi = x.spectogrd(phisp)
    # absolute-vorticity flux components u*(zeta+f), v*(zeta+f)
    tmp1 = u*(vrt+f)
    tmp2 = v*(vrt+f)
    tmpa, tmpb = x.getvrtdivspec(tmp1, tmp2)
    # vorticity tendency is minus the divergence of the flux
    dvrtsp = -tmpb
    # NOTE(review): tmpc is computed but never used — candidate for removal
    tmpc = x.spectogrd(tmpa)
    # geopotential (mass) flux
    tmp3 = u*phi
    tmp4 = v*phi
    # NOTE(review): tmpd is computed but never used
    tmpd, tmpe = x.getvrtdivspec(tmp3,tmp4)
    dphisp = -tmpe
    # divergence tendency: rotational part minus Laplacian of the
    # Bernoulli term phi + |V|^2/2
    tmpf = x.grdtospec(phi+ 0.5*(u**2+v**2))
    ddivsp = tmpa - x.lap*tmpf
    return [dvrtsp, ddivsp, dphisp]
def diffusion(dt,F):
    """Apply the precomputed hyperdiffusion filter to the spectral state.

    Multiplies the vorticity and divergence spectra *in place* by the
    module-level ``hyperdiff_fact`` damping factors; geopotential is left
    untouched. ``dt`` is unused here: the factor was precomputed for a
    fixed time step.
    """
    [vrtsp,divsp, phisp] = F
    # in-place: the caller's arrays are modified as well
    vrtsp *= hyperdiff_fact
    divsp *= hyperdiff_fact
    return [vrtsp, divsp, phisp]
stepfwd = AdamsBashforth.AdamBash(dfdt,diffusion, ncycle=0)
tmax = 6*86400
t=0
dt = 150
plt.ion()
while(t< tmax):
t,[vrtsp,divsp,phisp] = stepfwd.integrate(t,[vrtsp,divsp,phisp], dt)
print 'Time:', t
vrt = x.spectogrd(vrtsp)
div = x.spectogrd(divsp)
phi = x.spectogrd(phisp)
plt.clf()
pv = (0.5*hbar*grav/omega)*(vrt+f)/phi
plt.imshow(pv)
plt.pause(1e-3)
| JoyMonteiro/parSpectral | pspec/shallowWater.py | Python | bsd-3-clause | 3,160 | [
"Gaussian"
] | 014f5f8e4eb0a930db70d1007073004797d8b0af6d74e14dc0c78b61467fe32a |
from __future__ import division
from astropy.io import fits
from astropy import units as u
from astroML.plotting import hist as histML
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
import bgb
fits_path = "../fits"
paper_path = "../paper"
def get_data():
    """Load the Meyer+11 blazar sample and compute clustering amplitudes.

    Reads the matched SED catalog from ``fits_path`` and keeps blazars
    that (a) lie inside the SDSS completeness window in redshift and
    (b) belong to the trusted/unknown extended SED classes, then computes
    the spatial clustering amplitude B_gB for each survivor.

    Returns
    -------
    bmeyer : list of B_gB values
    b : filtered FITS record array of blazars
    zarr : numpy array of redshifts matching ``b``
    """
    # Plot the peak frequency vs. peak luminosity for the blazar sequence,
    # distinguishing points by both redshift and clustering strength (B_gB)
    # Based on Figure 4 in Meyer et al. (2011)
    all_blazars = fits.getdata('{0}/meyer_bz_allseds.fits'.format(fits_path),1)
    lowz = 0.043  # Redshift at which angular size of 10 arcmin = 500 kpc
    highz = 0.75  # Redshift at which an M* galaxy reaches the SDSS completeness limit
    neighbors = 10
    ztemp = []
    for blazar in all_blazars:
        try:
            # float() instead of np.float: the np.float alias was deprecated
            # in NumPy 1.20 and removed in NumPy 1.24.
            z = float(blazar['used_redshift'])
        except ValueError:
            # fall back to the catalog redshift column
            z = blazar['z']
        ztemp.append(z)
    _zarr = np.array(ztemp)
    # Include only blazars with redshifts in SDSS completeness limit
    redshift_limits = (_zarr > lowz) & (_zarr < highz)
    # Include only blazars in the "trusted extended" or "unknown extended" categories of Meyer+11
    blazar_type_limits = (all_blazars['sed_code'] == 'Uex') | (all_blazars['sed_code'] == 'Tex')
    # Include only blazars that had a sufficient number of galaxies in SDSS to estimate environment
    # NOTE(review): counting_limits is computed but never applied in the mask
    # below — confirm whether the environment cut was meant to be enforced.
    counting_limits = (all_blazars['n500_cmag'] >= neighbors) & (all_blazars['bg'] > 0)
    b = all_blazars[redshift_limits & blazar_type_limits]
    zarr = _zarr[redshift_limits & blazar_type_limits]
    # Compute spatial clustering amplitude for the sample
    bmeyer,bmeyer_err = [],[]
    for blazar,z in zip(b,zarr):
        bgb_val,bgb_err= bgb.get_bgb(blazar['n500_cmag'], blazar['bg'], blazar['fieldsize']*u.arcsec, z, blazar['cmag'], lf = 'dr6ages')
        bmeyer.append(bgb_val.value)
        bmeyer_err.append(bgb_err.value)
    return bmeyer,b,zarr
def plot_blazar_sequence(bgb_data,bdata,zarr,savefig=False):
    """Plot peak frequency vs. peak luminosity for the blazar sequence.

    Marker colour encodes redshift, marker size encodes the clustering
    amplitude B_gB, and marker shape encodes the spectral type.
    Based on Figure 4 of Meyer et al. (2011).

    Parameters
    ----------
    bgb_data : list of B_gB clustering amplitudes
    bdata : FITS record array with 'nupeak', 'lpeak', 'btype' columns
    zarr : redshift array matching ``bdata``
    savefig : bool
        Write the figure into the paper directory instead of showing it.
    """
    # Figure geometry: leave room on the right for the colorbar and legends
    leftx = 0.12
    rightx = 0.20
    yheight = 0.80
    xwidth = 1. - leftx - rightx
    xsize = 12
    ysize = xsize * xwidth / yheight
    fig = plt.figure(figsize=(xsize, ysize))
    ax = fig.add_subplot(111, position=[0.12, 0.10, xwidth, yheight])
    # Plot the blazars on the nu_peak vs. peak luminosity plot.
    # Color = redshift, size = B_gB
    nupeak = np.array(bdata['nupeak'], dtype=float)
    lpeak = np.array(bdata['lpeak'], dtype=float)
    # Linear mapping from B_gB in [bmin, bmax] to marker area in [smin, smax]
    # (the former unused locals ``sizescale``/``minsize`` were removed)
    smin = 10
    smax = 140
    bmin = -500
    bmax = 1000

    def size_bgb(x):
        """Scale a B_gB value into the [smin, smax] marker-size range."""
        return ((x - bmin) * (smax - smin) / (bmax - bmin)) + smin

    sizearr = size_bgb(np.array(bgb_data))

    def bscatter(axis, index, marker='o', label='BL Lac', vmin=0, vmax=0.75):
        """Scatter the subset ``index``, coloured by redshift, sized by B_gB."""
        return axis.scatter(nupeak[index], lpeak[index],
                            marker=marker, label=label,
                            c=zarr[index], s=sizearr[index],
                            cmap=cm.jet,
                            vmin=vmin, vmax=vmax)

    # Categorize blazars by spectral type
    bllac = (bdata['btype'] == 'BLLac') | (bdata['btype'] == 'Plotkin_blazar') | (bdata['btype'] == 'HBL') | (bdata['btype'] == 'lBLLac')
    fsrq = (bdata['btype'] == 'FSRQ')
    uncertain = (bdata['btype'] == 'BLLac_candidate') | (bdata['btype'] == 'blazar_uncertain') | (bdata['btype'] == 'Blazar')
    sc_bllac = bscatter(ax, bllac, 'o', 'BL Lac')   # kept: used for the colorbar
    bscatter(ax, fsrq, 's', 'FSRQ')
    bscatter(ax, uncertain, '+', 'Uncertain')
    # Add dashed lines indicating the blazar sequence from theoretical predictions of Meyer+11
    # Single-component jet
    track_a = np.loadtxt("../meyer/track_a.txt")
    x0, y0 = track_a[-1]
    dx = track_a[0][0] - track_a[1][0]
    dy = track_a[0][1] - track_a[1][1]
    ax.arrow(x0, y0, dx, dy, lw=2, fc='k', ec='k', head_width=0.1)
    # Decelerating jet
    track_b = np.loadtxt("../meyer/track_b.txt")
    seg_x = track_b[1:][:, 0]
    seg_y = track_b[1:][:, 1]
    ax.plot(seg_x, seg_y, lw=2, color='k')
    x0, y0 = track_b[1]
    dx = track_b[0][0] - track_b[1][0]
    dy = track_b[0][1] - track_b[1][1]
    ax.arrow(x0, y0, dx, dy, lw=2, fc='k', ec='k', head_width=0.1)
    ax.set_xlabel(r'$\log(\nu_{\rm peak})$ [Hz]', fontsize=22)
    ax.set_ylabel(r'$\log(\nu {\rm L}_\nu$) [erg s$^{-1}$]', fontsize=22)
    # Original limits on Figure 4 (Meyer+11)
    ax.set_xlim(12, 18)
    ax.set_ylim(41, 48)
    # More sensible limits for the range of our sample
    #ax.set_xlim(12,17)
    #ax.set_ylim(43.5,47)
    # Dummy scatters (off-plot markers) used only to populate the extra
    # legend showing the size scale and the marker shapes.
    xdummy, ydummy = [0], [0]
    ax.scatter(xdummy, ydummy, marker='o', color='k', s=size_bgb(-500), label='-500')
    ax.scatter(xdummy, ydummy, marker='o', color='k', s=size_bgb(0), label='0')
    ax.scatter(xdummy, ydummy, marker='o', color='k', s=size_bgb(500), label='500')
    ax.scatter(xdummy, ydummy, marker='o', color='k', s=size_bgb(1000), label='1000')
    ax.scatter(xdummy, ydummy, marker='o', color='k', s=size_bgb(1500), label='1500')
    ax.scatter(xdummy, ydummy, marker='o', color='b', s=size_bgb(0), label='BL Lac')
    ax.scatter(xdummy, ydummy, marker='s', color='b', s=size_bgb(0), label='FSRQ')
    ax.scatter(xdummy, ydummy, marker='+', color='b', s=size_bgb(0), label='Uncertain')
    handles, labels = ax.get_legend_handles_labels()
    # Main legend (marker shapes): artists 8..10 in creation order
    legend1 = ax.legend(handles[8:], labels[8:], scatterpoints=1)
    # Sizing legend: artists 3..7 in creation order
    ax.legend(handles[3:8], labels[3:8], scatterpoints=1, bbox_to_anchor=(1.02, 0.35), loc=2)
    plt.gca().add_artist(legend1)
    # Add colorbar for the blazar redshift
    cb_axis = fig.add_axes([0.82, 0.45, 0.05, 0.40])
    cb = plt.colorbar(sc_bllac, cax=cb_axis, orientation='vertical')
    cb.set_label('blazar redshift', fontsize=16)
    if savefig:
        plt.savefig('{0}/figures/bgb_blazarsequence_allseds.pdf'.format(paper_path))
    else:
        plt.show()
def plot_blazar_sequence_hist(bgb_data,savefig=False):
    """Plot the normalized distribution of B_gB for the Meyer+11 sample.

    Parameters
    ----------
    bgb_data : sequence of B_gB clustering amplitudes
    savefig : bool
        Write the figure into the paper directory instead of showing it.
    """
    # Plot just the distribution of B_gB for the Meyer+11 sample
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # weights normalize each bin by the sample size, so the histogram
    # shows fractions rather than raw counts
    histML(bgb_data, bins=25, ax=ax, histtype='step', color='b',weights=np.zeros_like(bgb_data) + 1./len(bgb_data))
    ax.set_title('Matched SDSS and (TEX+UEX) samples')
    ax.set_xlabel(r'$B_{gB}$',fontsize=24)
    # NOTE(review): label says 'Count' but the weights above make this a fraction
    ax.set_ylabel('Count',fontsize=16)
    if savefig:
        fig.savefig('{0}/figures/bgb_blazarsequence_hist.pdf'.format(paper_path))
    else:
        plt.show()
    return None
if __name__ == "__main__":
    # Regenerate both paper figures from the Meyer+11 matched sample.
    bgb_data,bdata,zarr = get_data()
    plot_blazar_sequence(bgb_data,bdata,zarr,savefig=True)
    plot_blazar_sequence_hist(bgb_data,savefig=True)
| willettk/blazar_clustering | python/blazars_meyer.py | Python | mit | 6,869 | [
"Galaxy"
] | 7f00711f248e9b811bf3a37f7d4d388a8e3817130aa40c408b3e1a4dbbc7b448 |
import re
import time
from copy import deepcopy
import itertools
import MDAnalysis
from MDAnalysis.analysis import distances
import numpy as np
from .Biochemistry import *
from .LogPool import *
from ..cy_modules.cy_gridsearch import cy_find_contacts
def weight_function(value):
    """Map a contact distance to a score in (0, 1).

    Logistic falloff centred at 4.0 with steepness 5.0: distances well
    below the midpoint score close to 1, distances well above score
    close to 0.
    """
    steepness = 5.0
    midpoint = 4.0
    return 1.0 / (1.0 + np.exp(steepness * (value - midpoint)))
def chunks(seq, num):
    """Split ``seq`` into ``num`` contiguous pieces of (almost) equal size."""
    step = len(seq) / float(num)
    cuts = []
    edge = 0.0
    while edge < len(seq):
        cuts.append((int(edge), int(edge + step)))
        edge += step
    return [seq[a:b] for a, b in cuts]
class ConvBond(object):
    """Picklable snapshot of MDAnalysis bond records for worker processes.

    MDAnalysis bond objects reference the whole Universe and do not pickle
    cheaply, so only the per-bond atom indices and type strings are copied.

    Attributes
    ----------
    types : list
        Bond type strings, one entry per bond.
    indices : list
        Atom index tuples, one entry per bond (parallel to ``types``).
    """

    def __init__(self, bonds):
        super(ConvBond, self).__init__()
        self.types = []
        self.indices = []
        for b in bonds:
            self.indices.append(deepcopy(b.indices))
            self.types.append(deepcopy(b.type))

    # NOTE: a former ``types()`` method was removed here; it was shadowed
    # by the ``self.types`` instance attribute set in __init__ and was
    # therefore unreachable dead code.

    def to_indices(self):
        """Return the list of atom index tuples, one per bond."""
        return self.indices
# newer version with gridsearch
def _hydrogens_bonded_to(conv_bond, name_array):
    """Return global indices of hydrogen atoms bonded through ``conv_bond``.

    A bond contributes an index when one of its type strings starts with
    'H' and one of its atoms has a *name* starting with 'H'. Using
    enumerate keeps the bond counter in sync with ``to_indices()`` even
    when a bond without hydrogen precedes one with hydrogen (the previous
    code only advanced the counter inside the hydrogen branch, which
    desynchronized the lookup).
    """
    hydrogens = []
    for bondnum, btype in enumerate(conv_bond.types):
        hydrogen = next((t for t in btype if t.startswith("H")), 0)
        if hydrogen != 0:
            bondindices = conv_bond.to_indices()[bondnum]
            hydrogenidx = next((j for j in bondindices if name_array[j].startswith("H")), -1)
            if hydrogenidx != -1:
                hydrogens.append(hydrogenidx)
    return hydrogens


def loop_trajectory_grid(sel1c, sel2c, indices1, indices2, config, suppl, selfInteraction):
    """Analyze a chunk of trajectory frames for atomic contacts (grid search).

    Parameters
    ----------
    sel1c, sel2c : per-frame coordinate arrays for selections 1 and 2
    indices1, indices2 : per-frame global atom indices matching sel1c/sel2c
    config : (cutoff, hbondcutoff, hbondcutangle)
    suppl : [bonds, name_array] or, when ``selfInteraction`` is True,
        [bonds, name_array, resid_array, segids]
    selfInteraction : bool
        When True, skip pairs closer than 5 residues within the same segment.

    Returns
    -------
    list with one entry per frame, each a list of AtomContact objects.
    """
    cutoff, hbondcutoff, hbondcutangle = config
    bonds = suppl[0]
    name_array = suppl[1]
    resid_array = []
    segids = []
    if selfInteraction:
        resid_array = suppl[2]
        segids = suppl[3]
    allRankContacts = []
    for frame, (s1, s2) in enumerate(zip(sel1c, sel2c)):
        currentFrameContacts = []
        natoms1 = len(s1)
        natoms2 = len(s2)
        pos1 = np.array(np.reshape(s1, (1, natoms1 * 3)), dtype=np.float64)
        pos2 = np.array(np.reshape(s2, (1, natoms2 * 3)), dtype=np.float64)
        # the Cython grid search works in single precision
        xyz1 = np.array(pos1, dtype=np.float32)
        xyz2 = np.array(pos2, dtype=np.float32)
        # nbList1[i] lists the sel2 indices within cutoff of sel1 atom i
        nbList1 = cy_find_contacts(xyz1, natoms1, xyz2, natoms2, cutoff)
        for idx1, atom1sNeighbors in enumerate(nbList1):
            for idx2 in atom1sNeighbors:
                convindex1 = indices1[frame][idx1]  # global index of sel1 atom
                convindex2 = indices2[frame][idx2]  # global index of sel2 atom
                # only heavy-atom pairs count as contacts (hydrogens are
                # still examined below for hydrogen bonds);
                # startswith("H") replaces the equivalent re.match("H(.*)")
                if name_array[convindex1].startswith("H") or name_array[convindex2].startswith("H"):
                    continue
                if selfInteraction:
                    # skip trivially close pairs within the same chain
                    if (resid_array[convindex1] - resid_array[convindex2]) < 5 and segids[convindex1] == segids[convindex2]:
                        continue
                # distance between atom1 and atom2 (double precision)
                dvec = pos1[0][3 * idx1:3 * idx1 + 3] - pos2[0][3 * idx2:3 * idx2 + 3]
                distance = np.sqrt(dvec.dot(dvec))
                weight = weight_function(distance)
                # force-field independent hydrogen bond detection
                hydrogenBonds = []
                if name_array[convindex1][0] in HydrogenBondAtoms.atoms and name_array[convindex2][0] in HydrogenBondAtoms.atoms:
                    b1 = bonds[convindex1]
                    b2 = bonds[convindex2]
                    hydrogenAtomsBoundToAtom1 = _hydrogens_bonded_to(b1, name_array)
                    hydrogenAtomsBoundToAtom2 = _hydrogens_bonded_to(b2, name_array)
                    # hbond criterion for hydrogens bound to the first atom:
                    # H...acceptor distance below cutoff and D-H...A angle
                    # above the angular cutoff
                    for global_hatom in hydrogenAtomsBoundToAtom1:
                        conv_hatom = np.where(indices1[frame] == global_hatom)[0][0]
                        dist = np.linalg.norm(pos1[0][3 * conv_hatom:3 * conv_hatom + 3] - pos2[0][3 * idx2:3 * idx2 + 3])
                        if dist <= hbondcutoff:
                            donorPosition = s1[idx1]
                            hydrogenPosition = np.array(s1[conv_hatom], dtype=np.float64)
                            acceptorPosition = np.array(s2[idx2], dtype=np.float64)
                            v1 = hydrogenPosition - acceptorPosition
                            v2 = hydrogenPosition - donorPosition
                            v1norm = np.linalg.norm(v1)
                            v2norm = np.linalg.norm(v2)
                            angle = np.degrees(np.arccos(np.dot(v1, v2) / (v1norm * v2norm)))
                            if angle >= hbondcutangle:
                                hydrogenBonds.append(HydrogenBond(convindex1, convindex2, global_hatom,
                                                                  dist, angle,
                                                                  hbondcutoff, hbondcutangle))
                    # same criterion with atom2 as the donor
                    for global_hatom in hydrogenAtomsBoundToAtom2:
                        conv_hatom = np.where(indices2[frame] == global_hatom)[0][0]
                        dist = np.linalg.norm(pos1[0][3 * idx1:3 * idx1 + 3] - pos2[0][3 * conv_hatom:3 * conv_hatom + 3])
                        if dist <= hbondcutoff:
                            donorPosition = s2[idx2]
                            hydrogenPosition = np.array(s2[conv_hatom], dtype=np.float64)
                            acceptorPosition = np.array(s1[idx1], dtype=np.float64)
                            v1 = hydrogenPosition - acceptorPosition
                            v2 = hydrogenPosition - donorPosition
                            v1norm = np.linalg.norm(v1)
                            v2norm = np.linalg.norm(v2)
                            angle = np.degrees(np.arccos(np.dot(v1, v2) / (v1norm * v2norm)))
                            if angle >= hbondcutangle:
                                hydrogenBonds.append(HydrogenBond(convindex2, convindex1, global_hatom,
                                                                  dist, angle,
                                                                  hbondcutoff, hbondcutangle))
                currentFrameContacts.append(AtomContact(int(frame), float(distance), float(weight),
                                                        int(convindex1), int(convindex2),
                                                        hydrogenBonds))
        allRankContacts.append(currentFrameContacts)
    return allRankContacts
def loop_trajectory(sel1c, sel2c, indices1, indices2, config, suppl, selfInteraction):
    """Analyze a chunk of trajectory frames for contacts (full distance matrix).

    Same contract as ``loop_trajectory_grid`` but computes the complete
    sel1 x sel2 distance matrix with MDAnalysis instead of the Cython
    grid search.

    Parameters
    ----------
    sel1c, sel2c : per-frame coordinate arrays for selections 1 and 2
    indices1, indices2 : per-frame global atom indices matching sel1c/sel2c
    config : (cutoff, hbondcutoff, hbondcutangle)
    suppl : [bonds, name_array] or, when ``selfInteraction`` is True,
        [bonds, name_array, resid_array, segids]
    selfInteraction : bool
        When True, skip pairs closer than 5 residues within the same segment.

    Returns
    -------
    list with one entry per frame, each a list of AtomContact objects.
    """
    cutoff, hbondcutoff, hbondcutangle = config
    bonds = suppl[0]
    name_array = suppl[1]
    resid_array = []
    segids = []
    if selfInteraction:
        resid_array = suppl[2]
        segids = suppl[3]
    allRankContacts = []
    for frame, (s1, s2) in enumerate(zip(sel1c, sel2c)):
        currentFrameContacts = []
        result = np.ndarray(shape=(len(s1), len(s2)), dtype=float)
        distarray = distances.distance_array(s1, s2, box=None, result=result)
        contacts = np.where(distarray <= cutoff)
        # builtin zip() instead of itertools.izip: izip does not exist on
        # Python 3, and zip is equivalent here
        for idx1, idx2 in zip(contacts[0], contacts[1]):
            convindex1 = indices1[frame][idx1]  # global index of sel1 atom
            convindex2 = indices2[frame][idx2]  # global index of sel2 atom
            # only heavy-atom pairs count as contacts (hydrogens are still
            # examined below for hydrogen bonds)
            if name_array[convindex1].startswith("H") or name_array[convindex2].startswith("H"):
                continue
            if selfInteraction:
                # skip trivially close pairs within the same chain
                if (resid_array[convindex1] - resid_array[convindex2]) < 5 and segids[convindex1] == segids[convindex2]:
                    continue
            # distance between atom1 and atom2
            distance = distarray[idx1, idx2]
            weight = weight_function(distance)
            hydrogenBonds = []
            if name_array[convindex1][0] in HydrogenBondAtoms.atoms and name_array[convindex2][0] in HydrogenBondAtoms.atoms:
                b1 = bonds[convindex1]
                b2 = bonds[convindex2]
                # search hydrogen atoms bound to atom 1; enumerate keeps the
                # bond counter in sync with to_indices() even when a bond
                # without hydrogen precedes one with hydrogen (the previous
                # code only advanced the counter inside the hydrogen branch)
                hydrogenAtomsBoundToAtom1 = []
                for bondnum, btype in enumerate(b1.types):
                    hydrogen = next((t for t in btype if t.startswith("H")), 0)
                    if hydrogen != 0:
                        bondindices1 = b1.to_indices()[bondnum]
                        hydrogenidx = next((j for j in bondindices1 if name_array[j].startswith("H")), -1)
                        if hydrogenidx != -1:
                            hydrogenAtomsBoundToAtom1.append(hydrogenidx)
                # search hydrogen atoms bound to atom 2
                hydrogenAtomsBoundToAtom2 = []
                for bondnum, btype in enumerate(b2.types):
                    hydrogen = next((t for t in btype if t.startswith("H")), 0)
                    if hydrogen != 0:
                        bondindices2 = b2.to_indices()[bondnum]
                        hydrogenidx = next((k for k in bondindices2 if name_array[k].startswith("H")), -1)
                        if hydrogenidx != -1:
                            hydrogenAtomsBoundToAtom2.append(hydrogenidx)
                # hbond criterion with atom1 as donor: H...A distance below
                # cutoff and D-H...A angle above the angular cutoff
                for global_hatom in hydrogenAtomsBoundToAtom1:
                    conv_hatom = np.where(indices1[frame] == global_hatom)[0][0]
                    dist = distarray[conv_hatom, idx2]
                    if dist <= hbondcutoff:
                        donorPosition = s1[idx1]
                        hydrogenPosition = s1[conv_hatom]
                        acceptorPosition = s2[idx2]
                        v1 = hydrogenPosition - acceptorPosition
                        v2 = hydrogenPosition - donorPosition
                        v1norm = np.linalg.norm(v1)
                        v2norm = np.linalg.norm(v2)
                        angle = np.degrees(np.arccos(np.dot(v1, v2) / (v1norm * v2norm)))
                        if angle >= hbondcutangle:
                            hydrogenBonds.append(HydrogenBond(convindex1, convindex2, global_hatom,
                                                              dist, angle,
                                                              hbondcutoff, hbondcutangle))
                # same criterion with atom2 as donor
                for global_hatom in hydrogenAtomsBoundToAtom2:
                    conv_hatom = np.where(indices2[frame] == global_hatom)[0][0]
                    dist = distarray[idx1, conv_hatom]
                    if dist <= hbondcutoff:
                        donorPosition = s2[idx2]
                        hydrogenPosition = s2[conv_hatom]
                        acceptorPosition = s1[idx1]
                        v1 = hydrogenPosition - acceptorPosition
                        v2 = hydrogenPosition - donorPosition
                        v1norm = np.linalg.norm(v1)
                        v2norm = np.linalg.norm(v2)
                        angle = np.degrees(np.arccos(np.dot(v1, v2) / (v1norm * v2norm)))
                        if angle >= hbondcutangle:
                            hydrogenBonds.append(HydrogenBond(convindex2, convindex1, global_hatom,
                                                              dist, angle,
                                                              hbondcutoff, hbondcutangle))
            currentFrameContacts.append(AtomContact(int(frame), float(distance), float(weight),
                                                    int(convindex1), int(convindex2),
                                                    hydrogenBonds))
        allRankContacts.append(currentFrameContacts)
    return allRankContacts
def run_load_parallel(nproc, psf, dcd, cutoff, hbondcutoff, hbondcutangle, sel1text, sel2text):
    """Load a trajectory and run the contact analysis on ``nproc`` workers.

    Parameters
    ----------
    nproc : number of worker processes
    psf, dcd : topology and trajectory file paths
    cutoff : heavy-atom contact distance cutoff
    hbondcutoff, hbondcutangle : hydrogen bond distance/angle criteria
    sel1text, sel2text : MDAnalysis selection strings; ``sel2text == "self"``
        enables self-interaction mode (selection 1 against itself, with
        nearby residues in the same segment excluded)

    Returns
    -------
    [allContacts, resname_array, resid_array, name_array, segids, backbone]

    Raises
    ------
    ValueError
        If either selection matches no atoms.
    """
    pool = LoggingPool(nproc)
    # load psf and dcd
    u = MDAnalysis.Universe(psf, dcd)
    # define selections according to sel1text and sel2text
    selfInteraction = False
    if sel2text == "self":
        sel1 = u.select_atoms(sel1text)
        sel2 = u.select_atoms(sel1text)
        selfInteraction = True
    else:
        sel1 = u.select_atoms(sel1text)
        sel2 = u.select_atoms(sel2text)
    # snapshot per-atom properties as plain lists so they pickle cheaply
    all_sel = u.select_atoms("all")
    backbone_sel = u.select_atoms("backbone")
    resname_array = []
    resid_array = []
    name_array = []
    bonds = []
    segids = []
    backbone = []
    for atom in all_sel.atoms:
        resname_array.append(atom.resname)
        resid_array.append(atom.resid)
        name_array.append(atom.name)
        bonds.append(ConvBond(atom.bonds))
        segids.append(atom.segid)
    for atom in backbone_sel:
        backbone.append(atom.index)
    if len(sel1.atoms) == 0 or len(sel2.atoms) == 0:
        # was a bare ``raise Exception``; ValueError is still caught by
        # callers handling Exception but now carries a message
        raise ValueError("at least one selection matched no atoms: "
                         "sel1=%r, sel2=%r" % (sel1text, sel2text))
    sel1coords = []
    sel2coords = []
    indices1 = []
    indices2 = []
    for ts in u.trajectory:
        # "around" selections are distance dependent and must be
        # re-evaluated on every frame
        if "around" in sel1text:
            sel1 = u.select_atoms(sel1text)
        if "around" in sel2text:
            sel2 = u.select_atoms(sel2text)
        # collect per-frame coordinates and matching global atom indices
        sel1coords.append(sel1.positions)
        sel2coords.append(sel2.positions)
        indices1.append(sel1.indices)
        indices2.append(sel2.indices)
    # split the frames into one chunk per worker
    sel1c = chunks(sel1coords, nproc)
    sel2c = chunks(sel2coords, nproc)
    sel1ind = chunks(indices1, nproc)
    sel2ind = chunks(indices2, nproc)
    print("Running on %d cores" % nproc)
    results = []
    for c in zip(sel1c, sel2c, sel1ind, sel2ind):
        if selfInteraction:
            supplementary = [bonds, name_array, resid_array, segids]
        else:
            supplementary = [bonds, name_array]
        results.append(pool.apply_async(loop_trajectory_grid,
                                        args=(c[0], c[1], c[2], c[3],
                                              [cutoff, hbondcutoff, hbondcutangle],
                                              supplementary, selfInteraction)))
    pool.close()
    pool.join()
    # concatenate the per-worker frame lists in submission order
    allContacts = []
    for res in results:
        allContacts.extend(res.get())
    return [allContacts, resname_array, resid_array, name_array, segids, backbone]
| maxscheurer/pycontact | PyContact/core/multi_trajectory.py | Python | gpl-3.0 | 22,417 | [
"MDAnalysis"
] | 8644d00ab537a635baa1cc7eb026164cc8dc7333f28b9b41f87b36403040e1af |
#-*- coding:utf-8 -*-
#
#
# Layout Module
#
# unittesting in tests/test_layout_u.py
#
"""
.. currentmodule:: pylayers.gis.layout
.. autosummary::
"""
from __future__ import print_function
try:
from tvtk.api import tvtk
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
import pdb
import sys
import os
import copy
import glob
import time
import tqdm
import numpy as np
import numpy.random as rd
import scipy as sp
import scipy.sparse as sparse
import doctest
import triangle
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import networkx as nx
import pandas as pd
from itertools import combinations, product
import ast
from networkx.readwrite import write_gpickle, read_gpickle
from mpl_toolkits.basemap import Basemap
import shapely.geometry as sh
import shapefile as shp
from shapely.ops import cascaded_union
from descartes.patch import PolygonPatch
from numpy import array
import PIL.Image as Image
import hashlib
import pylayers.gis.kml as gkml
#from pathos.multiprocessing import ProcessingPool as Pool
#from pathos.multiprocessing import cpu_count
from functools import partial
if sys.version_info.major==2:
from urllib2 import urlopen
import ConfigParser
else:
from urllib.request import urlopen
import configparser as ConfigParser
# from cStringIO import StringIO
# from multiprocessing import Pool
def _pickle_method(method):
    """Reduce a bound method for pickling (multiprocessing support).

    Returns ``(_unpickle_method, payload)`` so bound methods can cross
    process boundaries. Uses ``__func__``/``__self__`` — available on both
    Python 2.6+ and Python 3 — instead of the Python-2-only
    ``im_func``/``im_self``/``im_class`` attributes, since this reducer is
    registered under both interpreters.
    """
    func_name = method.__func__.__name__
    obj = method.__self__
    cls = obj.__class__
    if func_name.startswith('__') and not func_name.endswith('__'):
        # deal with name-mangled private methods
        cls_name = cls.__name__.lstrip('_')
        func_name = '_' + cls_name + func_name
    return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
    """Rebuild a bound method from the payload produced by ``_pickle_method``.

    Walks the MRO of ``cls`` until a class dict defines ``func_name`` and
    binds that function to ``obj`` via the descriptor protocol.
    """
    func = None
    owner = None
    for owner in cls.__mro__:
        func = owner.__dict__.get(func_name)
        if func is not None:
            break
    return func.__get__(obj, owner)
import types
if sys.version_info.major==2:
import copy_reg
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
else:
import copyreg
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
import pylayers.antprop.slab as sb
from pylayers.util import geomutil as geu
from pylayers.util import plotutil as plu
from pylayers.util import pyutil as pyu
from pylayers.util import graphutil as gru
from pylayers.util import cone
# Handle furnitures
import pylayers.gis.furniture as fur
import pylayers.gis.osmparser as osm
from pylayers.gis.selectl import SelectL
import pylayers.util.graphutil as gph
import pylayers.util.project as pro
from pylayers.util.project import logger
def pbar(verbose, **kwargs):
    """Create a tqdm progress bar when ``verbose`` is truthy.

    Returns the tqdm instance, or None when ``verbose`` is falsy so that
    callers can silently skip progress reporting. Extra keyword arguments
    are forwarded to ``tqdm.tqdm``.
    """
    if not verbose:
        return None
    return tqdm.tqdm(**kwargs)
class Layout(pro.PyLayers):
""" Handling Layout
Attributes
----------
Gs : Graph of points and segment (structure)
Gt : Graph of convex cycles (topology)
Gv : Graph of visibility (visibility)
Gi : Graph of interactions (interactions)
Gr : Graph of rooms (rooms)
Nnode : Number of nodes of Gs
Nedge : Number of edges of Gs
pt : points sequence
tahe : tail head
Notes
-----
This class uses `networkx` to store Layout information
Gs : structure
Gt : topology
Gv : visibility
Gi : interaction
Gr : room
Gm :
Gw : ways
Np
Ns
Nss
ax : (xmin,ymin,xmax,ymax)
axn : (0,Dx,0,Dy)
filefur
filegeom
filematini
fileslabini
hasboundary
segboundary
min_sx
min_sy
max_sx
max_sy
labels
lbltg
lboundary
listtransition
loadosm
lsss
name
normal
p2pc
pg
pt : points coordinates
tahe : segment tail head
tgs : graph to segment
tsg : segment to graph
upnt : array of point index
s2pc : segment to point coordinates
s2pu : segment to point index
sgsg
sl
typ : 'indoor' | 'outdoor'
coordinates : 'cart','lonlat'
version
_filename
_hash
_shseg : keys / segment index
values / shapely LineString
dca : keys / Gt node
values / list of air wall
degree : keys / point degree
values / array of index
display : dictionnary for controling various visualization
dsseg :
indoor : if True allow indoor penetration
isbuilt
diffraction
maxheight
zceil
zfloor
zmin
"""
def __init__(self,arg='',**kwargs):
""" object constructor
Parameters
----------
arg : string or tuple
layout file name, address or (lat,lon) or '(lat,lon)'
mat :
material dB file name
slab :
slab dB file name
fur :
furniture file name
force : boolean
check : boolean
build : boolean
verbose : boolean
bcartesian : boolean
xlim : '(xmin,xmax,ymin,ymax) | () default'
dist_m : int
typ : string
'indoor' | 'outdoor'
"""
self.arg = arg
self._filematini = kwargs.pop('mat','matDB.ini')
self._fileslabini = kwargs.pop('slab','slabDB.ini')
self._filefur = kwargs.pop('fur','')
self.bcheck = kwargs.pop('bcheck',False)
self.bbuild = kwargs.pop('bbuild',False)
self.bgraphs = kwargs.pop('bgraphs',False)
self.bverbose = kwargs.pop('bverbose',False)
self.bcartesian = kwargs.pop('bcartesian',True)
self.xlim = kwargs.pop('xlim',())
self.dist_m = kwargs.pop('dist_m',400)
self.typ = kwargs.pop('typ','outdoor')
self.labels = {}
self.Np = 0
self.Ns = 0
self.Nss = 0
self.lsss = []
#
# Initializing graphs
# Gs Gr Gt Gm
self.Gs = nx.Graph(name='Gs')
self.Gr = nx.Graph(name='Gr')
self.Gt = nx.Graph(name='Gt')
self.Gm = nx.Graph(name='Gm')
self.Gs.pos = {}
self.tahe = np.zeros(([2, 0]), dtype=int)
self.lbltg = []
self.Gt.pos = {}
self._shseg = {}
self.hasboundary = False
self.coordinates = 'cart'
self.version = '1.3'
assert(self.typ in ['indoor','outdoor','floorplan'])
self.isbuilt = False
self.loadosm = False
#
# setting display option
self.display = {}
self.display['title'] = ''
self.display['ticksoff'] = True
self.display['nodes'] = False
self.display['ndsize'] = 10
self.display['ndlabel'] = False
self.display['ndlblsize'] = 20
self.display['edlblsize'] = 20
self.display['fontsize'] = 20
self.display['edlabel'] = False
self.display['edges'] = True
self.display['ednodes'] = False
self.display['subseg'] = True
self.display['isonb'] = True
self.display['transition'] = True
self.display['visu'] = False
self.display['thin'] = False
self.display['scaled'] = True
self.display['alpha'] = 0.5
self.display['layer'] = []
self.display['clear'] = False
self.display['activelayer'] = 'AIR'
self.display['layers'] = []
self.display['overlay'] = False
self.display['overlay_flip'] = ""
# self.display['overlay_file']="/home/buguen/Pyproject/data/image/"
self.display['overlay_file'] = ""
self.display['overlay_axis'] = ""
# self.display['layerset'] = self.sl.keys()
if self.xlim!=():
self.display['box']= self.xlim
else:
self.display['box'] = (-50, 50, -50, 50)
self.name = {}
self.ax = self.display['box']
self.zmin = 0
self.maxheight = 3.
newfile = False
loadlay = False
loadosm = False
loadres = False
#
# Layout main argument
#
if type(self.arg)==tuple:
self.arg = str(self.arg)
#
# Layout Point of Interest DataFrame
#
# A Layout is equipped with a DataFrame of Points of Interest
#
# dfpoi type =['tree','human','tx','rx','support']
#
self.dfpoi = pd.DataFrame(columns=['name','type','lon','lat','alt','x',',y','z','radius'])
if type(self.arg) is bytes:
self.arg = self.arg.decode('utf-8')
arg, ext = os.path.splitext(self.arg)
if arg != '':
if ext == '.ini':
self._filename = self.arg
loadlay = True
if ext == '.lay':
self._filename = self.arg
loadlay = True
elif ext == '.osm':
self._filename = arg + '.lay'
loadosm = True
elif ext == '.res':
self._filename = arg + '.lay'
loadres = True
else:
self.typ = 'outdoor'
else: # No argument
self._filename = 'newfile.lay'
newfile = True
self.sl = sb.SlabDB(fileslab=self._fileslabini, filemat=self._filematini)
self.zfloor = 0.
self.zceil = self.maxheight
if not newfile:
if loadlay:
filename = pyu.getlong(self._filename, pro.pstruc['DIRLAY'])
if os.path.exists(filename): # which exists
self.load()
else: # which do not exist
newfile = True
print("new file - creating a void Layout", self._filename)
elif loadosm: # load .osm file
self.importosm(fileosm=self.arg, cart=self.bcartesian, typ=self.typ)
self.loadosm = True
elif loadres:
self.importres(_fileres=self.arg)
self.sl = sb.SlabDB()
elif '(' in str(arg): # load from osmapi latlon (string or tuple
latlon = eval(self.arg)
self.importosm(latlon=latlon, dist_m=self.dist_m, cart=self.bcartesian, typ=self.typ)
self.loadosm = True
else: # load from address geocoding
self.importosm(address=self.arg, dist_m=self.dist_m, cart=self.bcartesian , typ=self.typ)
self.loadosm = True
# add boundary if it not exist
if (not self.hasboundary) or (self.xlim != ()):
self.boundary(xlim = self.xlim)
self.subseg()
self.updateshseg()
try:
self.geomfile()
except:
print("problem to construct geomfile")
#
# check layout
#
self.bconsistent = True
if self.bcheck:
self.bconsistent,dseg = self.check()
# if Layout is correctly described
# check if the graph gpickle files have been built
if self.bconsistent:
#
# build and save graphs
#
if self.bbuild:
# ans = raw_input('Do you want to build the layout (y/N) ? ')
# if ans.lower()=='y'
self.build()
self.lbltg.append('s')
self.dumpw()
#
# load graphs from file
#
elif self.bgraphs:
if os.path.splitext(self._filename)[1]=='.lay':
dirname = self._filename.replace('.lay','')
path = os.path.join(pro.basename,
'struc',
'gpickle',
dirname)
if os.path.exists(path):
# load graph Gt
# and compare the self._hash from ini file
# with the hash store in node 0 of Gt at time of the last build
# If they are different a rebuild is needeed
# Otherwise all the stored graphs are loaded
#
self.dumpr('t')
# If node 0 exists : the layout has been built
# If .ini file has changed rebuild
if self._hash == self.Gt.node[0]['hash']:
self.dumpr('stvirw')
self.isbuilt = True
bbuild = False
else:
print(".lay file has changed you must rebuild the grahs")
else:
# if graph are requested and it not exists a pickle of a graph
# they are built
self.build()
self.lbltg.append('s')
self.dumpw()
    def __repr__(self):
        """ Return a human readable summary of the Layout.

        The summary gathers : current project path (read from ~/.pylayers),
        layout file name and hash, graph sizes (Gs, Gt, Gv, Gi, Gr, Gw),
        point degree statistics and the bounding geometry (xrange, yrange,
        center, radius).
        """
        st = '\n'
        st = st + "----------------\n"
        home = os.path.expanduser('~')
        # the ~/.pylayers file stores the active project directory
        # on the line following the 'project' marker
        with open(os.path.join(home, '.pylayers'),'r') as f:
            paths = f.readlines()
        uporj = paths.index('project\n')
        project = paths[uporj+1]
        st = st + "Project : " + project+'\n'
        if hasattr(self, '_hash'):
            st = st + self._filename + ' : ' + self._hash + "\n"
        else:
            st = st + self._filename + "\n"
        if self.isbuilt:
            # hash of the .lay file the graphs were built from
            st = st + 'Built with : ' + self.Gt.node[0]['hash'] + "\n"
        st = st + 'Type : ' + self.typ+'\n'
        if self.display['overlay_file'] != '':
            filename = pyu.getlong(
                self.display['overlay_file'], os.path.join('struc', 'images'))
            st = st + "Image('" + filename + "')\n"
        st = st + "Coordinates : " + self.coordinates + "\n"
        if hasattr(self,'extent'):
            st = st + "----------------\n"
            st = st+ str(self.extent)+'\n'
        if hasattr(self,'extent_c'):
            st = st + "----------------\n"
            st = st+ str(self.extent_c)+'\n'
        # one line per graph : number of nodes and number of edges
        if hasattr(self, 'Gs'):
            st = st + "----------------\n"
            st = st + "Gs : "+str(len(self.Gs.node))+"("+str(self.Np)+'/'+str(self.Ns)+'/'+str(len(self.lsss))+') :'+str(len(self.Gs.edges()))+'\n'
        if hasattr(self,'Gt'):
            st = st + "Gt : "+str(len(self.Gt.node))+' : '+str(len(self.Gt.edges()))+'\n'
        if hasattr(self,'Gv'):
            st = st + "Gv : "+str(len(self.Gv.node))+' : '+str(len(self.Gv.edges()))+'\n'
        if hasattr(self,'Gi'):
            st = st + "Gi : "+str(len(self.Gi.node))+' : '+str(len(self.Gi.edges()))+'\n'
        if hasattr(self,'Gr'):
            st = st + "Gr : "+str(len(self.Gr.node))+' : '+str(len(self.Gr.edges()))+'\n'
        if hasattr(self,'Gw'):
            st = st + "Gw : "+str(len(self.Gw.node))+' : '+str(len(self.Gw.edges()))+'\n'
        st = st + "----------------\n\n"
        if hasattr(self, 'degree'):
            for k in self.degree:
                # degrees 0, 1 and > 3 are unusual : list the points
                # themselves, otherwise only print how many there are
                if (k < 2) or (k > 3):
                    st = st + 'degree ' + \
                        str(k) + ' : ' + str(self.degree[k]) + "\n"
                else:
                    st = st + 'number of node points of degree ' + \
                        str(k) + ' : ' + str(len(self.degree[k])) + "\n"
        st = st + "\n"
        st = st + "xrange : " + str(self.ax[0:2]) + "\n"
        st = st + "yrange : " + str(self.ax[2:]) + "\n"
        if hasattr(self,'pg'):
            st = st + "center : " + "( %.2f,%.2f)" % (self.pg[0],self.pg[1]) + "\n"
        if hasattr(self,'radius'):
            st = st + "radius : %.2f " % self.radius + "\n"
        # the extended help below has been moved to self._help()
        # st = st + "\nUseful dictionnaries" + "\n----------------\n"
        # if hasattr(self,'dca'):
        #     st = st + "dca {cycle : []} cycle with an airwall" +"\n"
        # if hasattr(self,'di'):
        #     st = st + "di {interaction : [nstr,typi]}" +"\n"
        # if hasattr(self,'sl'):
        #     st = st + "sl {slab name : slab dictionary}" +"\n"
        # if hasattr(self,'name'):
        #     st = st + "name : {slab :seglist} " +"\n"
        # st = st + "\nUseful arrays"+"\n----------------\n"
        # if hasattr(self,'pt'):
        #     st = st + "pt : numpy array of points " +"\n"
        # if hasattr(self,'normal'):
        #     st = st + "normal : numpy array of normal " +"\n"
        # if hasattr(self,'offset'):
        #     st = st + "offset : numpy array of offset " +"\n"
        # if hasattr(self,'tsg'):
        #     st = st + "tsg : get segment index in Gs from tahe" +"\n"
        # if hasattr(self,'isss'):
        #     st = st + "isss : sub-segment index above Nsmax"+"\n"
        # if hasattr(self,'tgs'):
        #     st = st + "tgs : get segment index in tahe from self.Gs" +"\n"
        # if hasattr(self,'upnt'):
        #     st = st + "upnt : get point id index from self.pt"+"\n"
        # #if hasattr(self,'iupnt'):
        # #    st = st + "iupnt : get point index in self.pt from point id "+"\n"
        # if hasattr(self,'lsss'):
        #     st = st + "lsss : list of segments with sub-segment"+"\n"
        # if hasattr(self,'sridess'):
        #     st = st + "stridess : stride to calculate the index of a subsegment" +"\n"
        # if hasattr(self,'sla'):
        #     st = st + "sla : list of all slab names (Nsmax+Nss+1)" +"\n"
        # if hasattr(self,'degree'):
        #     st = st + "degree : degree of nodes " +"\n"
        # st = st + "\nUseful tip" + "\n----------------\n"
        # st = st + "Point p in Gs => p_coord:\n"
        # #st = st + "p -> u = self.iupnt[-p] -> p_coord = self.pt[:,u]\n\n"
        #st = st + "Segment s in Gs => s_ab coordinates \n"
        #st = st + "s2pc : segment to point coordinates (sparse)  [p1,p2] = L.s2pc.toarray().reshape(2,2).T \n"
        #st = st + \
        #    "s -> u = self.tgs[s] -> v = self.tahe[:,u] -> s_ab = self.pt[:,v]\n\n"
        return(st)
def __add__(self, other):
""" addition
One can add either a numpy array or an other layout
"""
Ls = copy.deepcopy(self)
if type(other) == np.ndarray:
for k in Ls.Gs.pos:
Ls.Gs.pos[k] = Ls.Gs.pos[k] + other[0:2]
else:
offp = -min(Ls.Gs.nodes())
offs = max(Ls.Gs.nodes())
other.offset_index(offp=offp, offs=offs)
Ls.Gs.node.update(other.Gs.node)
Ls.Gs.edge.update(other.Gs.edge)
Ls.Gs.adj.update(other.Gs.adj)
Ls.Gs.pos.update(other.Gs.pos)
Ls.Np = Ls.Np + other.Np
Ls.Ns = Ls.Ns + other.Ns
Ls.Nss = Ls.Nss + other.Nss
return(Ls)
def __mul__(self, alpha):
""" scale the layout
other : scaling factor (np.array or int or float)
Returns
-------
Ls : Layout
scaled layout
"""
Ls = copy.deepcopy(self)
Gs = Ls.Gs
if type(alpha) != np.ndarray:
assert((type(alpha) == float) or (
type(alpha) == int)), " not float"
alpha = np.array([alpha, alpha, alpha])
else:
assert(len(alpha) == 3), " not 3D"
#
# scaling x & y
#
x = np.array(Gs.pos.values())[:, 0]
x = x * alpha[0]
y = np.array(Gs.pos.values())[:, 1]
y = y * alpha[1]
xy = np.vstack((x, y)).T
Ls.Gs.pos = dict(zip(Gs.pos.keys(), tuple(xy)))
#
# scaling z
#
nseg = filter(lambda x: x > 0, Gs.nodes())
for k in nseg:
Ls.Gs.node[k]['z'] = tuple(
(np.array(Ls.Gs.node[k]['z']) - self.zmin) * alpha[2] + self.zmin)
if 'ss_z' in Ls.Gs.node[k]:
Ls.Gs.node[k]['ss_z'] = list(
(np.array(Ls.Gs.node[k]['ss_z']) - self.zmin) * alpha[2] + self.zmin)
#
# updating numpy array from graph
#
Ls.g2npy()
return Ls
    def switch(self):
        """ switch coordinates

        Toggle all node positions between cartesian ('cart') and
        geographic ('latlon') coordinates using the projection self.m,
        and update the numpy point array self.pt accordingly.
        Does nothing when the layout has no projection ('m' attribute).
        """
        if hasattr(self,'m'):
            if self.coordinates=='cart':
                for k in self.Gs.pos.keys():
                    # inverse projection : (x, y) -> (lon, lat)
                    self.Gs.pos[k] = self.m( self.Gs.pos[k][0], self.Gs.pos[k][1], inverse=True)
                self.coordinates ='latlon'
            elif self.coordinates=='latlon':
                for k in self.Gs.pos.keys():
                    # direct projection : (lon, lat) -> (x, y)
                    self.Gs.pos[k] = self.m( self.Gs.pos[k][0], self.Gs.pos[k][1])
                self.coordinates ='cart'
            # refresh the numpy mirror of point positions (points have
            # negative indexes in Gs)
            nodes = self.Gs.nodes()
            upnt = [n for n in nodes if n < 0]
            self.pt[0, :] = np.array([self.Gs.pos[k][0] for k in upnt])
            self.pt[1, :] = np.array([self.Gs.pos[k][1] for k in upnt])
def _help(self):
st = ''
st = st + "\nUseful dictionnaries" + "\n----------------\n"
if hasattr(self,'dca'):
st = st + "dca {cycle : []} cycle with an airwall" +"\n"
if hasattr(self,'di'):
st = st + "di {interaction : [nstr,typi]}" +"\n"
if hasattr(self,'sl'):
st = st + "sl {slab name : slab dictionary}" +"\n"
if hasattr(self,'name'):
st = st + "name : {slab :seglist} " +"\n"
st = st + "\nUseful arrays"+"\n----------------\n"
if hasattr(self,'pt'):
st = st + "pt : numpy array of points " +"\n"
if hasattr(self,'normal'):
st = st + "normal : numpy array of normal " +"\n"
if hasattr(self,'offset'):
st = st + "offset : numpy array of offset " +"\n"
if hasattr(self,'tsg'):
st = st + "tsg : get segment index in Gs from tahe" +"\n"
if hasattr(self,'isss'):
st = st + "isss : sub-segment index above Nsmax"+"\n"
if hasattr(self,'tgs'):
st = st + "tgs : get segment index in tahe from self.Gs" +"\n"
if hasattr(self,'upnt'):
st = st + "upnt : get point id index from self.pt"+"\n"
st = st + "\nUseful Sparse arrays"+"\n----------------\n"
if hasattr(self,'sgsg'):
st = st + "sgsg : "+"get common point of 2 segment (usage self.sgsg[seg1,seg2] => return common point \n"
if hasattr(self,'s2pc'):
st = st + "s2pc : "+"from a Gs segment node to its 2 extremal points (tahe) coordinates\n"
if hasattr(self,'s2pu'):
st = st + "s2pc : "+"from a Gs segment node to its 2 extremal points (tahe) index\n"
if hasattr(self,'p2pu'):
st = st + "p2pc : "+"from a Gs point node to its coordinates\n"
st = st + "\nUseful lists"+"\n----------------\n"
#if hasattr(self,'iupnt'):
# st = st + "iupnt : get point index in self.pt from point id "+"\n"
if hasattr(self,'lsss'):
st = st + "lsss : list of segments with sub-segment"+"\n"
if hasattr(self,'sridess'):
st = st + "stridess : stride to calculate the index of a subsegment" +"\n"
if hasattr(self,'sla'):
st = st + "sla : list of all slab names (Nsmax+Nss+1)" +"\n"
if hasattr(self,'degree'):
st = st + "degree : degree of nodes " +"\n"
st = st + "\nUseful tip" + "\n----------------\n"
st = st + "Point p in Gs => p_coord: Not implemented\n"
# st = st + "p -> u = self.upnt[-p] -> p_coord = self.pt[:,-u]\n\n"
st = st + "Segment s in Gs => s_ab coordinates \n"
st = st + \
"s -> u = self.tgs[s] -> v = self.tahe[:,u] -> s_ab = self.pt[:,v]\n\n"
print(st)
def ls(self, typ='lay'):
""" list the available file in dirstruc
Parameters
----------
typ : string optional
{'lay'|'osm'|'wrl'}
Returns
-------
lfile_s : list
sorted list of all the .str file of strdir
Notes
-----
strdir is defined in the Project module
Examples
--------
Display all available structures
>>> from pylayers.gis.layout import *
>>> L = Layout()
>>> fillist = L.ls()
"""
if typ == 'lay':
pathname = os.path.join(pro.pstruc['DIRLAY'], '*.' + typ)
if typ == 'osm':
pathname = os.path.join(pro.pstruc['DIROSM'], '*.' + typ)
if typ == 'wrl':
pathname = os.path.join(pro.pstruc['DIRWRL'], '*.' + typ)
lfile_l = glob.glob(os.path.join(pro.basename, pathname))
lfile_s = []
for fi in lfile_l:
fis = pyu.getshort(fi)
lfile_s.append(fis)
lfile_s.sort()
return lfile_s
    def offset_index(self, offp=0, offs=0):
        """ offset points and segment index

        Shift all point indexes (negative) by -offp and all segment
        indexes (positive) by +offs, in node attributes, positions and
        adjacency. Used by __add__ before merging two layouts.

        Parameters
        ----------
        offp : int
            offset applied to points (point k becomes k - offp)
        offs : int
            offset applied to segments (segment k becomes k + offs)

        See Also
        --------
        __add__

        Notes
        -----
        Portage vers networkx 2. inacheve (unfinished networkx 2 port).
        """
        # re-key node attribute dicts : points first, then segments
        newpoint = dict((k - offp, v) for k, v in self.Gs.node.items() if k < 0)
        assert (np.array(list(newpoint.keys())) < 0).all()
        newseg = dict((k + offs, v) for k, v in self.Gs.node.items() if k > 0)
        assert (np.array(list(newseg.keys())) > 0).all()
        newpoint.update(newseg)
        nx.set_node_attributes(self.Gs,newpoint)
        #self.Gs.node = newpoint
        # re-key the position dictionary the same way
        newppoint = dict((k - offp, v) for k, v in self.Gs.pos.items() if k < 0)
        newpseg = dict((k + offs, v) for k, v in self.Gs.pos.items() if k > 0)
        newppoint.update(newpseg)
        self.Gs.pos = newppoint
        # adjascence list of segments (their neighbors are points -> -offp)
        ladjs = [self.Gs.adj[k] for k in self.Gs.adj.keys() if k > 0]
        # adjascence list of points (their neighbors are segments -> +offs)
        ladjp = [self.Gs.adj[k] for k in self.Gs.adj.keys() if k < 0]
        nladjs = map(lambda x: dict((k - offp, v)
                                    for k, v in x.items()), ladjs)
        nladjp = map(lambda x: dict((k + offs, v)
                                    for k, v in x.items()), ladjp)
        lpt = [k - offp for k in self.Gs.adj.keys() if k < 0]
        lseg = [k + offs for k in self.Gs.adj.keys() if k > 0]
        dpt = dict(zip(lpt, nladjp))
        dseg = dict(zip(lseg, nladjs))
        dseg.update(dpt)
        print("Todo create a dictionnary of edges")
        # NOTE(review): dseg maps *nodes* to adjacency dicts, while
        # nx.set_edge_attributes expects edge-keyed data -- the networkx 2
        # port looks unfinished here (see docstring) ; confirm before use
        nx.set_edge_attributes(self.Gs,dseg)
        # self.Gs.adj = dseg
        # self.Gs.edge = dseg
def check(self, level=0, epsilon = 0.64):
""" Check Layout consistency
Parameters
----------
level : int
Returns
-------
consistent : Boolean
True if consistent
dseg : dictionnary of segments
See Also
--------
GeomUtil.isBetween
Notes
-----
For all segments
get the 2 vertices
for all the other vertices
check if it belongs to segment
If there are points which are not valid they are displayed
In red point with degree == 1 , In black points with degree == 0
"""
bconsistent = True
nodes = self.Gs.nodes()
if len(nodes) > 0:
#
# points
# segments
# degree of segments
useg = [ x for x in nodes if x > 0 ]
upnt = [ x for x in nodes if x < 0 ]
degseg = [nx.degree(self.Gs, x) for x in useg ]
#
# 1) all segments have degree 2
#
assert(np.all(array(degseg) == 2))
#
# degree of points
# maximum degree of points
#
degpnt = [ nx.degree(self.Gs, x) for x in upnt ] # points absolute degrees
degmin = min(degpnt)
degmax = max(degpnt)
#
# No isolated points (degree 0)
# No points of degree 1
#
if (degmin <= 1):
f, a = self.showG('s', aw=1)
deg0 = [ x for x in upnt if nx.degree(self.Gs, x) == 0]
deg1 = [ x for x in upnt if nx.degree(self.Gs, x) == 1]
if len(deg0) > 0:
logger.critical( "It exists degree 0 points : %r", deg0 )
f, a = self.pltvnodes(deg0, fig=f, ax=a)
bconsistent = False
if len(deg1) > 0:
logger.critical( "It exists degree 1 points : %r", deg1 )
f, a = self.pltvnodes(deg1, fig=f, ax=a)
bconsistent = False
# self.deg = {}
# for deg in range(degmax + 1):
# num = filter(lambda x: degpnt[x] == deg, range(
# len(degpnt))) # position of degree 1 point
# npt = map(lambda x: upnt[x], num) # number of degree 1 points
# self.deg[deg] = npt
#
# check if there are duplicate points or segments
#
# TODO argsort x coordinate
#
# get all the nodes
ke = list(self.Gs.pos.keys())
lpos = list(self.Gs.pos.values())
x = np.array([ pp[0] for pp in lpos ] )
y = np.array([ pp[1] for pp in lpos ] )
p = np.vstack((x, y))
d1 = p - np.roll(p, 1, axis=1)
sd1 = np.sum(np.abs(d1), axis=0)
if not sd1.all() != 0:
lu = np.where(sd1 == 0)[0]
for u in lu:
# if ke[u]>0:
# self.del_segment(ke[u])
if ke[u] < 0:
self.del_points(ke[u])
nodes = self.Gs.nodes()
# useg = filter(lambda x : x>0,nodes)
upnt = filter(lambda x: x < 0, nodes)
# iterate on useg : list of segments
# s : n1 <--> n2
#
# Is there a point different from (n1-n2) in betweeen of an existing segment s ?
#
# Not scalable. Double for loop
dseg = {}
if (self.typ == 'indoor') or (self.typ=='outdoor'):
for s in useg:
# n1, n2 = np.array(self.Gs.neighbors(s)) # node s neighbors
n1, n2 = np.array(self.Gs[s]) # node s neighbors
p1 = np.array(self.Gs.pos[n1]) # p1 --- p2
p2 = np.array(self.Gs.pos[n2]) # s
#
# iterate on upnt : list of points
for n in upnt:
if (n1 != n) & (n2 != n):
p = np.array(self.Gs.pos[n])
if geu.isBetween(p1, p2, p,epsilon=epsilon):
if s in dseg:
dseg[s].append(n)
else:
dseg[s]=[n]
logger.critical("segment %d contains point %d", s, n)
bconsistent = False
if level > 0:
cycle = self.Gs.node[s]['ncycles']
if len(cycle) == 0:
logger.critical("segment %d has no cycle", s)
if len(cycle) == 3:
logger.critical(
"segment %d has cycle %s", s, str(cycle))
#
# check if Gs points are unique
# segments can be duplicated
#
P = np.array([self.Gs.pos[k] for k in upnt])
similar = geu.check_point_unicity(P)
if len(similar) != 0:
logger.critical("points at index(es) %s in self.Gs.pos are similar", str(similar))
bconsistent = False
return bconsistent, dseg
def clip(self, xmin, xmax, ymin, ymax):
""" return the list of edges which cross or belong to the clipping zone
Parameters
----------
xmin : float
xmax : float
ymin : float
ymax : float
Returns
-------
seglist : list of segment number
Notes
-----
1) Determine all segments outside the clipping zone
2) Union of the 4 conditions
3) setdiff1d between the whole array of segments and the segments outside
"""
p0 = self.pt[:, self.tahe[0, :]]
p1 = self.pt[:, self.tahe[1, :]]
maxx = np.maximum(p0[0, :], p1[0, :])
maxy = np.maximum(p0[1, :], p1[1, :])
minx = np.minimum(p0[0, :], p1[0, :])
miny = np.minimum(p0[1, :], p1[1, :])
nxp = np.nonzero(maxx < xmin)[0]
nxm = np.nonzero(minx > xmax)[0]
nyp = np.nonzero(maxy < ymin)[0]
nym = np.nonzero(miny > ymax)[0]
u = np.union1d(nxp, nxm)
u = np.union1d(u, nyp)
u = np.union1d(u, nym)
iseg = np.arange(self.Ns)
return np.setdiff1d(iseg, u)
def check_Gi(self):
for nit1 in self.Gi.nodes():
if len(nit1)>1:
cy1 = nit1[-1]
for nint2 in self.Gi[nit1].keys():
if len(nint2) > 1 :
assert nint2[1] == cy1
# for e0,e1 in self.Gi.edges():
    def g2npy(self,verbose=False):
        """ conversion from graphs to numpy arrays

        Parameters
        ----------
        verbose : boolean
            print progress information for each construction step

        Notes
        -----
        This function updates the following arrays:

        + self.pt (2xNp) : point coordinates
        + self.pg : center of gravity (3,)
        + self.radius : radius of the circumscribing circle around pg
        + self.tahe (2xNs) : tail/head point index of each segment
        + self.tgs : graph to segment transcoding
        + self.tsg : segment to graph transcoding
        + self.s2pu : sparse lil_matrix (segment -> tail/head point index)
        + self.s2pc : sparse lil_matrix (segment -> tail/head coordinates)
        + self.lsss : list of iso segments
        + self.degree : {degree : array of point indexes}
        + self.maxheight : maximum segment height
        + self.normal : segment normals (3xNs)

        See Also
        --------

        extrseg
        """
        nodes = self.Gs.nodes()
        # nodes include points and segments

        # segment index (positive nodes)
        # useg = filter(lambda x: x > 0, nodes)
        useg = [n for n in nodes if n >0]

        # points index (negative nodes)
        # upnt = filter(lambda x: x < 0, nodes)
        upnt = [n for n in nodes if n < 0]

        # matrix segment-segment
        # usage
        # self.sgsg[seg1,seg2] => return common point

        mno = max(nodes)
        #self.sgsg = sparse.lil_matrix((mno+1,mno+1),dtype='int')

        # loop over segments
        # a segment is always connected to 2 nodes
        for s in useg:
            # get point index of the segment (s > 0)
            # v1.1  lpts = [ x for x in nx.neighbors(self.Gs,s)]
            lpts = [ x for x in self.Gs[s] ]
            assert(len(lpts)==2)
            assert(lpts[0]<0)
            assert(lpts[1]<0)
            # get point 0 neighbors
            a = [ x for x in self.Gs[lpts[0]]]
            # a = self.Gs.edge[lpts[0]].keys()
            # get point 1 neighbors
            #b = self.Gs.edge[lpts[1]].keys()
            b = [ x for x in self.Gs[lpts[1]]]
            # segments seen from only one of the two extremities
            nsa = np.setdiff1d(a,b)
            nsb = np.setdiff1d(b,a)
            u  = np.hstack((nsa,nsb))
            npta = [lpts[0]]*len(nsa)
            nptb = [lpts[1]]*len(nsb)
            ns = np.hstack((npta,nptb))
            #self.sgsg[s,u]=ns

        # conversion in numpy array
        self.upnt = np.array((upnt))

        # association
        # utmp = np.array(zip(-self.upnt,np.arange(len(self.upnt))))
        # mutmp = max(utmp[:,0])
        # self.iupnt = -np.ones((mutmp+1),dtype='int')
        # self.iupnt[utmp[:,0]]=utmp[:,1]

        # degree of segment nodes
        degseg = [ nx.degree(self.Gs,x) for x in useg ]

        assert(np.all(np.array(degseg) == 2))  # all segments must have degree 2

        #
        # self.degree : dictionnary (point degree : list of point index)
        #

        # points absolute degrees
        degpnt = np.array([nx.degree(self.Gs, x) for x in upnt])

        # lairwall : list of air wall segments
        # as self.name['AIR'] and self.name['_AIR'] are tested below
        # they are defined as void lists if not already present
        lairwall = []

        if 'AIR' in self.name:
            lairwall += self.name['AIR']
        else:
            self.name['AIR'] = []

        if '_AIR' in self.name:
            lairwall += self.name['_AIR']
        else:
            self.name['_AIR'] = []

        #
        # function to count airwalls connected to a point
        # probably this is not the fastest solution
        #
        def nairwall(nupt):
            """ number of airwall segments connected to point nupt """
            #v1.1 lseg = nx.neighbors(self.Gs, nupt)
            lseg = self.Gs[nupt]
            n = 0
            for ns in lseg:
                if ns in lairwall:
                    n = n + 1
            return n

        nairwall = np.array([ nairwall(x) for x in upnt])
        if verbose:
            print('buildging nairwall : Done')
        #
        # if a node is connected to N air walls ==> deg = deg - N
        #
        degpnt = degpnt - nairwall
        try:
            degmax = max(degpnt)
        except:
            degmax = 1

        self.degree = {}
        if verbose:
            print('Start node degree determination')
        for deg in range(degmax + 1):
            #num = filter(lambda x: degpnt[x] == deg, range(
            #    len(degpnt)))  # position of degree 1 point
            num = [ x for x in range(len(degpnt)) if degpnt[x] == deg ]
            # number of degree 1 points
            #npt = np.array(map(lambda x: upnt[x], num))
            npt = np.array([upnt[x] for x in num])
            self.degree[deg] = npt
        if verbose:
            print('Node degree determination  : Done')
        #
        # convert geometric information in numpy array
        #
        self.pt = np.array(np.zeros([2, len(upnt)]), dtype=float)
        self.tahe = np.array(np.zeros([2, len(useg)]), dtype=int)

        self.Np = len(upnt)
        self.Ns = len(useg)

        self.pt[0, :] = np.array([self.Gs.pos[k][0] for k in upnt])
        self.pt[1, :] = np.array([self.Gs.pos[k][1] for k in upnt])
        if verbose:
            print('pt in np.array : Done')

        # center of gravity and radius of the layout
        self.pg = np.sum(self.pt, axis=1) / np.shape(self.pt)[1]
        ptc = self.pt-self.pg[:,None]
        dptc = np.sqrt(np.sum(ptc*ptc,axis=0))
        self.radius = dptc.max()
        self.pg = np.hstack((self.pg, 0.))

        if self.Ns>0:
            # tail/head point indexes (graph numbering) of every segment
            ntahe = np.array([ [n for n in nx.neighbors(self.Gs,x) ] for x in useg ])
            ntail = ntahe[:,0]
            nhead = ntahe[:,1]
            # create sparse matrix from a Gs segment node to its 2 extremal points (tahe) index
            mlgsn = max(self.Gs.nodes())+1
            self.s2pu = sparse.lil_matrix((mlgsn,2),dtype='int')
            self.s2pu[useg,:] = ntahe
            # convert to compressed row sparse matrix
            # to be more efficient on row slicing
            self.s2pu = self.s2pu.tocsr()

        if self.Ns>0:
            # tahe stores the numpy (continuous) index of tail and head
            aupnt = np.array(upnt)
            self.tahe[0, :] = np.array([np.where(aupnt==x)[0][0] for x in ntail ])
            self.tahe[1, :] = np.array([np.where(aupnt==x)[0][0] for x in nhead ])
        if verbose:
            print('tahe in numpy array : Done')
        #
        # transcoding array between graph numbering (discontinuous) and numpy numbering (continuous)
        #
        Nsmax = 0
        self.tsg = np.array(useg)
        try:
            Nsmax = max(self.tsg)
        except:
            logger.warning("No segments in Layout yet")

        #
        # handling of segment related arrays
        #
        if Nsmax > 0:
            self.tgs = -np.ones(Nsmax + 1, dtype=int)
            rag = np.arange(len(useg))
            self.tgs[self.tsg] = rag
            #
            # calculate normal to segment ta-he
            #
            # This could becomes obsolete once the normal will be calculated at
            # creation of the segment
            #
            X = np.vstack((self.pt[0, self.tahe[0, :]],
                           self.pt[0, self.tahe[1, :]]))
            Y = np.vstack((self.pt[1, self.tahe[0, :]],
                           self.pt[1, self.tahe[1, :]]))

            # normal is the segment direction rotated by -90 degrees,
            # normalized ; zero-length segments would divide by zero
            normx = Y[0, :] - Y[1, :]
            normy = X[1, :] - X[0, :]

            scale = np.sqrt(normx * normx + normy * normy)
            assert (scale.all() > 0), pdb.set_trace()
            self.normal = np.vstack(
                (normx, normy, np.zeros(len(scale)))) / scale

            # for ks in ds:
            #
            # lsss : list of subsegment
            #
            # nsmax = max(self.Gs.node.keys())
            # Warning
            # -------
            # nsmax can be different from the total number of segments
            # This means that the numerotation of segments do not need to be
            # contiguous.
            # stridess : length is equal to nsmax+1
            # sla is an array of string, index 0 is not used because there is
            # no such segment number.
            #
            self.lsss = [x for x in useg if len(self.Gs.node[x]['iso']) > 0]

            # self.isss = []
            # self.stridess = np.array(np.zeros(nsmax+1),dtype=int)
            # self.stridess = np.empty(nsmax+1,dtype=int)
            # +1 is for discarding index 0 (unused here)
            # self.offset = np.empty(nsmax+1+self.Nss,dtype=int)

            # Storing segment normals
            # Handling of subsegments
            #
            # index is for indexing subsegment after the nsmax value
            #
            # index = nsmax+1
            # for ks in useg:
            #     k = self.tgs[ks]  # index numpy
            #     self.offset[k] = self.Gs.node[ks]['offset']
            #     self.Gs.node[ks]['norm'] = self.normal[:,k]  # update normal
            #     nameslab = self.Gs.node[ks]['name']   # update sla array
            #     assert nameslab!='', "segment "+str(ks)+ " is not defined"
            #     self.sla[ks] = nameslab
            #     # stridess is different from 0 only for subsegments
            #     self.stridess[ks] = 0   # initialize stridess[ks]
            #     #if index==155:
            #     if self.Gs.node[ks].has_key('ss_name'):  # if segment has sub segment
            #         nss = len(self.Gs.node[ks]['ss_name'])  # retrieve number of sseg
            #         self.stridess[ks]=index-1  # update stridess[ks] dict
            #         for uk,slabname in enumerate(self.Gs.node[ks]['ss_name']):
            #             self.lsss.append(ks)
            #             self.sla[index] = slabname
            #             self.isss.append(index)
            #             self.offset[index] = self.Gs.node[ks]['ss_offset'][uk]
            #             index = index+1

            # append sub segment normal to normal

        # create sparse matrix from a Gs segment node to its 2 extremal points (tahe) coordinates
        if self.Ns >0:
            self.s2pc = sparse.lil_matrix((mlgsn,4))
            ptail = self.pt[:,self.tahe[0,:]]
            phead = self.pt[:,self.tahe[1,:]]
            A = np.vstack((ptail,phead)).T
            self.s2pc[self.tsg,:]=A
            # convert to compressed row sparse matrix
            # to be more efficient on row slicing
            self.s2pc = self.s2pc.tocsr()
            # for k in self.tsg:
            #    assert(np.array(self.s2pc[k,:].todense())==self.seg2pts(k).T).all(),pdb.set_trace()
        #
        # This is wrong and asume a continuous indexation of points
        # TODO FIX : This problem cleanly
        #
        # self.p2pc is only used in Gspos in outputGi_func only caled in case of
        # multiprocessing
        #
        # The temporary fix is to comment the 5 next lines
        #
        # mino = -min(self.Gs.nodes())+1
        # self.p2pc = sparse.lil_matrix((mino,2))
        # self.p2pc[-self.upnt,:]=self.pt.T
        # self.p2pc = self.p2pc.tocsr()
        # normal_ss = self.normal[:,self.tgs[self.lsss]]
        # self.normal = np.hstack((self.normal,normal_ss))

        # heights above 2000 are ignored (boundary / airwall sentinels)
        # if problem here check file format 'z' should be a string
        lheight = array([v[1] for v in
                         nx.get_node_attributes(self.Gs, 'z').values()
                         if v[1] < 2000 ])
        #assert(len(lheight)>0),logger.error("no valid heights for segments")
        if len(lheight)>0:
            self.maxheight = np.max(lheight)
        else:
            self.maxheight = 3
        # self.maxheight=3.
        # calculate extremum of segments
        self.extrseg()
def importshp(self, **kwargs):
""" import layout from shape file
Parameters
----------
_fileshp :
"""
defaults = {'pref': [np.array([25481100, 6676890]), np.array([60.2043716, 24.6591147])],
'dist_m': 250,
'latlon': True,
'bd': [24, 60, 25, 61],
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fileshp = pyu.getlong(kwargs['_fileshp'], os.path.join('struc', 'shp'))
polys = shp.Reader(fileshp)
verts = []
for poly in polys.iterShapes():
verts.append(poly.points)
npt = -1
ns = 0
xmin = 1e16
ymin = 1e16
xmax = -1e16
ymax = -1e16
self.name['WALL'] = []
for p in verts:
v = np.array(p) - kwargs['pref'][0][None, :]
nv = np.sqrt(np.sum(v * v, axis=1))
# if at least one point is in the radius the poygon is kept
if (nv < kwargs['dist_m']).any():
npoint = len(p)
for k, point in enumerate(p):
# add a new node unless it is the last already existing
# point
if k != (npoint - 1):
if k == 0:
np0 = npt
self.Gs.add_node(npt)
x = point[0]
y = point[1]
xmin = min(x, xmin)
xmax = max(x, xmax)
ymin = min(y, ymin)
ymax = max(y, ymax)
self.Gs.pos[npt] = (x, y)
npt = npt - 1
# add a new segment from the second point
if (k > 0) & (k < npoint - 1):
ns = ns + 1
self.Gs.add_node(ns, name='WALL', z=[
0, 10], offset=0, transition=False, connect=[npt + 1, npt + 2])
self.Gs.add_edge(npt + 1, ns)
self.Gs.add_edge(ns, npt + 2)
self.Gs.pos[ns] = tuple(
(np.array(self.Gs.pos[npt + 1]) + np.array(self.Gs.pos[npt + 2])) / 2.)
# add a new segment closing the polygon
if k == npoint - 1:
ns = ns + 1
self.Gs.add_node(ns, name='WALL', z=[
0, 10], offset=0, transition=False, connect=[np0, npt + 1])
self.Gs.add_edge(np0, ns)
self.Gs.add_edge(ns, npt + 1)
self.Gs.pos[ns] = tuple(
(np.array(self.Gs.pos[npt + 1]) + np.array(self.Gs.pos[np0])) / 2.)
#
# TODO change lon_0 and lat_0 hard coded
#
self.m = Basemap(llcrnrlon=kwargs['bd'][0], llcrnrlat=kwargs['bd'][1],
urcrnrlon=kwargs['bd'][2], urcrnrlat=kwargs['bd'][3],
resolution='i', projection='cass', lon_0=24.5, lat_0=60.5)
if kwargs['latlon']:
lat_ref = kwargs['pref'][1][0]
lon_ref = kwargs['pref'][1][1]
x_ref, y_ref = self.m(lon_ref, lat_ref)
Dx = kwargs['pref'][0][0] - x_ref
Dy = kwargs['pref'][0][1] - y_ref
pos = np.array(self.Gs.pos.values())
for k, keys in enumerate(self.Gs.pos.keys()):
self.Gs.pos[keys] = self.m( pos[k, 0] - Dx, pos[k, 1] - Dy, inverse=True)
self.coordinates = 'latlon'
def importres(self,_fileres,**kwargs):
""" import res format
col1 : x1 coordinates
col2 : y1 coordinates
col3 : x2 coordinates
col4 : y2 coordinates
col5 : building height
col6 : building number
col7 : building class
col8 : ground height
"""
fileres = pyu.getlong(_fileres, os.path.join('struc', 'res'))
D = np.fromfile(fileres,dtype='int',sep=' ')
self.typ = 'outdoor'
# number of integer
N1 = len(D)
# number of lines
N2 = N1/8
D = D.reshape(N2,8)
# list of coordinates
lcoords = []
# list of ring
lring = []
# list of (z_ground, height_building)
zring = []
#
bdg_old = 1
for e in range(N2):
# p1 point coordinate
p1 = ([D[e,0],D[e,1]])
# p2 point coordinate
p2 = ([D[e,2],D[e,3]])
# (ground height,building height)
#z = (D[e,7]-500,D[e,4])
# (ground height,building height+ground_height)
z = (D[e,7],D[e,4]+D[e,7])
# building number
bdg = D[e,5]
# building class
bdc = D[e,6]
# detect change of building
if (bdg_old-bdg)!=0:
ring = sh.LinearRing(lcoords)
poly = sh.Polygon(ring)
if poly.area>0:
lring.append(ring)
zring.append(z)
lcoords = []
bdg_old=bdg
# update lcoords
if p1 not in lcoords:
lcoords.append(p1)
if p2 not in lcoords:
lcoords.append(p2)
npt = 1
for r1,z1 in zip(lring,zring):
x,y = r1.xy
for k2 in range(len(x)):
new_pt = (x[k2],y[k2])
kpos = self.Gs.pos.keys()
vpos = self.Gs.pos.values()
if new_pt not in vpos:
#
# add node point nde <0 and position
#
current_node_index = -npt
self.Gs.add_node(current_node_index)
self.Gs.pos[-npt] = new_pt
npt = npt + 1
else:
u = [k for k in range(len(vpos)) if (vpos[k] == new_pt)]
current_node_index = kpos[u[0]]
if k2>0: # at least already one point
ns = self.add_segment(current_node_index, previous_node_index, name='WALL', z=z1)
else:
starting_node_index = current_node_index
previous_node_index = current_node_index
# last segment
#ns = self.add_segment(previous_node_index, starting_node_index, name='WALL', z=z1)
def importosm(self, **kwargs):
""" import layout from osm file or osmapi
Parameters
----------
fileosm : string
address : string
address to be geocoded
latlon : tuple
(latitude,longitude) degrees
dist_m : float
distance in meter from the geocoded address (def 200 m )
cart : boolean
conversion in cartesian coordinates
Notes
-----
The best and recommended manner to edit a layout is to use the
josm editor in association with the piclayer plugin.
This plugin allows to place a geo-adjusted image in the background
which is very convenient for editing floorplan of buildings.
In josm editor, nodes are numbered with negative indexes, while in
pylayers they have a positive index.
See Also
--------
pylayers.gis.osmparser.osmparse
"""
self._fileosm = kwargs.pop('fileosm','')
cart = kwargs.pop('cart',False)
#
# zceil ansd zfloor are obtained from actual data
#
# indoor default (0,3)
# outdoor default (0,3000)
#if self.typ=='indoor':
self.zceil = -1e10
self.zfloor = 1e10
if self._fileosm == '': # by using osmapi address or latlon
self.typ = kwargs.pop('typ','indoor')
address = kwargs.pop('address','Rennes')
latlon = kwargs.pop('latlon',0)
if type(latlon) == 'str':
latlon = eval(latlon)
dist_m = kwargs.pop('dist_m',200)
coords, nodes, ways, m , latlon = osm.getosm(address = address,
latlon = latlon,
dist_m = dist_m,
bcart = cart,
typ = self.typ)
self.typ = 'outdoor'
if cart:
self.coordinates='cart'
else:
self.coordinates='latlon'
if latlon == '0':
self._filename = kwargs['address'].replace(' ', '_') + '.lay'
else:
lat = latlon[0]
lon = latlon[1]
self._filename = 'lat_' + \
str(lat).replace('.', '_') + '_lon_' + \
str(lon).replace('.', '_') + '.ini'
else: # by reading an osm file
# The osm file is supposed to be in $PROJECT/struc/osm directory
fileosm = pyu.getlong(self._fileosm, os.path.join('struc', 'osm'))
#coords, nodes, ways, relations, m = osm.osmparse(fileosm, typ=self.typ)
# typ outdoor parse ways.buildings
# typ indoor parse ways.ways
# coords, nodes, ways, relations, m = osm.osmparse(fileosm)
coords, nodes, ways, m , (lat,lon) = osm.getosm(cart = cart,
filename = fileosm,
typ = self.typ)
if cart:
self.coordinates = 'cart'
else:
self.coordinates = 'latlon'
# self.coordinates = 'latlon'
self._filename = self._fileosm.replace('osm', 'lay')
_np = 0 # _ to avoid name conflict with numpy alias
_ns = 0
ns = 0
nss = 0
# Reading points (<0 index)
# Reorganize points coordinates for detecting
# duplicate nodes
# duplicate nodes are saved in dict dup
kp = [k for k in coords.xy]
x = np.array([ coords.xy[x][0] for x in kp ])
y = np.array([ coords.xy[x][1] for x in kp ])
ux = np.argsort(x)
x_prev = -100
y_prev = -100
dup = {} # dictionnary of duplicate nodes
for u in ux:
# if node is not already a duplicate
if x[u] == x_prev:
# 2 consecutive points with same lon => check lat
if y[u] == y_prev:
# node u is a duplicate
# udate dup dictionnary
# printu_prev ,k_prev, x_prev,y_prev
# print" ",u ,kp[u], x[u],y[u]
dup[kp[u]] = k_prev
else:
x_prev = x[u]
y_prev = y[u]
u_prev = u
k_prev = kp[u]
for npt in coords.xy:
# if node is not duplicated add node
if npt not in dup:
self.Gs.add_node(npt)
self.Gs.pos[npt] = tuple(coords.xy[npt])
_np += 1
# Reading segments
#
# ways of osm
for k, nseg in enumerate(ways.way):
tahe = ways.way[nseg].refs
for l in range(len(tahe) - 1):
nta = tahe[l]
nhe = tahe[l + 1]
#
# if a node is duplicate recover the original node
#
if nta in dup:
nta = dup[nta]
if nhe in dup:
nhe = dup[nhe]
d = ways.way[nseg].tags
#
# Convert string to integer if possible
#
for key in d:
try:
d[key] = eval(d[key])
except:
pass
# getting segment slab information
if 'slab' in d:
slab = d['name']
else: # the default slab name is WALL
slab = "WALL"
if 'z' in d:
z = d['z']
else:
if self.typ == 'indoor':
z = (0, 3)
if self.typ == 'outdoor':
z = (0, 3000)
if type(z[0])==str:
zmin = eval(z[0])
else:
zmin = z[0]
if type(z[1])==str:
zmax = eval(z[1])
else:
zmax = z[1]
if zmin < self.zfloor:
self.zfloor = zmin
if zmax > self.zceil:
self.zceil = zmax
if 'offset' in d:
offset = d['offset']
else:
offset = 0
#
# get the common neighbor of nta and nhe if it exists
#
#v1.1 u1 = np.array(nx.neighbors(self.Gs, nta))
#v1.1 u2 = np.array(nx.neighbors(self.Gs, nhe))
# import ipdb
# u1 = np.array(self.Gs.node[nta])
# u2 = np.array(self.Gs.node[nhe])
# inter_u1_u2 = np.intersect1d(u1, u2)
#
# Create a new segment (iso segments are managed in add_segment)
#
ns = self.add_segment(nta, nhe, name=slab, z=z, offset=offset)
self.Np = _np
#self.Ns = _ns
self.Nss = nss
#
#
lon = array([self.Gs.pos[k][0] for k in self.Gs.pos])
lat = array([self.Gs.pos[k][1] for k in self.Gs.pos])
# bd = [lon.min(), lat.min(), lon.max(), lat.max()]
# lon_0 = (bd[0] + bd[2]) / 2.
# lat_0 = (bd[1] + bd[3]) / 2.
# self.m = Basemap(llcrnrlon=bd[0], llcrnrlat=bd[1],
# urcrnrlon=bd[2], urcrnrlat=bd[3],
# resolution='i', projection='cass', lon_0=lon_0, lat_0=lat_0)
self.m = m
self.extent = (m.lonmin,m.lonmax,m.latmin,m.latmax)
self.pll = self.m(self.extent[0],self.extent[2])
self.pur = self.m(self.extent[1],self.extent[3])
self.extent_c = (self.pll[0],self.pur[0],self.pll[1],self.pur[1])
if (cart and (self.coordinates!='cart')):
x, y = self.m(lon, lat)
self.Gs.pos = {k: (x[i], y[i]) for i, k in enumerate(self.Gs.pos)}
self.coordinates = 'cart'
# del coords
# del nodes
# del ways
# del relations
#
# get slab and materials DataBase
#
# 1) create material database
# 2) load materials database
# 3) create slabs database
# 4) add materials database to slab database
# 5) load slabs database
mat = sb.MatDB()
mat.load(self._filematini)
self.sl = sb.SlabDB()
self.sl.mat = mat
self.sl.load(self._fileslabini)
#
# update self.name with existing slabs database entries
#
for k in self.sl.keys():
if k not in self.name:
self.name[k] = []
# convert graph Gs to numpy arrays for speed up post processing
self.g2npy()
#
# add boundary
#
self.boundary()
# save ini file
self.save()
#
def exportosm(self):
    """ export layout in osm file format

    The osm file basename is the same as the layout ini file basename
    and is written under struc/osm. If the target file already exists,
    a '_' is appended to the basename instead of overwriting it.

    Notes
    -----
    Points (Gs nodes < 0, except boundary points) are written as osm
    <node> elements; segments (Gs nodes > 0) as osm <way> elements.
    _AIR segments and AIR segments reaching zceil or zfloor are not
    exported (except AIR segments reaching both).

    See Also
    --------
    layout.loadosm
    layout.loadini
    layout.check
    """
    # The osm filename basename is the same as the _filename ini file
    _filename, ext = os.path.splitext(self._filename)
    filename = pyu.getlong(_filename + '.osm', 'struc/osm')
    if os.path.exists(filename):
        filename = pyu.getlong(_filename + '_.osm', 'struc/osm')
    fd = open(filename, "w")
    fd.write("<?xml version='1.0' encoding='UTF-8'?>\n")
    fd.write("<osm version='0.6' upload='false' generator='PyLayers'>\n")
    # creating points (negative node indices), boundary points excluded
    for n in self.Gs.pos:
        if n < 0:
            if n not in self.lboundary:
                if self.coordinates == 'latlon':
                    lon, lat = self.Gs.pos[n]
                if self.coordinates == 'cart':
                    x, y = self.Gs.pos[n]
                    # back-project cartesian coordinates to lat/lon
                    lon, lat = self.m(x, y, inverse=True)
                fd.write("<node id='" + str(n) + "' action='modify' visible='true' lat='" +
                         str(lat) + "' lon='" + str(lon) + "' />\n")
    for n in self.Gs.pos:
        if n > 0:
            #
            # Conditions for adding a segment :
            # _AIR are not added; outdoor AIR walls above buildings
            # (reaching zceil or zfloor only) are not added.
            # NOTE(review): an original comment stated "cond1 is wrong";
            # the condition is kept unchanged here.
            #
            cond1 = (self.Gs.node[n]['name'] != '_AIR')
            cond2 = (self.Gs.node[n]['name'] == 'AIR')
            cond3 = (self.Gs.node[n]['z'][1] == self.zceil)
            cond4 = (self.Gs.node[n]['z'][0] == self.zfloor)
            cond5 = (cond2 and cond3)
            cond6 = (cond2 and cond4)
            cond7 = (cond2 and cond3 and cond4)
            if (cond1 and (not cond5) and (not cond6)) or cond7:
                # BUG FIX: dict keys() view is not indexable in Python 3;
                # materialize it as a list (same pattern as in save()).
                neigh = list(self.Gs[n].keys())
                d = self.Gs.node[n]
                # osm way ids are negative, offset from the segment index
                noden = -10000000 - n
                fd.write("<way id='" + str(noden) +
                         "' action='modify' visible='true'>\n")
                fd.write("<nd ref='" + str(neigh[0]) + "' />\n")
                fd.write("<nd ref='" + str(neigh[1]) + "' />\n")
                fd.write("<tag k='name' v='" + str(d['name']) + "' />\n")
                fd.write("<tag k='z' v=\"" + str(d['z']) + "\" />\n")
                fd.write("<tag k='transition' v='" +
                         str(d['transition']) + "' />\n")
                fd.write("</way>\n")
    fd.write("</osm>\n")
    fd.close()
def save(self):
    """ save Layout structure in a .lay file

    The target short filename is self._filename (a legacy '.ini'
    extension is converted to '.lay'). The file is written in DIRLAY
    and the md5 hash of the written file is stored in self._hash.
    """
    current_version = 1.3
    # legacy extension handling : .ini files are renamed .lay
    if os.path.splitext(self._filename)[1]=='.ini':
        self._filename = self._filename.replace('.ini','.lay')
    #
    # version 1.3 : suppression of index in slab and materials
    #
    config = ConfigParser.RawConfigParser()
    # keep option names case-sensitive
    config.optionxform = str
    config.add_section("info")
    config.add_section("points")
    config.add_section("segments")
    config.add_section("files")
    config.add_section("slabs")
    config.add_section("materials")
    # [info] : coordinate format, file version and layout type
    if self.coordinates == 'latlon':
        config.set("info", "format", "latlon")
    else:
        config.set("info", "format", "cart")
    # NOTE(review): non-string values (floats, tuples, dicts) are passed
    # to RawConfigParser.set throughout this method -- this relies on
    # Python 2 behaviour; confirm when porting to Python 3 configparser.
    config.set("info", "version", current_version)
    config.set("info", "type", self.typ)
    if self.typ == 'indoor':
        config.add_section("indoor")
        config.set("indoor", "zceil", self.zceil)
        config.set("indoor", "zfloor", self.zfloor)
    if self.typ == 'outdoor':
        config.add_section("outdoor")
    #
    # save bounding box in latlon for reconstruction of self.m
    #
    if hasattr(self,"m"):
        config.add_section("latlon")
        config.set("latlon","llcrnrlon",self.m.llcrnrlon)
        config.set("latlon","llcrnrlat",self.m.llcrnrlat)
        config.set("latlon","urcrnrlon",self.m.urcrnrlon)
        config.set("latlon","urcrnrlat",self.m.urcrnrlat)
        config.set("latlon","projection",self.m.projection)
    # config.set("info",'Nsegments',self.Ns)
    # config.set("info",'Nsubsegments',self.Nss)
    # [points] : iterate on points ; boundary nodes are not saved
    for n in self.Gs.pos:
        if n < 0:
            if n not in self.lboundary:
                config.set("points", str(
                    n), (self.Gs.pos[n][0], self.Gs.pos[n][1]))
    # [segments] : iterate on segments
    for n in self.Gs.pos:
        if n > 0:
            cond1 = (self.Gs.node[n]['name'] != '_AIR')
            cond2 = (self.Gs.node[n]['name'] == 'AIR')
            cond3 = (self.Gs.node[n]['z'][1] == self.zceil)
            cond4 = (self.Gs.node[n]['z'][0] == self.zfloor)
            cond5 = (cond2 and cond3)
            cond6 = (cond2 and cond4)
            cond7 = (cond2 and cond3 and cond4)
            #
            # _AIR are not stored (cond1)
            # AIR segments reaching zceil are not stored (cond5)
            # AIR segments reaching zfloor are not stored (cond6)
            # except AIR segments reaching both (cond7)
            #
            if (cond1 and (not cond5) and (not cond6)) or cond7:
                # deep copy so that popping keys does not alter Gs
                d = copy.deepcopy(self.Gs.node[n])
                # v1.1 d['connect'] = nx.neighbors(self.Gs, n)
                d['connect'] = list(self.Gs[n].keys())
                # ensure a 'transition' key exists
                try:
                    if d['transition']:
                        pass
                except:
                    d['transition'] = False
                # a DOOR sub-segment implies a transition
                try:
                    if 'DOOR' in d['ss_name']:
                        d['transition'] = True
                except:
                    pass
                # remove normal information from the structure (derived)
                try:
                    d.pop('norm')
                except:
                    pass
                # remove iso information from the structure (derived)
                try:
                    d.pop('iso')
                except:
                    pass
                # remove ncycles information from the structure (derived)
                try:
                    d.pop('ncycles')
                except:
                    pass
                # transition is saved only if True
                if not d['transition']:
                    d.pop('transition')
                # offset is saved only if not zero
                if 'offset' in d:
                    if d['offset']==0:
                        d.pop('offset')
                config.set("segments", str(n), d)
    #
    # [ slabs ]
    #
    # get the list of slabs actually used by at least one segment
    lslab = [x for x in self.name if len(self.name[x]) > 0]
    lmat = []
    #
    # In case an osm file has been read, there is no .sl :
    # by default all the available slabs and materials are provided
    #
    if not hasattr(self,'sl'):
        self.sl = sb.SlabDB(filemat='matDB.ini', fileslab='slabDB.ini')
    for s in lslab:
        ds = {}
        # create an ad-hoc slab/material on the fly for unknown names
        if s not in self.sl:
            if s not in self.sl.mat:
                self.sl.mat.add(name=s,cval=6,sigma=0,typ='epsr')
            self.sl.add(s,[s],[0.1])
        #ds['index'] = self.sl[s]['index']
        ds['color'] = self.sl[s]['color']
        ds['lmatname'] = self.sl[s]['lmatname']
        # collect the materials referenced by the saved slabs
        for m in ds['lmatname']:
            if m not in lmat:
                lmat.append(m)
        ds['lthick'] = self.sl[s]['lthick']
        ds['linewidth'] = self.sl[s]['linewidth']
        config.set("slabs", s, ds)
    # guarantee the presence of the mandatory default slabs
    if "_AIR" not in lslab:
        air = {'color': 'white', 'linewidth': 1,
               'lthick': [0.1], 'lmatname': ['AIR']}
        config.set("slabs", "_AIR", air)
    if "AIR" not in lslab:
        air = {'color': 'white', 'linewidth': 1,
               'lthick': [0.1], 'lmatname': ['AIR']}
        config.set("slabs", "AIR", air)
    if "CEIL" not in lslab:
        ceil = {'color': 'grey20', 'linewidth': 1,
                'lthick': [0.1], 'lmatname': ['REINFORCED_CONCRETE']}
        config.set("slabs", "CEIL", ceil)
    if "FLOOR" not in lslab:
        floor = {'color': 'grey40', 'linewidth': 1,
                 'lthick': [0.1], 'lmatname': ['REINFORCED_CONCRETE']}
        config.set("slabs", "FLOOR", floor)
    #
    # [ materials ]
    #
    for m in lmat:
        dm = self.sl.mat[m]
        try:
            dm.pop('name')
        except:
            pass
        # store UIT format parameters (a,b,c,d) only if they are used
        if 'a' in dm:
            if dm['a'] ==None:
                dm.pop('a')
                dm.pop('b')
                dm.pop('c')
                dm.pop('d')
        config.set("materials", m, dm)
    # default material always available for CEIL/FLOOR slabs
    if "REINFORCED_CONCRETE" not in lmat:
        reic = {'mur': (
            1 + 0j), 'epr': (8.69999980927 + 0j), 'roughness': 0.0, 'sigma': 3.0}
        config.set("materials", "REINFORCED_CONCRETE", reic)
    # config.set("files",'materials',self.filematini)
    # config.set("files",'slab',self.fileslabini)
    #
    # [ files ]
    #
    config.set("files", 'furniture', self._filefur)
    #
    # handling old format (to be removed later)
    #
    if os.path.splitext(self._filename)[1]=='.ini':
        fileout = self._filename.replace('.ini','.lay')
    else:
        fileout = self._filename
    filelay = pyu.getlong(fileout, pro.pstruc['DIRLAY'])
    print(filelay)
    fd = open(filelay, "w")
    config.write(fd)
    fd.close()
    # convert graph Gs to numpy arrays for speed up post processing
    # ideally an edited Layout should be locked while not saved.
    # self.g2npy()
    # keep the md5 hash of the written file for change detection
    self._hash = hashlib.md5(open(filelay, 'rb').read()).hexdigest()
def load(self):
    """ load a layout from a .lay file

    The short filename is in self._filename and is searched in DIRLAY.

    Format version 1.3
    ------------------
    [info]
    format = {cart | latlon}
    version =
    type = {indoor | outdoor}
    [points]
    -1 = (x,y)
    [segments]
    1 = {'slab':'',transition:boolean,'connect:[-1,-2],'z':(0,3)}
    [slabs]
    WALL = {'lthick':[,],'lmat':[,],'color:'','linewidth':float}
    [materials]
    BRICK = {'mur':complex,'epsr':complex,'sigma':float,'roughness':}
    [indoor]
    zceil =
    zfloor =
    [latlon]

    Notes
    -----
    NOTE(review): eval() is applied to file content throughout -- the
    .lay file is treated as trusted input.
    """
    # di : dictionary which reflects the content of the ini file
    di = {}
    config = ConfigParser.RawConfigParser()
    # keep option names case-sensitive
    config.optionxform = str
    filelay = pyu.getlong(self._filename, pro.pstruc['DIRLAY'])
    config.read(filelay)
    sections = config.sections()
    # mirror every (section, option) pair into di
    for section in sections:
        di[section] = {}
        options = config.options(section)
        for option in options:
            try:
                di[section][option] = config.get(section, option)
            except:
                print(section, option)
    self.Np = len(di['points'])
    self.Ns = len(di['segments'])
    # rebuild the structure graph from scratch
    self.Gs = nx.Graph(name='Gs')
    self.Gs.pos = {}
    self.labels = {}
    #
    # [info]
    # format {cart,latlon}
    # version int
    # type {'indoor','outdoor'}
    if 'version' in di['info']:
        self.version = di['info']['version']
    if 'type' in di['info']:
        self.typ = di['info']['type']
    self.name = {}
    if ((self.typ!='indoor') &
        (self.typ!='outdoor') &
        (self.typ!='floorplan')):
        print("invalid file type in ",self._filename)
        return(None)
    #
    # [indoor]
    # zceil
    # zfloor
    #
    if self.typ == 'indoor':
        self.zceil = eval(di['indoor']['zceil'])
        self.zfloor = eval(di['indoor']['zfloor'])
    # old format : 'floorplan' section
    if self.typ == 'floorplan':
        self.zceil = eval(di['floorplan']['zceil'])
        self.zfloor = eval(di['floorplan']['zfloor'])
    # from format 1.3 floorplan is called indoor
    if self.typ=='floorplan':
        self.typ = 'indoor'
    #
    # [outdoor]
    # TODO add a DEM file
    #
    if self.typ == 'outdoor':
        if 'outdoor' in di:
            if 'zceil' in di['outdoor']:
                self.zceil = eval(di['outdoor']['zceil'])
            else:
                self.zceil = 3000  # upper limit for AIR walls
        else:
            self.zceil = 3000  # upper limit for AIR walls
        if 'outdoor' in di:
            if 'zfloor' in di['outdoor']:
                self.zfloor = eval(di['outdoor']['zfloor'])
            else:
                self.zfloor = 0
        else:
            self.zfloor = 0
    #
    # manage ini file with latlon coordinates :
    # if the format is latlon, coordinates are converted into
    # cartesian coordinates with the coords.cartesian method
    #
    if 'format' in di['info']:
        if di['info']['format'] == 'latlon':
            or_coord_format = 'latlon'
            coords = osm.Coords()
            coords.clean()
            coords.latlon = {i: np.array(
                eval(di['points'][i])) for i in di['points']}
            # NOTE(review): dict.values() passed directly to np.array is
            # a Python 2 idiom -- wrap in list() when porting to Python 3.
            coords.boundary = np.hstack((np.min(np.array(coords.latlon.values()), axis=0),
                                         np.max(np.array(coords.latlon.values()), axis=0)))
            coords.cartesian(cart=True)
        else:
            or_coord_format = 'cart'
    else:
        or_coord_format = 'cart'
    #
    # update display section
    #
    if 'display' in di:
        for k in di['display']:
            try:
                self.display[k] = eval(di['display'][k])
            except:
                self.display[k] = di['display'][k]
    # self.ax = self.display['box']
    #
    # [points]
    #
    # update points section
    for nn in di['points']:
        nodeindex = eval(nn)
        if or_coord_format == 'latlon':
            x, y = coords.xy[nn]
        else:
            x, y = eval(di['points'][nn])
        #
        # limitation of point precision is important for avoiding
        # topological problems in shapely.
        # Layout precision is hard limited to millimeter precision.
        #
        self.Gs.add_node(nodeindex)  # add point node
        self.Gs.pos[nodeindex] = (
            round(1000 * x) / 1000., round(1000 * y) / 1000.)
        self.labels[nodeindex] = nn
    #
    # [segments]
    #
    # update segments section
    self.name['AIR'] = []
    self.name['_AIR'] = []
    #
    # get the maximum segment index
    #
    maxnum = max([eval(x) for x in di['segments'].keys()])
    for key in di['segments']:
        d = eval(di['segments'][key])
        nta = d['connect'][0]
        nhe = d['connect'][1]
        #print(key,nta,nhe)
        name = d['name']
        z = d['z']
        if not 'transition' in d:
            transition = False
        else:
            transition = d['transition']
        if not 'offset' in d:
            offset = 0
        else:
            offset = d['offset']
        # add new segment
        #
        # The segment number is kept identical to the one in the .lay
        # file (very useful feature).
        #
        num = self.add_segment(nta, nhe,
                               num = eval(key),
                               name = name,
                               transition = transition,
                               offset = offset,
                               z = z)
    # exploit iso for segment completion (AIR type)
    #
    # Complement single segments which do not reach zceil or zfloor with
    # an iso segment with AIR property
    #
    segdone = []
    for key in di['segments']:
        iseg = eval(key)
        d = eval(di['segments'][key])
        nta = d['connect'][0]
        nhe = d['connect'][1]
        # if not already done
        if iseg not in segdone:
            # get all the iso of segment iseg
            iso = copy.copy(self.Gs.node[iseg]['iso'])
            # append iseg itself
            iso.append(iseg)
            # stack all the height intervals
            ziso = []
            for ns in iso:
                ziso.append(self.Gs.node[ns]['z'])
            # outdoor : merge all the intervals into a single one
            if self.typ == 'outdoor':
                zmin = 1e6
                zmax = -1e6
                for iz in ziso:
                    zmin = np.minimum(zmin,min(iz))
                    zmax = np.maximum(zmax,max(iz))
                ziso = [(zmin,zmax)]
            # get the complementary intervals between zfloor and zceil
            zair = pyu.compint(ziso,self.zfloor,self.zceil)
            # add an AIR wall on each complementary interval
            for za in zair:
                num = self.add_segment(nta, nhe,
                                       name='AIR',
                                       offset=0,
                                       z=(za[0], za[1]))
            segdone = segdone + iso
    #
    # add _AIR wall around the layout
    #
    self.boundary()
    # compliant with config file without material/slab information
    #
    # [latlon] : rebuild the Basemap projection self.m
    #
    if config.has_section('latlon'):
        llcrnrlon = eval(config.get('latlon', 'llcrnrlon'))
        llcrnrlat = eval(config.get('latlon', 'llcrnrlat'))
        urcrnrlon = eval(config.get('latlon', 'urcrnrlon'))
        urcrnrlat = eval(config.get('latlon', 'urcrnrlat'))
        projection = config.get('latlon','projection')
        lon_0 = (llcrnrlon+urcrnrlon)/2.
        lat_0 = (llcrnrlat+urcrnrlat)/2.
        # Construction of Basemap for coordinates transformation
        self.m = Basemap(llcrnrlon=llcrnrlon,
                         llcrnrlat=llcrnrlat,
                         urcrnrlon=urcrnrlon,
                         urcrnrlat=urcrnrlat,
                         resolution='i',
                         projection=projection,
                         lon_0=lon_0,
                         lat_0=lat_0)
        self.extent = (llcrnrlon,urcrnrlon,llcrnrlat,urcrnrlat)
        # lower-left / upper-right corners in projected coordinates
        self.pll = self.m(self.extent[0],self.extent[2])
        self.pur = self.m(self.extent[1],self.extent[3])
        self.extent_c = (self.pll[0],self.pur[0],self.pll[1],self.pur[1])
    #
    # [files]
    #
    if config.has_section('files'):
        # self.filematini=config.get('files','materials')
        # self.fileslabini=config.get('files','slab')
        self._filefur = config.get('files', 'furniture')
    #
    # [slabs] / [materials] : rebuild the slab database
    #
    if config.has_section('slabs'):
        #filemat = self._filename.replace('ini', 'mat')
        #fileslab = self._filename.replace('ini', 'slab')
        ds = di['slabs']
        dm = di['materials']
        for k in ds:
            ds[k] = eval(ds[k])
        for k in dm:
            dm[k] = eval(dm[k])
        self.sl = sb.SlabDB(ds=ds, dm=dm)
    # In this section we handle the ini file format evolution
    if 'fileoverlay' in self.display:
        self.display['overlay_file'] = self.display.pop('fileoverlay')
        self.display['overlay_axis'] = self.display['box']
        self.save()
    if 'inverse' in self.display:
        self.display['overlay_flip'] = ""
        self.display.pop('inverse')
        self.save()
    # convert graph Gs to numpy arrays for faster post processing
    self.g2npy()
    # keep the md5 hash of the loaded file for change detection
    fd = open(filelay,'rb')
    self._hash = hashlib.md5(fd.read()).hexdigest()
    fd.close()
def loadfur(self, _filefur):
    """ load a furniture ini file

    Parameters
    ----------
    _filefur : string
        short name of the furniture ini file

    Notes
    -----
    Furniture objects are stored in self.lfur list and the short
    filename is kept in self.filefur.

    Examples
    --------

    Load a Layout file and an associated furniture ini file

    .. plot::
        :include-source:

        >>> import matplotlib.pyplot as plt
        >>> from pylayers.gis.layout import *
        >>> L = Layout('WHERE1.lay')
        >>> L.loadfur('Furw1.ini')
        >>> fig = plt.figure()
        >>> ax = fig.gca()
        >>> fig,ax = L.showGs(fig=fig,ax=ax,furniture=True)
        >>> ti = plt.title('loadfur')
        >>> plt.show()
    """
    path = pyu.getlong(_filefur, pro.pstruc['DIRFUR'])
    parser = ConfigParser.ConfigParser()
    parser.read(path)
    self.lfur = []
    # one Furniture object per section of the ini file
    for section in parser.sections():
        piece = fur.Furniture()
        piece.load(_filefur, section)
        self.lfur.append(piece)
    self.filefur = _filefur
def load_modif(self, _filename, build=True, cartesian=False, dist_m=400):
    """ load a Layout in different formats

    Parameters
    ----------
    _filename : string
        layout short filename (searched in DIRLAY)
    build : boolean
        unused here; kept for backward compatibility
    cartesian : boolean
        unused here; kept for backward compatibility
    dist_m : int
        unused here; kept for backward compatibility

    Notes
    -----
    + .lay : ini file format (natural one) DIRLAY
    """
    newfile = False
    filename = pyu.getlong(_filename, pro.pstruc['DIRLAY'])
    if os.path.exists(filename):  # which exists
        # BUG FIX: the original called self.loadini(arg) with an
        # undefined name `arg` (NameError); pass the requested filename.
        self.loadini(_filename)
    else:  # which do not exist
        self._filename = _filename
        newfile = True
        print("new file", self._filename)
    # construct geomfile (.off) for vizualisation with geomview
    self.subseg()
    if not newfile:
        try:
            self.geomfile()
        except:
            print("problem to construct geomfile")
    # if check:
    #     self.check()
    self.boundary(dx=10, dy=10)
# create shapely polygons L._shseg
def subseg(self):
    """ establishes the association : name <-> edgelist

    Returns
    -------
    dico : dict
        sub segment name as key and list of (segment, position) tuples
        as value

    Notes
    -----
    Also stores the result in self.dsseg and the list of transition
    segments in self.listtransition.
    """
    name2seg = {}
    transitions = []
    for node, attrs in self.Gs.node.items():
        # a truthy 'transition' attribute marks a transition segment
        if attrs.get('transition'):
            transitions.append(node)
        # each sub-segment name points back to (segment, rank in list)
        for rank, ss in enumerate(attrs.get('ss_name', [])):
            name2seg.setdefault(ss, []).append((node, rank))
    self.dsseg = name2seg
    self.listtransition = transitions
    return(name2seg)
def add_pnod(self, p, e1, e2):
    """ Project point p on segment e1 along segment e2

    Parameters
    ----------
    p : ndarray
        point
    e1 : int
        edge number 1
    e2 : int
        edge number 2

    ..todo
        This function is void (not implemented yet) -- it is a stub
        kept for API completeness.
    """
    # intended construction (not implemented):
    #p1 = p + alpha*ve2
    #p1 = pa + beta * (pb-pa)
    pass
def add_fnod(self, p=(0.0, 0.0)):
    """ add a free node (point) at position p

    Parameters
    ----------
    p : (1x2) tuple
        point coordinates

    Returns
    -------
    num : int
        the (negative) index assigned to the new point

    Examples
    --------

    >>> from pylayers.gis.layout import *
    >>> L = Layout('defstr.lay')
    >>> L.add_fnod((10.0,10.0))
    -13
    """
    # next free (most negative) node index
    existing = self.Gs.node
    num = min(existing) - 1 if existing else -1
    self.Gs.add_node(num)
    self.Gs.pos[num] = p
    self.Np += 1
    # update labels
    self.labels[num] = str(num)
    return(num)
def add_nfpe(self, np0, s1, s2):
    """ Add node on s1 from projection of np0 along s2

    Parameters
    ----------
    np0 : int
        point number to project
    s1 : int
        edge number 1 (the segment receiving the new point)
    s2 : int
        edge number 2 (gives the projection direction)
    """
    # end points of segments s1 (A,B) and s2 (C,D)
    np1 = list(self.Gs[s1].keys())
    np2 = list(self.Gs[s2].keys())
    xA = self.Gs.pos[np1[0]][0]
    yA = self.Gs.pos[np1[0]][1]
    xB = self.Gs.pos[np1[1]][0]
    yB = self.Gs.pos[np1[1]][1]
    xC = self.Gs.pos[np2[0]][0]
    yC = self.Gs.pos[np2[0]][1]
    xD = self.Gs.pos[np2[1]][0]
    yD = self.Gs.pos[np2[1]][1]
    # position of the point to project
    xP = self.Gs.pos[np0][0]
    yP = self.Gs.pos[np0][1]
    # solve x0*(B-A) + x1*(D-C) = P-A for (x0, x1)
    A = np.array([[xB - xA, xD - xC], [yB - yA, yD - yC]])
    b = np.array([xP - xA, yP - yA])
    x = sp.linalg.solve(A, b)
    # insert the point only if the projection falls strictly inside s1
    if ((x[0] > 0.) & (x[0] < 1.0)):
        self.add_pons(s1, 1 - x[0])
def add_pons(self, ns, alpha=0.5):
    """ add point on segment

    Parameters
    ----------
    ns : int
        segment number
    alpha : float
        parameterization of the point
        alpha = 0 (tail) alpha = 1 (head)

    Notes
    -----
    delete segment ns
    create 2 segments with same properties joined at the new point
    """
    # v1.1 nop = self.Gs.neighbors(ns)
    # the 2 end points of segment ns
    nop = list(self.Gs[ns])
    namens = self.Gs.node[ns]['name']
    zminns = self.Gs.node[ns]['z'][0]
    zmaxns = self.Gs.node[ns]['z'][1]
    p1 = np.array([self.Gs.pos[nop[0]][0], self.Gs.pos[nop[0]][1]])
    p2 = np.array([self.Gs.pos[nop[1]][0], self.Gs.pos[nop[1]][1]])
    # barycentric position of the new point on the segment
    p = tuple(alpha * p1 + (1 - alpha) * p2)
    num = self.add_fnod(p)
    # delete old edge ns
    self.del_segment(ns)
    # add new edge nop[0] -- num (same slab and heights)
    self.add_segment(nop[0], num, name=namens, z=[
        zminns, zmaxns], offset=0)
    # add new edge num -- nop[1] (same slab and heights)
    self.add_segment(num, nop[1], name=namens, z=[
        zminns, zmaxns], offset=0)
def add_segment(self,
                n1,
                n2,
                num=-1,
                maxnum=-1,
                transition = False,
                name='PARTITION',
                z=(0.0, 40000000),
                offset=0,
                verbose=True):
    """ add segment between node n1 and node n2

    Parameters
    ----------
    n1 : integer < 0
        tail point index
    n2 : integer < 0
        head point index
    num : int
        segment index (-1 default : auto-assigned)
    maxnum : int
        maximum number (-1 default not given)
    transition : boolean
        default False (forced True for '_AIR')
    name : string
        layer (slab) name, default 'PARTITION'
    z : tuple of 2 floats
        default = (0,40000000)
    offset : float
        [-1,1] default (0)
    verbose : boolean
        print a message on invalid input

    Returns
    -------
    num : segment number (>0), None for a duplicated '_AIR',
        or nothing when (n1, n2) is not a valid point pair

    Notes
    -----
    A segment dictionnary has the following mandatory attributes

    name : slab name associated with segment
    z : list (zmin,zmax) (meters)
    norm : array (1x3) segment normal
    transition : boolean
    ncycles : list of involved cycles
    connect : list of point number
    iso : list of isosegment

    If a segment is _AIR it cannnot be duplicated
    """
    # both ends must be distinct point (negative) indices
    if ((n1 < 0) & (n2 < 0) & (n1 != n2)):
        nseg = [s for s in self.Gs.node if s > 0]
        if num==-1:
            if len(nseg) > 0:
                num = max(maxnum+1,max(nseg) + 1) # index not given
            else: # first segment index not given
                num = 1
        else:
            pass # segment index given
    else:
        if verbose:
            print("add_segment : error not a node", n1, n2)
        return
    # transition = False
    if (name == '_AIR'):
        # if name == 'AIR':
        transition = True
    p1 = np.array(self.Gs.pos[n1])
    p2 = np.array(self.Gs.pos[n2])
    p2mp1 = p2 - p1
    # unit tangent vector of the segment
    t = p2mp1 / np.sqrt(np.dot(p2mp1, p2mp1))
    #
    # n = t x z (2D) : in-plane normal to the segment
    #
    norm = np.array([t[1], -t[0], 0])
    #
    # Two segments with the same end points are iso segments
    #
    # Determine if there are existing segments with the same neighbors ?
    nbnta = self.Gs[n1].keys()
    nbnhe = self.Gs[n2].keys()
    #v1.1 nbnta = self.Gs.neighbors(n1)
    #nbnhe = self.Gs.neighbors(n2)
    same_seg = list(set(nbnta).intersection(nbnhe))
    #
    # Impossible to have duplicated _AIR
    #
    # Warning : The 3 following lines are very important
    # it breaks buildGt if commented
    # Please do not comment them.
    #
    if (name == '_AIR'):
        if len(same_seg) > 0:
            return None
    #
    # add a segment node to Gs
    #
    self.Gs.add_node(num, name=name,
                     z = z,
                     norm = norm,
                     transition = transition,
                     offset = offset,
                     connect = [n1, n2],
                     iso = [],
                     ncycles = []
                     )
    #
    # update iso lists of both the new and the existing iso segments
    #
    for k in same_seg:
        if num not in self.Gs.node[k]['iso']:
            self.Gs.node[k]['iso'].append(num)
        if k not in self.Gs.node[num]['iso']:
            self.Gs.node[num]['iso'].append(k)
    #
    # Segment point position is placed at the middle of segment
    #
    self.Gs.pos[num] = tuple((p1 + p2) / 2.)
    #
    # Connectivity between segment node num and points nodes n1 and n2
    #
    self.Gs.add_edge(n1, num)
    self.Gs.add_edge(n2, num)
    #
    # Update current total number of segments
    #
    self.Ns = self.Ns + 1
    # update slab name <-> edge number dictionnary
    try:
        self.name[name].append(num)
    except:
        self.name[name] = [num]
    # update label
    self.labels[num] = str(num)
    if name not in self.display['layers']:
        self.display['layers'].append(name)
    # update shseg : shapely representation of the new segment
    self._shseg.update({num:sh.LineString((self.Gs.pos[n1], self.Gs.pos[n2]))})
    return(num)
def merge_segment(self, n1, n2):
    """ merge segment n2 included in n1

    Parameters
    ----------
    n1 : int
        segment 1 (the larger) index
    n2 : int
        segment 2 (the smaller) index

    Notes
    -----
    Segment n1 is deleted and replaced by collinear segments covering
    the parts of n1 outside n2 (in plane) plus, over the span of n2,
    segments covering the height intervals of n1 not covered by n2.
    """
    # get height/slabname information from segment n1
    zn1 = self.Gs.node[n1]['z']
    namen1 = self.Gs.node[n1]['name']
    # get height/slabname information from segment n2
    zn2 = self.Gs.node[n2]['z']
    namen2 = self.Gs.node[n2]['name']
    # complementary height intervals (None when n2 covers n1 on that side)
    # BUG FIX: the original tested `'zlow' in locals()` / `'zhigh' in
    # locals()` while the assigned names were znlow/znhigh, so the
    # complementary segments were never created.
    znlow = None
    znhigh = None
    if min(zn1) < min(zn2):
        znlow = (min(zn1), min(zn2))
    if max(zn1) > max(zn2):
        znhigh = (max(zn2), max(zn1))
    # get termination points of segment n1 (p1 -- p4) and n2 (p2 -- p3)
    conn_n1 = self.Gs.node[n1]['connect']
    conn_n2 = self.Gs.node[n2]['connect']
    p1_index = conn_n1[0]
    p4_index = conn_n1[1]
    p2_index = conn_n2[0]
    p3_index = conn_n2[1]
    p1 = np.r_[self.Gs.pos[p1_index]]
    p2 = np.r_[self.Gs.pos[p2_index]]
    p3 = np.r_[self.Gs.pos[p3_index]]
    p4 = np.r_[self.Gs.pos[p4_index]]
    # determine point order p1 - p2 - p3 - p4 (swap p2/p3 if opposed)
    v14 = p4 - p1
    v23 = p3 - p2
    if np.dot(v14, v23) < 0:
        p2_index, p3_index = p3_index, p2_index
        p2, p3 = p3, p2
    # 1 delete segment n1
    self.del_segment([n1])
    # create new segment p1 - p2
    self.add_segment(p1_index, p2_index, z=zn1, name=namen1)
    # create new segment p3 - p4
    self.add_segment(p3_index, p4_index, z=zn1, name=namen1)
    # create new segments p2 - p3 with complementary heights
    if znlow is not None:
        self.add_segment(p2_index, p3_index, z=znlow, name=namen1)
    if znhigh is not None:
        self.add_segment(p2_index, p3_index, z=znhigh, name=namen1)
def repair(self, dseg):
    """ repair layout

    Parameters
    ----------
    dseg : dict
        {ns : [np1,np2]}

    Notes
    -----
    Merge the superposed segments which have been determined by the
    check method.
    """
    for nseg in dseg:
        num_p = dseg[nseg]
        if len(num_p) == 2:
            # neighbors of both end points
            # FIX: nx.neighbors(G, n) call form was removed in
            # networkx 2.x; use adjacency lookup, consistent with the
            # v1.1 migration applied elsewhere in this file.
            ns1 = np.r_[list(self.Gs[num_p[0]])]
            ns2 = np.r_[list(self.Gs[num_p[1]])]
            # segments shared by both points are superposed with nseg
            ns_inter = np.intersect1d(ns1, ns2)
            for nseg2 in ns_inter:
                if ((self.Gs.node[nseg2]['name'] != 'AIR')
                        and ((self.Gs.node[nseg2]['name'] != '_AIR'))):
                    self.merge_segment(nseg, nseg2)
def wedge2(self, apnt):
    """ calculate wedge angle of a point

    Parameters
    ----------
    apnt : array int
        array of point numbers

    Notes
    -----
    NOTE(review): this function has no return statement and its last
    line references an undefined name `uleft` (NameError at runtime).
    NOTE(review): map/filter results are indexed below, which relies on
    Python 2 list semantics -- confirm before use under Python 3.
    """
    if isinstance(apnt, list):
        apnt = np.array(apnt)
    # 0. Find the position of diffraction point
    ptdiff = self.pt[:, self.iupnt[-apnt]]
    # 1. Find the associated segments and positions of a diff points
    #v1.1 aseg = map(lambda x: filter(lambda y: y not in self.name['AIR'],
    #                                 nx.neighbors(self.Gs, x)),
    #               apnt)
    aseg = map(lambda x: filter(lambda y: y not in self.name['AIR'],
                                self.Gs[x].keys()),apnt)
    # manage flat angle : diffraction by flat segment e.g. door limitation)
    [aseg[ix].extend(x) for ix, x in enumerate(aseg) if len(x) == 1]
    # get points positions
    pts = np.array(map(lambda x: self.seg2pts([x[0], x[1]]), aseg))
    pt1 = pts[:, 0:2, 0]  # tail seg1
    ph1 = pts[:, 2:4, 0]  # head seg1
    pt2 = pts[:, 0:2, 1]  # tail seg2
    ph2 = pts[:, 2:4, 1]  # head seg2
    # 2. Make the correct association
    # pts is (nb_diffraction_points x 4 x 2)
    # - The dimension 4 represent the 2x2 points: t1,h1 and t2,h2
    # tail and head of segemnt 1 and 2 respectively
    # a segment
    # - The dimension 2 is x,y
    #
    # The following aims to determine which tails and heads of
    # segments associated to a give diffraction point
    # are connected
    # point diff is pt1
    updpt1 = np.where(np.sum(ptdiff.T == pt1, axis=1) == 2)[0]
    # point diff is ph1
    updph1 = np.where(np.sum(ptdiff.T == ph1, axis=1) == 2)[0]
    # point diff is pt2
    updpt2 = np.where(np.sum(ptdiff.T == pt2, axis=1) == 2)[0]
    # point diff is ph2
    updph2 = np.where(np.sum(ptdiff.T == ph2, axis=1) == 2)[0]
    pa = np.empty((len(apnt), 2))
    pb = np.empty((len(apnt), 2))
    # seg 1 :
    # if pt1 diff point => ph1 is the other point
    pa[updpt1] = ph1[updpt1]
    # if ph1 diff point => pt1 is the other point
    pa[updph1] = pt1[updph1]
    # seg 2 :
    # if pt2 diff point => ph2 is the other point
    pb[updpt2] = ph2[updpt2]
    # if ph2 diff point => pt2 is the other point
    pb[updph2] = pt2[updph2]
    # pt is the diffraction point
    pt = ptdiff.T
    # unit vectors from the diffraction point toward pa and pb
    vptpa = pt - pa
    vptpan = vptpa.T / np.sqrt(np.sum((vptpa) * (vptpa), axis=1))
    vptpb = pt - pb
    vptpbn = vptpb.T / np.sqrt(np.sum((vptpb) * (vptpb), axis=1))
    v1 = vptpan
    v2 = vptpbn
    ang = geu.vecang(vptpbn, vptpan)
    # NOTE(review): `uleft` is undefined here, and vecang is called with
    # the same vector twice -- this line looks unfinished.
    ang[~uleft] = geu.vecang(vptpan, vptpan)
def wedge(self, lpnt):
    """ calculate wedge angle of a point

    Parameters
    ----------
    lpnt : list of int
        list of point number

    Returns
    -------
    sector : list
        one wedge angle (as returned by geu.sector) per point of lpnt

    Notes
    -----
    For each point, the two non-AIR segments connected to it are
    retrieved; the wedge is the sector defined by the two segment ends
    opposite to the shared point.
    """
    # BUG FIX: the original built `aseg` and `pts` with map()/filter()
    # and then indexed the results, which only works with Python 2 list
    # semantics; list comprehensions behave identically on 2 and 3.
    #v1.1 aseg = map(lambda x: filter(lambda y: y not in
    #                                 self.name['AIR'],
    #                                 nx.neighbors(self.Gs, x)),
    #               lpnt)
    aseg = [[y for y in self.Gs[x] if y not in self.name['AIR']]
            for x in lpnt]
    pts = np.array([self.seg2pts([x[0], x[1]]).reshape(4, 2)
                    for x in aseg])
    N = np.shape(pts)[0]
    sector = []
    for k in range(N):
        # tail/head of both segments attached to point k
        pt1 = pts[k, 0:2, 0]
        ph1 = pts[k, 2:4, 0]
        pt2 = pts[k, 0:2, 1]
        ph2 = pts[k, 2:4, 1]
        # identify the shared point and take the two opposite ends
        if (pt1 == pt2).all():
            pa = ph1
            pb = ph2
            pt = pt1
            ang = geu.sector(pa, pb, pt)
        if (pt1 == ph2).all():
            pa = ph1
            pb = pt2
            pt = pt1
            ang = geu.sector(pa, pb, pt)
        if (ph1 == pt2).all():
            pa = pt1
            pb = ph2
            pt = ph1
            ang = geu.sector(pa, pb, pt)
        if (ph1 == ph2).all():
            pa = pt1
            pb = pt2
            pt = ph1
            ang = geu.sector(pa, pb, pt)
        sector.append(ang)
    return(sector)
def add_furniture(self, name='R1_C', matname='PARTITION', origin=(0., 0.),
                  zmin=0., height=0., width=0., length=0., angle=0.):
    """ add a rectangular piece of furniture

    Parameters
    ----------
    name : string
        default = 'R1_C'
    matname : string
        slab name, default = 'PARTITION'
    origin : tuple of floats
        position of the first corner
    zmin : float
        default = 0
    height : float
        default = 0
    width : float
        default = 0
    length : float
        default = 0
    angle : float
        orientation (degrees), default = 0
    """
    # rectangle axes : u along length, v along width
    theta = angle * np.pi / 180
    u = np.array([np.cos(theta), np.sin(theta)])
    v = np.array([-np.sin(theta), np.cos(theta)])
    # the four corners, counter-clockwise from origin
    p0 = origin
    p1 = p0 + u * length
    p2 = p1 + v * width
    p3 = p2 - u * length
    # add one free node per corner
    corners = [self.add_fnod(p) for p in (p0, p1, p2, p3)]
    # close the rectangle with four segments of the same slab/heights
    zspan = (zmin, zmin + height)
    for ka, kb in zip(corners, corners[1:] + corners[:1]):
        self.add_segment(ka, kb, name=matname, z=zspan)
def add_furniture_file(self, _filefur, typ=''):
    """ add pieces of furniture from an .ini file

    Parameters
    ----------
    _filefur : string
        short name of the furniture ini file (searched in DIRFUR)
    typ : string
        furniture type selector (default '' : add everything)
    """
    filefur = pyu.getlong(_filefur, pro.pstruc['DIRFUR'])
    config = ConfigParser.ConfigParser()
    config.read(filefur)
    for section in config.sections():
        name = config.get(section, "name")
        matname = config.get(section, "matname")
        origin = tuple(ast.literal_eval(config.get(section, "origin")))
        height = config.getfloat(section, "height")
        width = config.getfloat(section, "width")
        length = config.getfloat(section, "length")
        angle = config.getfloat(section, "angle")
        # read but currently unused (kept so a missing option still
        # raises, as before)
        thickness = config.getfloat(section, "thickness")
        # .. todo: be more generic relate to floor level
        zmin = 0.0
        if typ == '':
            self.add_furniture(name, matname, origin,
                               zmin, height, width, length, angle)
        else:
            try:
                self.add_furniture(name, matname, origin,
                                   zmin, height, width, length, angle)
            except:
                raise NameError('No such furniture type - ' + typ + '-')
def del_points(self, lp):
    """ delete points in list lp

    Parameters
    ----------
    lp : list | ndarray | int
        point number(s) (negative indices)

    Notes
    -----
    All the segments connected to the deleted points are removed first,
    then the points themselves, then g2npy() refreshes the arrays.
    """
    # normalize input : accept ndarray, scalar, or list
    if (type(lp) == np.ndarray):
        # BUG FIX: the original did `ln = list(ln)` here, referencing an
        # undefined name (NameError for ndarray input).
        lp = list(lp)
    if (type(lp) != list):
        lp = [lp]
    print("lp : ", lp)
    # get segments involved in points list
    ls = self.nd2seg(lp)
    print("ls : ", ls)
    # 1) delete involved segments
    for k in ls:
        assert(k > 0)
        self.del_segment(k)
        print('del ', k)
    # 2) delete involved points
    for n1 in lp:
        assert(n1 < 0)
        self.Gs.remove_node(n1)
        del self.Gs.pos[n1]
        self.labels.pop(n1)
        self.Np = self.Np - 1
    # 3) updating structures
    self.g2npy()
def del_segment(self, le, verbose=True, g2npy=True):
    """ delete segments listed in le

    Parameters
    ----------
    le : list | ndarray | int
        segment number(s) (positive indices)
    verbose : boolean
        unused, kept for backward compatibility
    g2npy : boolean
        refresh the numpy arrays after deletion (default True)

    See Also
    --------
    pylayers.gis.layout.Layout.del_node

    Notes
    -----
    100% of time is in g2npy
    """
    # normalize input : accept ndarray, scalar, or list
    if type(le) == np.ndarray:
        le = list(le)
    if type(le) != list:
        le = [le]
    for seg in le:
        assert(seg > 0)
        attrs = self.Gs.node[seg]
        slabname = attrs['name']
        # detach seg from the iso lists of its iso-segments
        for other in attrs['iso']:
            if seg in self.Gs.node[other]['iso']:
                self.Gs.node[other]['iso'].remove(seg)
        del self.Gs.pos[seg]  # delete edge position
        self.Gs.remove_node(seg)
        self.labels.pop(seg)
        self.Ns -= 1
        # update slab name <-> edge number dictionary
        self.name[slabname].remove(seg)
        # drop the shapely representation when present
        try:
            self._shseg.pop(seg)
        except:
            pass
    if g2npy:
        self.g2npy()
def point_touches_seg(self, pt, lseg=[], segtol=1e-2, tahetol=1e-2):
    """ determine the segments touched by a point

    Parameters
    ----------
    pt : a point (2,)
    lseg : a list of segments to test.
        if [] => all Gs segments are tested
    segtol : float
        distance tolerance point to segment
    tahetol : float
        distance tolerance point to segment extremities
        => a point on segment extremities is considered
        not touching the segment

    Returns
    -------
    ltseg : list of touched segments (by the point)
    """
    if lseg == []:
        lseg = self.Gs.nodes()
    allnodes = self.Gs.nodes()
    # column-vector form of the point (hoisted out of the loop)
    P = np.array(pt)[:, None]
    touched = []
    for seg in lseg:
        # only existing segment nodes (positive indices) are tested
        if seg <= 0 or seg not in allnodes:
            continue
        n0, n1 = self.Gs.node[seg]['connect']
        # distances to tail, head and orthogonal distance to the segment
        dta, dhe, h = geu.dptseg(P,
                                 np.array(self.Gs.pos[n0])[:, None],
                                 np.array(self.Gs.pos[n1])[:, None])
        if (h <= segtol) and ((dta > tahetol) and (dhe > tahetol)):
            touched.append(seg)
    return touched
def seg_intersection(self,**kwargs):
    ''' determine if a segment intersects any other segment of the layout

    Parameters
    ----------
    shLine : a shapely LineString
    or
    ta,he : tail/head of a segment

    Returns
    -------
    llay_seg : list of layout's segments intersected
    lshP : list of shapely points of intersections

    See Also
    --------
    editor.py
    '''
    if ('ta' in kwargs) and ('he' in kwargs):
        line = sh.LineString((kwargs['ta'], kwargs['he']))
    elif 'shLine' in kwargs:
        line = kwargs['shLine']
    # WARNING : use crosses instead of intersects, otherwise two
    # segments connected to a same node are considered as intersecting
    llay_seg = []
    lshP = []
    for num, lay_shseg in self._shseg.items():
        if line.crosses(lay_shseg):
            llay_seg.append(num)
            lshP.append(line.intersection(lay_shseg))
    return (llay_seg, lshP)
def mask(self):
    """ returns the polygonal mask of the building

    Returns
    -------
    mask : geu.Polygon
        union of the exteriors of all cycle polygons of Gt
        (None, with a message, when Gt has not been built)

    Notes
    -----
    This function assumes graph Gt has been generated
    """
    # bug fix: hasattr takes the attribute NAME as a string; the former
    # `hasattr(self, Gt)` raised NameError on the bare name Gt
    if hasattr(self, 'Gt'):
        # takes the 1st cycle polygon
        p = self.Gt.node[1]['polyg']
        # get the exterior of the polygon
        ps = sh.Polygon(p.exterior)
        # make the union of the exterior of all the cycles
        #
        # cycle : -1 exterior
        #          0 ??
        #
        for k in self.Gt.node:
            if (k != 0) & (k != -1):
                p = self.Gt.node[k]['polyg']
                ps = ps.union(sh.Polygon(p.exterior))
        mask = geu.Polygon(ps)
        mask.setvnodes(self)
        return(mask)
    else:
        print("Gt not built")
def translate(self, vec):
    """ translate the layout in place

    Parameters
    ----------
    vec : sequence of 2 floats
        (dx, dy) translation applied to every node position of Gs
    """
    dx, dy = vec[0], vec[1]
    for node in self.Gs.pos:
        x, y = self.Gs.pos[node]
        self.Gs.pos[node] = (x + dx, y + dy)
def rotate(self, angle=90):
    """ rotate the layout around the origin (in place)

    Parameters
    ----------
    angle : float
        rotation angle in degrees (default 90)
    """
    rad = angle * np.pi / 180
    c = np.cos(rad)
    s = np.sin(rad)
    # apply the 2D rotation matrix [[c,-s],[s,c]] to every node position
    for node in self.Gs.pos:
        x, y = self.Gs.pos[node]
        self.Gs.pos[node] = (c * x - s * y, s * x + c * y)
    # resynchronize the numpy structures
    self.g2npy()
def check2(self):
    """ check pairwise segment anomalies of Gs

    Builds one shapely LineString per segment of Gs and tests every
    pair, printing any crosses / contains / overlaps anomaly found.

    Returns
    -------
    tseg : list of shapely LineString (one per segment of Gs)
    """
    tseg = []
    for k in list(self.Gs.node.keys()):
        if k > 0:
            # v1.1 lnp = self.Gs.neighbors(k)
            # the 2 graph neighbors of a segment node are its end points
            lnp = list(self.Gs[k].keys())
            p1 = self.Gs.pos[lnp[0]]
            p2 = self.Gs.pos[lnp[1]]
            tseg.append(sh.LineString([(p1[0], p1[1]), (p2[0], p2[1])]))
    # exhaustive pairwise test (O(N^2) on the number of segments)
    N = len(tseg)
    for k in combinations(range(N), 2):
        seg1 = tseg[k[0]]
        seg2 = tseg[k[1]]
        if seg1.crosses(seg2):
            print("crosses :", k[0], k[1])
        if seg1.contains(seg2):
            print("contains :", k[0], k[1])
        if seg2.contains(seg1):
            print("contains :", k[0], k[1])
        if seg1.overlaps(seg2):
            print("overlaps :", k[0], k[1])
        if seg2.overlaps(seg1):
            print("overlaps :", k[0], k[1])
    return(tseg)
def cleanup(self):
    """ cleanup the Layout

    Notes
    -----
    1. Remove points which are not connected to any segment
    2. Remove superimposed segments (segments sharing the same pair
       of end points)
    """
    # 1) isolated points : negative node index with graph degree 0
    lk = list(self.Gs.node.keys())
    for n in lk:
        if ((n < 0) & (self.Gs.degree(n) == 0)):
            self.Gs.remove_node(n)
            del self.Gs.pos[n]
            try:
                # also drop the point from the visibility graph if present
                self.Gv.remove_node(n)
            except:
                pass
    # recount remaining points
    self.Np = len(np.nonzero(np.array(list(self.Gs.node.keys())) < 0)[0])
    # 2) build a (segment, tail, head) table for all segments
    aseg_conn = []
    for seg in self.Gs.nodes():
        if seg > 0:
            n0, n1 = list(nx.neighbors(self.Gs, seg))
            aseg_conn.append([seg, n0, n1])
    aseg_conn = np.array(aseg_conn)
    # aseg_conn=np.array([[list(nx.neighbors(self.Gs,x))] for x in self.Gs.nodes() if x >0])
    # keep one segment per unique (n0, n1) pair; the rest are duplicates
    uni, upos = np.unique(aseg_conn[:, 1:], axis=0, return_index=True)
    utbd = [x for x in range(len(aseg_conn)) if not x in upos]
    tbd = aseg_conn[utbd, 0]
    for k in tbd:
        self.del_segment(k)
    self.g2npy()
def info_segment(self, s1):
    """ print information about segment s1

    Parameters
    ----------
    s1 : int
        segment number (positive Gs node index)

    Notes
    -----
    Prints the two end points of s1 with their own neighbors, the slab
    name and the (zmin, zmax) bounds; sub-segment information is printed
    when present.
    """
    # v1.1 nebd = self.Gs.neighbors(s1)
    # py3 fix: dict_keys is not indexable -> materialize as a list
    nebd = list(self.Gs[s1].keys())
    n1 = nebd[0]
    n2 = nebd[1]
    # v1.1 nns1 = self.Gs.neighbors(n1)
    # v1.1 nns2 = self.Gs.neighbors(n2)
    nns1 = list(self.Gs[n1].keys())
    nns2 = list(self.Gs[n2].keys())
    ds1 = self.Gs.node[s1]
    print(n1, ' : ', nns1)
    print(n2, ' : ', nns2)
    print('------------')
    print('Slab : ', ds1['name'])
    print('zmin (m) : ', ds1['z'][0])
    print('zmax (m) : ', ds1['z'][1])
    try:
        # sub-segment keys are optional : absence raises KeyError
        print('------------')
        a = ds1['ss_name']
        print('subseg Slabs : ', ds1['ss_name'])
        print('subseg (zmin,zmax) (m) : ', ds1['ss_z'])
    except:
        pass
def edit_seg(self, e1, data={}):
    """ edit segment

    Parameters
    ----------
    e1 : integer
        edge number
    data : dict
        dictionnary of value of seg or subseg

    Returns
    -------
    data : the dict passed in (unchanged)

    Notes
    -----
    A segment has the following properties :
        + name : string
        + z : tuple
        + transition : boolean (default FALSE)
        + offset : [-1,1]
    If a segment has subsegments attached the following properties are
    added :
        + ss_name : list of string
        + ss_z : list of subsegment e.q. [(min height (meters),max height (meters))]
        + ss_offset : list of offset in [0,1]
    """
    if data == {}:
        return data
    ename = self.Gs.node[e1]['name']
    # unregister e1 from its current slab name
    self.name[ename].remove(e1)
    # manage self.display['name']
    if len(self.name[ename]) == 0:
        try:
            self.display['layers'].remove(ename)
        except ValueError:
            pass
    for k in data:
        self.Gs.node[e1][k] = data[k]
    # robustness fix: data may legitimately omit 'name' (editing only z,
    # offset, ...); previously data['name'] raised KeyError AFTER the
    # segment had been removed from self.name, corrupting the registry.
    newname = data.get('name', ename)
    # re-register e1 under the (possibly unchanged) slab name
    if newname in self.name:
        self.name[newname].append(e1)
    else:
        self.name[newname] = [e1]
    if newname not in self.display['layers']:
        self.display['layers'].append(newname)
    return data
def have_subseg(self, e1):
    """ check whether segment e1 has sub-segments

    Parameters
    ----------
    e1 : int
        segment number

    Returns
    -------
    have_subseg_bool : boolean
        True when the segment carries at least one iso entry
    """
    return len(self.Gs.node[e1]['iso']) > 0
def find_edgelist(self, edgelist, nodelist):
    """ return the subset of edgelist touching nodelist

    Parameters
    ----------
    edgelist : np.array
        input edge (tahe column) indices
    nodelist : np.array
        input node (pt column) indices

    Returns
    -------
    edgelist : np.array
        edges whose tail or head belongs to nodelist

    Notes
    -----
    The historical body was marked "Not Finished" and could not run
    (`np.intersect1d_nu[...]` indexed a nonexistent attribute and used
    undefined names). This implementation keeps an edge when either of
    its end points is in nodelist — TODO confirm against callers.
    """
    edgelist = np.asarray(edgelist)
    nodelist = np.asarray(nodelist)
    tail = self.tahe[0, edgelist]
    head = self.tahe[1, edgelist]
    keep = np.in1d(tail, nodelist) | np.in1d(head, nodelist)
    return edgelist[keep]
def diag(self, p1, p2, l, al1, al2, quadsel=0):
    """ return edge list from a diagonal zone

    Parameters
    ----------
    p1 : np.array
        first corner of the zone
    p2 : np.array
        second corner of the zone
    l : float
        tolerance (width of the diagonal band)
    al1 : float
        extension factor on the p1 side
    al2 : float
        extension factor on the p2 side
    quadsel : 0 all quadrant
            2 1
            3 4

    Returns
    -------
    edgelist
    """
    # bug fix: the body historically referred to an undefined 'tol';
    # the tolerance parameter is 'l'
    tol = l
    x = self.pt[0, :]
    y = self.pt[1, :]
    #
    # quadrant selection relative to p1
    #
    if (quadsel == 0):
        u0 = np.arange(self.Np)
    if (quadsel == 1):
        u0 = np.nonzero((y > p1[1]) & (x > p1[0]))[0]
    if (quadsel == 2):
        u0 = np.nonzero((y > p1[1]) & (x <= p1[0]))[0]
    if (quadsel == 3):
        u0 = np.nonzero((y <= p1[1]) & (x <= p1[0]))[0]
    if (quadsel == 4):
        u0 = np.nonzero((y <= p1[1]) & (x > p1[0]))[0]
    x_u0 = x[u0]
    y_u0 = y[u0]
    #
    # Permutation points : ensure p1 is the leftmost point
    #
    if (p1[0] > p2[0]):
        pt = p2
        p2 = p1
        p1 = pt
    #
    # Box length
    #
    Dx = p2[0] - p1[0]
    Dy = p2[1] - p1[1]
    L = np.sqrt(Dx ** 2 + Dy ** 2)
    #
    # p1 p2 genuinely diagonal
    #
    if ((abs(Dx) > np.finfo(float).eps) & (abs(Dy) > np.finfo(float).eps)):
        a = Dy / Dx
        b = p1[1] - a * p1[0]
        b1 = p1[1] + p1[0] / a
        b2 = p2[1] + p2[0] / a
        delta_b = tol * L / abs(Dx)
        delta_b1 = al1 * L * L / abs(Dy)
        delta_b2 = al2 * L * L / abs(Dy)
        # successive filters; indices are composed back at the end
        u1 = np.nonzero(y_u0 < a * x_u0 + b + delta_b / 2.)[0]
        x_u1 = x_u0[u1]
        y_u1 = y_u0[u1]
        u2 = np.nonzero(y_u1 > a * x_u1 + b - delta_b / 2.)[0]
        x_u2 = x_u1[u2]
        y_u2 = y_u1[u2]
        if (a > 0):
            u3 = np.nonzero(y_u2 > -x_u2 / a + b1 - delta_b1)[0]
            x_u3 = x_u2[u3]
            y_u3 = y_u2[u3]
            u4 = np.nonzero(y_u3 < -x_u3 / a + b2 + delta_b2)[0]
        else:
            u3 = np.nonzero(y_u2 < -x_u2 / a + b1 + delta_b1)[0]
            x_u3 = x_u2[u3]
            y_u3 = y_u2[u3]
            u4 = np.nonzero(y_u3 > -x_u3 / a + b2 - delta_b2)[0]
        x_u4 = x_u3[u4]
        y_u4 = y_u3[u4]
    #
    # p1 p2 vertical
    #
    if (abs(Dx) <= np.finfo(float).eps):
        u1 = np.nonzero(x < p1[0] + tol / 2.)[0]
        x_u1 = x[u1]
        y_u1 = y[u1]
        u2 = np.nonzero(x_u1 > p1[0] - tol / 2.)[0]
        y_u2 = y[u2]
        if (p1[1] > p2[1]):
            # NOTE(review): y[u3] below indexes the full array instead of
            # y_u2[u3]; kept as-is pending confirmation of intent
            u3 = np.nonzero(y_u2 < p1[1] + al1 * L)[0]
            y_u3 = y[u3]
            u4 = np.nonzero(y_u3 > p2[1] - al2 * L)[0]
        else:
            u3 = np.nonzero(y_u2 < p2[1] + al2 * L)[0]
            y_u3 = y[u3]
            u4 = np.nonzero(y_u3 > p1[1] - al1 * L)[0]
    #
    # p1 p2 horizontal
    #
    if (abs(Dy) <= np.finfo(float).eps):
        u1 = np.nonzero(y < p1[1] + tol / 2.)[0]
        y_u1 = y[u1]
        u2 = np.nonzero(y_u1 > p1[1] - tol / 2.)[0]
        x_u2 = x[u2]
        # bug fix: p1(1) / p2(1) called the arrays; index with [1]
        if (p1[1] > p2[1]):
            u3 = np.nonzero(x_u2 < p1[0] + al1 * L)[0]
            x_u3 = x[u3]
            u4 = np.nonzero(x_u3 > p2[0] - al2 * L)[0]
        else:
            u3 = np.nonzero(x_u2 < p2[0] + al2 * L)[0]
            x_u3 = x[u3]
            # NOTE(review): x below should probably be x_u3; kept as-is
            u4 = np.nonzero(x > p1[0] - al1 * L)[0]
    # compose the successive index selections back into u0 space
    nodelist = u0[u1[u2[u3[u4]]]]
    edgelist = np.arange(self.Ns)
    # bug fix: the method is named find_edgelist (find_edge_list raised
    # AttributeError)
    edgelist = self.find_edgelist(edgelist, nodelist)
    return(edgelist)
def nd2seg(self, ndlist):
    """ convert a point (node) list into the list of touching segments

    Parameters
    ----------
    ndlist : list or ndarray
        node list

    Returns
    -------
    seglist : ndarray
        sorted unique list of segments adjacent to the given points

    Notes
    -----
    previously nd2ed
    """
    if isinstance(ndlist, np.ndarray):
        ndlist = ndlist.tolist()
    # the graph neighbors of a point node are its incident segments
    seglist = []
    for n in ndlist:
        seglist.extend(dict(self.Gs.adj[n]).keys())
    return(np.unique(np.array(seglist)))
def ed2nd(self, edlist):
    """ convert edgelist to nodelist

    Parameters
    ----------
    edlist : list or ndarray
        edge list

    Returns
    -------
    ndlist : ndarray
        sorted unique list of the end points of the given segments
    """
    if isinstance(edlist, np.ndarray):
        edlist = edlist.tolist()
    # py3 fix: `list + dict_keys` raises TypeError; extend with the
    # adjacency keys (the two end points of each segment) instead
    ndlist = []
    for e in edlist:
        ndlist.extend(self.Gs.adj[e])
    return(np.unique(ndlist))
def get_zone(self, ax):
    """ get point list and segment list in a rectangular zone

    Parameters
    ----------
    ax : list ot tuple
        [xmin,xmax,ymin,ymax]

    Returns
    -------
    ptlist, seglist
        points strictly inside the box and the segments touching them
    """
    xmin, xmax, ymin, ymax = ax[0], ax[1], ax[2], ax[3]
    # points are the negative node indices of Gs
    ptlist = [n for n in self.Gs.node.keys()
              if n < 0
              and xmin < self.Gs.pos[n][0] < xmax
              and ymin < self.Gs.pos[n][1] < ymax]
    seglist = self.nd2seg(ptlist)
    return ptlist, seglist
def get_points(self, boxorpol , tol = 0.05):
    """ get points list and segments list in a polygonal zone

    Parameters
    ----------
    boxorpol : list or tuple
        [xmin,xmax,ymin,ymax]
        or shapely Polygon
    tol : float
        enlargement of the polygon bounding box (default 0.05)

    Returns
    -------
    (pt,ke) : points coordinates and index
        pt : (2xn)
        ke : (,n)

    Notes
    -----
    This method returns all the existing Layout points inside a box zone
    or the bounding box of a polygon boundary
    """
    if type(boxorpol) == geu.Polygon:
        N = len(boxorpol.vnodes) / 2
        eax = boxorpol.bounds
        # enlarge the polygon bounding box by tol on every side
        xmin = eax[0] - tol
        xmax = eax[2] + tol
        ymin = eax[1] - tol
        ymax = eax[3] + tol
    else:
        xmin, xmax = boxorpol[0], boxorpol[1]
        ymin, ymax = boxorpol[2], boxorpol[3]
    #
    # layout points
    #
    x = self.pt[0, :]
    y = self.pt[1, :]
    # k holds the indices where all 4 box conditions hold simultaneously
    inside = (x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax)
    k = np.where(inside)[0]
    # pt (2 x N)
    pt = np.vstack((x[k], y[k]))
    ke = self.upnt[k]
    return((pt, ke))
def angleonlink3(self, p1=np.array([0, 0, 1]), p2=np.array([10, 3, 1])):
    """ return (seglist,angle) between p1 and p2

    Parameters
    ----------
    p1 : np.array (3 x N) or (3,)
    p2 : np.array (3 x N) or (3,)

    Returns
    -------
    data : structured array x N
        'i' : index
        's' : slab
        'a' : angle (in radians)

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('DLR2.lay')
    >>> p1 = np.array([0,0,1])
    >>> p2 = np.array([10,3,2])
    >>> data = L.angleonlink3(p1,p2)
    #array([(0, 141, 1.2793395519256592), (0, 62, 0.29145678877830505),
    (0, 65, 0.29145678877830505)],
    dtype=[('i', '<i8'), ('s', '<i8'), ('a', '<f4')])

    See Also
    --------
    antprop.loss.Losst
    geomutil.intersect3
    """
    sh1 = np.shape(p1)
    sh2 = np.shape(p2)
    assert sh1[0] == 3
    assert sh2[0] == 3
    # broadcast a single (3,) point against an array of points
    if (len(sh1) < 2) & (len(sh2) > 1):
        p1 = np.outer(p1, np.ones(sh2[1]))
    if (len(sh2) < 2) & (len(sh1) > 1):
        p2 = np.outer(p2, np.ones(sh1[1]))
    if (len(sh2) < 2) & (len(sh1) < 2):
        p1 = np.outer(p1, np.ones(1))
        p2 = np.outer(p2, np.ones(1))
    # 3 x N : link vectors
    u = p1 - p2
    # 1 x N : link lengths
    nu = np.sqrt(np.sum(u * u, axis=0))
    # 3 x N : unit link directions
    un = u / nu[np.newaxis, :]
    #
    # warning : seglist contains the segment number in tahe not in Gs
    #
    seglist = np.unique(self.seginframe2(p1[0:2], p2[0:2])).astype(int)
    #seglist = np.unique(self.seginframe(p1[0:2], p2[0:2]))
    # seginframe2 inserts -1 delimiters : keep only real segment indices
    upos = np.nonzero(seglist >= 0)[0]
    uneg = np.nonzero(seglist < 0)[0]
    # nNLOS = len(uneg) + 1
    # # retrieve the number of segments per link
    # if nNLOS > 1:
    #     llink = np.hstack(
    #         (uneg[0], np.hstack((uneg[1:], array([len(seglist)]))) - uneg - 1))
    # else:
    #     llink = np.array([len(seglist)])
    # [(link id,number of seg),...]
    # nl = zip(np.arange(nlink),llink)n
    seglist = seglist[upos]
    # tail / head point indices and coordinates of candidate segments
    npta = self.tahe[0, seglist]
    nphe = self.tahe[1, seglist]
    Pta = self.pt[:, npta]
    Phe = self.pt[:, nphe]
    Nscreen = len(npta)
    # get segment height bounds
    zmin = np.array([self.Gs.node[x]['z'][0]
                     for x in self.tsg[seglist]])
    zmax = np.array([self.Gs.node[x]['z'][1]
                     for x in self.tsg[seglist]])
    # centroid of the screen
    Pg = np.vstack(((Phe + Pta) / 2., (zmax + zmin) / 2.))
    Ptahe = Phe - Pta
    L1 = np.sqrt(np.sum(Ptahe * Ptahe, axis=0))
    # 3 x Nscreen U1 is in plane xy
    U1 = np.vstack((Ptahe / L1, np.zeros(Nscreen)))
    L2 = zmax - zmin
    U2 = np.array([0, 0, 1])[:, None]  # 3 x 1 U2 is along z
    #
    # p1 : 3 x Ng
    # p2 : 3 x Ng
    # Pg : 3 x Nscreen
    # U1 : 3 x Nscreen
    # U2 : 3 x 1
    # L1 : ,Nscreen
    # L2 : ,Nscreen
    bo, pt = geu.intersect3(p1, p2, Pg, U1, U2, L1, L2)
    # ubo : (link index, screen index) pairs of actual intersections
    ubo = np.where(bo)
    Nseg = len(ubo[0])
    data = np.zeros(Nseg, dtype=[('i', 'i8'), ('s', 'i8'), ('a', np.float32)])
    data['i'] = ubo[0]
    data['s'] = self.tsg[seglist[ubo[1]]]
    #
    # Calculate angle of incidence refered from segment normal
    #
    norm = self.normal[:, seglist[ubo[1]]]
    # vector along the link
    uu = un[:, ubo[0]]
    unn = abs(np.sum(uu * norm, axis=0))
    angle = np.arccos(unn)
    data['a'] = angle
    return(data)
def angleonlink(self, p1=np.array([0, 0]), p2=np.array([10, 3])):
    """ angleonlink(self,p1,p2) return (seglist,angle) between p1 and p2

    Parameters
    ----------
    p1 : np.array (2 x Np) or (2,)
    p2 : np.array (2 x Np) or (2,)

    Returns
    -------
    data['i'] : link index
    data['s'] : list of segment number
    data['a'] : angle (in radians) between segment and LOS axis

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('DLR.lay')
    >>> p1 = np.array([0,0])
    >>> p2 = np.array([10,3])
    >>> alpha = L.angleonlink(p1,p2)
    #array([(0, 141, 1.2793395519256592), (0, 62, 0.29145678877830505),
    (0, 65, 0.29145678877830505)],
    dtype=[('i', '<i8'), ('s', '<i8'), ('a', '<f4')])
    """
    sh1 = np.shape(p1)
    sh2 = np.shape(p2)
    assert sh1[0] == 2
    assert sh2[0] == 2
    # broadcast a single (2,) point against an array of points
    if (len(sh1) < 2) & (len(sh2) > 1):
        p1 = np.outer(p1, np.ones(sh2[1]))
    if (len(sh2) < 2) & (len(sh1) > 1):
        p2 = np.outer(p2, np.ones(sh1[1]))
    if (len(sh2) < 2) & (len(sh1) < 2):
        p1 = np.outer(p1, np.ones(1))
        p2 = np.outer(p2, np.ones(1))
    # 2 x N
    u = p1 - p2
    # 1 x N
    nu = np.sqrt(np.sum(u * u, axis=0))
    # 2 x N
    un = u / nu[np.newaxis, :]
    # seglist mixes segment indices with -1 delimiters between links
    seglist = self.seginframe2(p1, p2)
    #seglist = self.seginframe(p1, p2)
    upos = np.nonzero(seglist >= 0)[0]
    uneg = np.nonzero(seglist < 0)[0]
    nNLOS = len(uneg) + 1
    # retrieve the number of segments per link
    if nNLOS > 1:
        llink = np.hstack(
            (uneg[0], np.hstack((uneg[1:], array([len(seglist)]))) - uneg - 1))
    else:
        llink = np.array([len(seglist)])
    # llink : list of link length
    npta = self.tahe[0, seglist[upos]]
    nphe = self.tahe[1, seglist[upos]]
    Pta = self.pt[:, npta]
    Phe = self.pt[:, nphe]
    #
    # This part should possibly be improved
    #
    # repeat each link's end points once per candidate segment; the
    # except branch handles the very first iteration (P1/P2/ilink unset)
    for i, nl in enumerate(llink):
        try:
            P1 = np.hstack((P1, np.outer(p1[:, i], np.ones(nl))))
            P2 = np.hstack((P2, np.outer(p2[:, i], np.ones(nl))))
            ilink = np.hstack(
                (ilink, array([-1]), i * np.ones(nl, dtype='int')))
        except:
            P1 = np.outer(p1[:, i], np.ones(nl))
            P2 = np.outer(p2[:, i], np.ones(nl))
            ilink = i * np.ones(nl, dtype='int')
    bo = geu.intersect(P1, P2, Pta, Phe)
    upos_intersect = upos[bo]
    seglist2 = seglist[upos_intersect]
    idxlnk = ilink[upos_intersect]
    #
    # Calculate angle of incidence refered from segment normal
    #
    norm = self.normal[0:2, seglist2]
    # vector along the linkco
    uu = un[:, idxlnk]
    unn = abs(np.sum(uu * norm, axis=0))
    angle = np.arccos(unn)
    # seglist = seglist+1
    # convert tahe indices back to Gs segment numbers
    seglist = np.array([self.tsg[x] for x in seglist2])
    data = np.zeros(len(seglist), dtype=[
        ('i', 'i8'), ('s', 'i8'), ('a', np.float32)])
    #
    # update subsegment in seglist
    #
    # self.lsss
    data['i'] = idxlnk
    data['s'] = seglist
    data['a'] = angle
    return data
def angleonlinkold(self, p1=np.array([0, 0]), p2=np.array([10, 3])):
    """ angleonlink(self,p1,p2) returns seglist between p1 and p2

    Deprecated single-link version of angleonlink.

    Parameters
    ----------
    p1 : (1 x 2 )
        [0,0]
    p2 : (1 x 2 )
        [10,3]

    Returns
    -------
    seglist : list
        list of segment number on the link
    theta : np.array
        angle (radians) between each segment normal and the link axis

    Examples
    --------
    #>>> from pylayers.gis.layout import *
    #>>> L = Layout('DLR.lay','matDB.ini','slabDB.ini')
    #>>> p1 = np.array([0,0])
    #>>> p2 = np.array([10,3])
    #>>> L.angleonlinkold(p1,p2)
    #(array([59, 62, 65]), array([ 1.27933953,  0.29145679,  0.29145679]))
    """
    logger.warning('This function is deprecated use')
    # unit vector along the link
    u = p1 - p2
    nu = np.sqrt(np.dot(u, u))
    un = u / nu
    seglist = self.seginframe(p1, p2)
    # new implementation of seginframe is faster
    #
    #seglist = self.seginframe2(p1, p2)
    npta = self.tahe[0, seglist]
    nphe = self.tahe[1, seglist]
    Pta = self.pt[:, npta]
    Phe = self.pt[:, nphe]
    # keep only the candidate segments actually crossed by the link
    P1 = np.outer(p1, np.ones(len(seglist)))
    P2 = np.outer(p2, np.ones(len(seglist)))
    bo = geu.intersect(P1, P2, Pta, Phe)
    seglist = seglist[bo]
    #
    # Calculate normal angle angle of incidence
    #
    tail = self.tahe[0, seglist]
    head = self.tahe[1, seglist]
    # vn : un-normalized normal of each crossed segment
    vn = np.vstack((self.pt[1, head] - self.pt[1, tail],
                    self.pt[0, head] - self.pt[0, tail]))
    mvn = np.outer(np.ones(2), np.sqrt(np.sum(vn * vn, axis=0)))
    n = vn / mvn
    uu = np.outer(un, np.ones(len(seglist)))
    unn = abs(np.sum(uu * n, axis=0))
    theta = np.arccos(unn)
    # printvn
    # printmvn
    # print'n :',n
    # print'un : ',unn
    # print'theta (deg)',the*180./pi
    # seglist = seglist+1
    # convert tahe indices back to Gs segment numbers
    seglist = np.array([self.tsg[x] for x in seglist])
    return(seglist, theta)
def layeronlink(self, p1, p2):
    """ return the segments of seginframe(p1,p2) actually crossed by
    the link p1-p2

    Parameters
    ----------
    p1 : (1 x 2 )
    p2 : (1 x 2 )

    Returns
    -------
    seglist : np.array
        crossed segment indices (tahe numbering)
    """
    seglist = self.seginframe(p1, p2)
    npta = self.tahe[0, seglist]
    nphe = self.tahe[1, seglist]
    Pta = self.pt[:, npta]
    Phe = self.pt[:, nphe]
    P1 = np.outer(p1, np.ones(len(seglist)))
    P2 = np.outer(p2, np.ones(len(seglist)))
    # bug fix: np.intersect does not exist — geu.intersect is the
    # routine used for the identical pattern in angleonlinkold;
    # also renamed the mask (it shadowed the builtin `bool`)
    keep = geu.intersect(P1, P2, Pta, Phe)
    seglist = seglist[keep]
    return seglist
def seguv(self, iseg):
    """ returns unitary vector along segments

    Parameters
    ----------
    iseg : np.array
        index of segments (Gs numbering)

    Returns
    -------
    vn : np.array
        (2 x n) unit vectors, or (2,) when a single segment is given

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('DLR.lay')
    >>> idx = np.array([1,2,3,17])
    >>> v1 = L.seguv(idx)
    >>> idx = np.array([1])
    >>> v2 = L.seguv(idx)
    """
    # map Gs segment numbers to tahe columns
    idx = self.tgs[iseg]
    tahe = self.tahe[:, idx]
    if len(iseg) > 1:
        ta, he = tahe[0, :], tahe[1, :]
    else:
        ta, he = tahe[0], tahe[1]
    # tail minus head coordinates
    v = self.pt[:, ta] - self.pt[:, he]
    mv = np.sqrt(np.sum(v * v, axis=0))
    if len(idx) > 1:
        return(v / mv[np.newaxis, :])
    return((v / mv).reshape(2))
def seg2pts(self, aseg):
    """ convert segments array from Gs numerotation
    to corresponding termination points array in pt

    Parameters
    ----------
    aseg : np.array (,Ns) or int for single value
        array of segment number (>0)

    Returns
    -------
    pth : np.array (4 x Ns)
        pth is a vstacking of tail point (2,Ns) and head point (2,Ns)

    Raises
    ------
    ValueError
        when a requested segment has no tahe entry (tgs < 0)

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> import numpy as np
    >>> L = Layout('defstr.lay')
    >>> aseg = np.array([1,3,6])
    >>> pt = L.seg2pts(aseg)

    OBSOLETE : Use self.s2pc instead
    """
    if not isinstance(aseg, np.ndarray):
        aseg = np.array([aseg])
    # only positive (segment) indices are valid here
    assert(len(np.where(aseg < 0)[0]) == 0)
    utahe = self.tgs[aseg]
    if not (utahe >= 0).all():
        # bug fix: a pdb.set_trace() debugger breakpoint was left here;
        # fail loudly instead of dropping into the debugger
        raise ValueError("seg2pts : unknown segment(s) in aseg")
    tahe = self.tahe[:, utahe]
    ptail = self.pt[:, tahe[0, :]]
    phead = self.pt[:, tahe[1, :]]
    pth = np.vstack((ptail, phead))
    pth = pth.reshape(pth.shape[0], pth.shape[-1])
    return pth
def segpt(self, ptlist=np.array([0])):
    """ return the seg list of a sequence of point number

    Parameters
    ----------
    ptlist : array (1xNp)
        point number array (tahe numbering)

    Returns
    -------
    seglist : array
        sorted unique segments whose tail or head is in ptlist

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('TA-Office.lay')
    >>> ptlist = np.array([0,1])
    >>> seg = L.segpt(ptlist)
    """
    matches = []
    for i in ptlist:
        # segments whose tail is i, then segments whose head is i
        matches.append(np.nonzero(self.tahe[0, :] == i)[0])
        matches.append(np.nonzero(self.tahe[1, :] == i)[0])
    if matches:
        return(np.unique(np.hstack(matches)))
    return(np.array([], dtype=int))
def extrseg(self):
    """ calculate extremum of segments

    Notes
    -----
    update the following members
        `min_sx`
        `max_sx`
        `min_sy`
        `max_sy`
    Used in seginframe
    """
    xs = self.pt[0, :]
    ys = self.pt[1, :]
    # tail / head point indices of every segment
    ta = self.tahe[0, :]
    he = self.tahe[1, :]
    # per-segment axis-aligned bounding extrema
    self.max_sx = np.maximum(xs[ta], xs[he])
    self.min_sx = np.minimum(xs[ta], xs[he])
    self.max_sy = np.maximum(ys[ta], ys[he])
    self.min_sy = np.minimum(ys[ta], ys[he])
def seginframe2(self, p1, p2):
    """ returns the seg list of a given zone defined by two points
        (vectorised version)

    Parameters
    ----------
    p1 array (2 x N)
        array of N 2D points
    p2 array (2 x N)
        array of N 2D points

    Returns
    -------
    seglist
        list of segment number inside a planar region defined by p1 an p2;
        a -1 entry is inserted BEFORE each link's segment group and acts
        as a delimiter, not as a segment number

    Examples
    --------
    .. plot::
        :include-source:
        >>> from pylayers.gis.layout import *
        >>> L = Layout('TA-Office.lay')
        >>> p1 = np.array([[0,0,0],[0,0,0]])
        >>> p2 = np.array([[10,10,10],[10,10,10]])
        >>> seglist = L.seginframe2(p1,p2)
        >>> edlist  = [ L.tsg[x] for x in seglist ]
        >>> fig,ax = L.showG('s',edlist=edlist)
    """
    sh1 = np.shape(p1)
    sh2 = np.shape(p2)
    assert sh1[0] == 2
    assert sh2[0] == 2
    # broadcast (2,) inputs against (2,N) inputs
    if (len(sh1) < 2) & (len(sh2) > 1):
        p1 = np.outer(p1, np.ones(sh2[1]))
    if (len(sh2) < 2) & (len(sh1) > 1):
        p2 = np.outer(p2, np.ones(sh1[1]))
    if (len(sh2) < 2) & (len(sh1) < 2):
        p1 = np.outer(p1, np.ones(1))
        p2 = np.outer(p2, np.ones(1))
    # per-link bounding boxes
    min_x = np.minimum(p1[0, :], p2[0, :])
    max_x = np.maximum(p1[0, :], p2[0, :])
    min_y = np.minimum(p1[1, :], p2[1, :])
    max_y = np.maximum(p1[1, :], p2[1, :])
    # a segment is kept when its own bounding box overlaps the link box:
    #   max_sx > min_x ; min_sx < max_x ; max_sy > min_y ; min_sy < max_y
    out = np.array([], dtype=int)
    for lo_x, hi_x, lo_y, hi_y in zip(min_x, max_x, min_y, max_y):
        hit = np.nonzero((self.max_sx > lo_x) &
                         (self.min_sx < hi_x) &
                         (self.max_sy > lo_y) &
                         (self.min_sy < hi_y))[0]
        # -1 acts as a delimiter (not as a segment number)
        out = np.hstack((out, np.array([-1]), hit))
    return(out)
def seginframe(self, p1, p2):
    """ return the seg list of a given zone defined by two points

    Parameters
    ----------
    p1
        array (1 x 2)
    p2
        array (1 x 2)

    Returns
    -------
    seglist
        list of segment number inside a planar region defined by p1 an p2

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('TA-Office.lay')
    >>> p1 = np.array([0,0])
    >>> p2 = np.array([10,10])
    >>> L.seginframe(p1,p2)
    array([ 1,  3,  7,  8, 14, 15, 16, 17, 18, 20, 21, 23, 24, 26, 27, 29, 30,
        32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 44, 46, 47, 52, 53, 54,
        55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
        72, 73, 74, 75, 76, 77, 78, 81, 82, 85, 86])
    """
    #assert( (p1.shape==(1,2)) or (p1.shape==(2)))
    #assert( (p2.shape==(1,2)) or (p2.shape==(2)))
    # bug fix: a leftover pdb.set_trace() halted every call; removed
    max_x = max(p1[0], p2[0])
    min_x = min(p1[0], p2[0])
    max_y = max(p1[1], p2[1])
    min_y = min(p1[1], p2[1])
    Dx = max_x - min_x
    Dy = max_y - min_y
    # inflate a degenerate (thin) frame so it still selects points
    if Dx < 0.5:
        max_x = max_x + 0.5
        min_x = min_x - 0.5
    if Dy < 0.5:
        max_y = max_y + 0.5
        min_y = min_y - 0.5
    # filter the points on the dominant axis only
    if (Dy < Dx):
        up = np.nonzero((self.pt[0, :] < max_x) &
                        (self.pt[0, :] > min_x))[0]
    else:
        up = np.nonzero((self.pt[1, :] < max_y) &
                        (self.pt[1, :] > min_y))[0]
    seglist = self.segpt(up)
    return(seglist)
def layerongrid(self, grid, Tx):
    """ evaluate layeronlink from every grid point toward Tx

    Parameters
    ----------
    grid : np.array (Nx, Ny, 2)
    Tx : np.array (2,)

    .. todo:: layeron grid Not finished (results are evaluated but
              never collected or returned)
    """
    Nx = grid.shape[0]
    Ny = grid.shape[1]
    for ix in range(Nx):
        for iy in range(Ny):
            p = grid[ix, iy, :]
            # bug fix: layeronlink returns a single seglist; the former
            # `seglist, theta = ...` unpacking raised ValueError
            seglist = self.layeronlink(p, Tx)
def cycleinline(self, c1, c2):
    """ returns the sequence of cycles crossed by the line joining the
    centroids of cycles c1 and c2

    Parameters
    ----------
    c1 : int
        cycle
    c2 : int
        cycle

    Returns
    -------
    lc : list
        ordered list of crossed cycles, starting at c1

    See Also
    --------
    pylayers.antprop.signature.Signatures.rays
    pylayers.gis.layout.Layout.seginframe2

    Notes
    -----
    This function is used to detect LOS conditions
    """
    I = np.array([]).reshape(3, 0)
    # polygon cycle 1
    poly1 = self.Gt.node[c1]['polyg']
    p1t = poly1.centroid.xy
    # polygon cycle 2
    poly2 = self.Gt.node[c2]['polyg']
    p2t = poly2.centroid.xy
    # centroid of cycle 1 and 2
    p1 = np.array([p1t[0][0], p1t[1][0]])
    p2 = np.array([p2t[0][0], p2t[1][0]])
    line = sh.LineString((p1, p2))
    # els = self.seginframe(p1,p2)
    # new implementation of seginframe is faster
    els = self.seginframe2(p1, p2)
    elg = self.tsg[els]
    lc = []
    ls = []
    I = np.array([]).reshape(2, 0)
    for seg in elg:
        #v1.1 ta, he = self.Gs.neighbors(seg)
        # unpacking the adjacency view yields the 2 end points of seg
        ta, he = self.Gs[seg]
        pa = np.array(self.Gs.pos[ta])
        pb = np.array(self.Gs.pos[he])
        segline = sh.LineString((pa, pb))
        if line.intersects(segline):
            lc.extend(self.Gs.node[seg]['ncycles'])
            # printseg,self.Gs.node[seg]['ncycles']
            ls.append(seg)
            psh = line.intersection(segline)
            I = np.hstack((I, np.array([[psh.x], [psh.y]])))
    # order the crossed segments by distance from p1
    v = (I - p1[:, np.newaxis])
    dv = np.sum(v * v, axis=0)
    u = np.argsort(dv)
    lss = np.array(ls)[u]
    # walk the ordered segments, hopping from cycle to cycle
    lc = [c1]
    for s in lss:
        cy1, cy2 = self.Gs.node[s]['ncycles']
        if cy1 not in lc:
            lc.append(cy1)
        elif cy2 not in lc:
            lc.append(cy2)
        else:
            # bug fix: `assert NameError(...)` never fails because an
            # exception instance is truthy — raise the error instead
            raise NameError('Bad transisiton in Layout.cycleinline')
    return lc
def seginline(self, p1, p2):
    """ returns the intersection between a given line and all segments

    Parameters
    ----------
    p1 : numpy.ndarray
    p2 : numpy.ndarray

    Returns
    -------
    I : numpy.ndarray (3 x Ni)
        each column stacks (Gs node number, x, y) of one intersection
    """
    I = np.array([]).reshape(3, 0)
    line = sh.LineString((p1, p2))
    for seg in self.Gs.nodes():
        if seg > 0:
            # v1.1 ta, he = self.Gs.neighbors(seg)
            # unpacking the adjacency view yields the 2 end points of seg
            ta, he = self.Gs[seg]
            pa = np.array(self.Gs.pos[ta])
            pb = np.array(self.Gs.pos[he])
        else:
            # a point node is tested as a degenerate zero-length segment
            pa = np.array(self.Gs.pos[seg])
            pb = pa
        segline = sh.LineString((pa, pb))
        if line.intersects(segline):
            # NOTE(review): for a collinear overlap the intersection is a
            # LineString and .x/.y would raise — TODO confirm callers
            # only hit the Point case
            psh = line.intersection(segline)
            liseg = np.array([[psh.x], [psh.y]])
            I = np.hstack((I, np.vstack(([[seg]], liseg))))
    return I
def visilist(self, p):
    """ returns the list of nodes which are visible from point p

    Parameters
    ----------
    p
        np.array point

    Returns
    -------

    Notes
    -----
    AAS = [0:2pi]
    While (AAS != void set)
         1) Find segment ns either
            i)  the closest segment from p in AAS
            ii) neighbor of prec(ns)
         2) Find the edgelist visible from ns
            edgelist = vedgelist(ns)
         3) Check_occultation(p,ns,edgelist)
            Occultation 8  situations
            [p1,pM,p2] = [T,T,T]  : fully occulted
            [     ]    partially visible
            [F,F,F]  : fully visible
         4) Update Allowed Angular Sector  (AAS)

    NOTE(review): unfinished pseudo-code — `Intvl`, `self.closest`,
    `self.vedgelist` and `self.update` are not defined in this module,
    and `self.check - occultation(...)` parses as a subtraction of an
    undefined attribute; calling this method will raise at runtime.
    """
    AAS = Intvl([0, 2 * pi])
    nsprev = np.inf
    edgelist = np.array([])
    while AAS.measure() != 0:
        if nsprev == np.inf:
            ns = self.closest(p, AAS)
        else:
            ns = self.neighbors(nsprev)
        edgelist = self.vedgelist(ns)
        [b1, bM, b2] = self.check - occultation(p, ns, edgelist)
        AAS = self.update(AAS,)
def closest_edge(self, p, AAS):
    """ not implemented

    Intended to return the closest segment from p which belongs to
    the AAS (Allowed Angular Sector) :

        [ns] = closest_edge(self,p,AAS)

    Parameters
    ----------
    p : point
    AAS : allowed angular sector
    """
    pass
    # not implemented
def visi_papb(self, pa, pb, edgelist=np.array([])):
    """ determine if pa and pb are in visibility for the structure graph

    visi_papb(pa,pb,edgelist)

    Parameters
    ----------
    pa : 1x2
    pb : 1x2
    edgelist : exclusion edge list (currently unused — see commented
        line below)

    Returns
    -------
    visi : int
        1 when no wall crosses the pa-pb link, 0 otherwise
    """
    #
    # .. todo: avoid utilisation tahe
    #
    x = self.pt[0, :]
    y = self.pt[1, :]
    ta = self.tahe[0, :]
    he = self.tahe[1, :]
    x1 = x[ta]
    y1 = y[ta]
    x2 = x[he]
    y2 = y[he]
    # 2x2 line/line intersection solved in parametric form; den == 0
    # means parallel lines, clamped to avoid division by zero
    den = (pb[1] - pa[1]) * (x2 - x1) - (pb[0] - pa[0]) * (y2 - y1)
    w = np.nonzero(abs(den) < 1e-12)[0]
    den[w] = 1e-12
    numa = (pb[0] - pa[0]) * (y1 - pa[1]) - (pb[1] - pa[1]) * \
        (x1 - pa[0])
    numb = (x2 - x1) * (y1 - pa[1]) - (y2 - y1) * (x1 - pa[0])
    ua = numa / den
    ub = numb / den
    #ua[edgelist] = 1000
    u = np.nonzero((ua >= 0) & (ua <= 1) & (ub >= 0) & (ub <= 1))[0]
    # bug fix: `u != []` compared an ndarray to a list (elementwise /
    # deprecated, unreliable truth value); test emptiness explicitly.
    # If the pa-pb link intercepts at least one wall, no visibility.
    if len(u) > 0:
        visi = 0
    else:
        visi = 1
    return(visi)
def show_nodes(self, ndlist=[1e8], size=10, color='b', dlabels=False, font_size=15, alpha=1, node_shape='o', fig=[], ax=[]):
    """ show nodes

    Parameters
    ----------
    ndlist : list or np.ndarray
        nodes to display; the sentinel [1e8] (default) means all nodes
    size : int
        default 10
    color :  'b'
    dlabels : Boolean
        False
    font_size : int
        15
    alpha : float
        transparancy
    node_shape : matplotlib marker (default 'o')
    fig, ax : existing matplotlib figure / axes, created when []

    Returns
    -------
    fig, ax
    """
    if fig == []:
        fig = plt.figure()
    if ax == []:
        ax = fig.add_subplot(111)
    if type(ndlist) == np.ndarray:
        ndlist = list(ndlist)
    if len(ndlist) == 0:
        # ndlist.append(1e8)
        dlabels = False
    elif ndlist[0] == 1e8:
        # sentinel : display every node of Gs
        ndlist = self.Gs.node.keys()
    # elif ndlist[0]==1e8:
    #    ndlist = self.Gs.node.keys()
    # printndlist
    # NOTE(review): fig/ax are not standard networkx draw kwargs —
    # presumably a patched networkx; the try below recovers fig/ax from
    # the returned artist when possible
    Z = nx.draw_networkx_nodes(self.Gs, self.Gs.pos, node_color=color,
                               node_size=size, nodelist=ndlist, alpha=alpha,
                               node_shape=node_shape, fig=fig, ax=ax)
    try:
        fig = Z.figure
        ax = Z.axes
    except:
        pass
    if dlabels:
        dicopos = {}
        dicolab = {}
        for n in ndlist:
            dicopos[n] = np.array(self.Gs.pos[n])
            dicolab[n] = self.labels[n]
        Z = nx.draw_networkx_labels(self.Gs, dicopos, dicolab,
                                    font_size=font_size, font_color=color, fig=fig, ax=ax)
        try:
            fig = Z.figure
            ax = Z.axes
        except:
            pass
    return fig, ax
def show_seg1(self, edlist=[], alpha=1, width=1, size=2, color='black', font_size=15, dlabels=False):
    """ show segment

    Parameters
    ----------
    edlist : list, int or np.ndarray
        segments to display
    alpha : transparency
    width : line width
    size : node size
    color : node color
    font_size : label font size
    dlabels : boolean
        display labels
    """
    # bug fix: `type(edlist) == 'ndarray'` compared a type to a string
    # (always False) — use isinstance
    if isinstance(edlist, np.ndarray):
        edlist = edlist.tolist()
    elif isinstance(edlist, int):
        edlist = [edlist]
    # printndlist
    nx.draw_networkx_nodes(
        self.Gs, self.Gs.pos, node_size=size, nodelist=edlist)
    if dlabels:
        dicopos = {}
        dicolab = {}
        # bug fix: the loop iterated over undefined `ndlist`
        for n in edlist:
            # dicopos[n]=tuple(np.array(self.Gs.pos[n])+np.array((0.8,0.2)))
            dicopos[n] = np.array(self.Gs.pos[n])
            dicolab[n] = self.labels[n]
        nx.draw_networkx_labels(
            self.Gs, dicopos, dicolab, font_size=font_size)
def show_segment(self, **kwargs):
    """ show segment

    Parameters
    ----------
    edlist : list
        segment list
    alpha : float
        transparency  0< alpha < 1
    width : float
        line width (default 1)
    color : string
        default 'black'
    dnodes : boolean
        display nodes ( Default False)
    dlabels : boolean
        display labels ( Default False)
    font_size : int
        Default 15

    Returns
    -------
    fig, ax
    """
    defaults = {'fig': [],
                'ax': [],
                'edlist': [],
                'alpha': 1,
                'width': 1,
                'color': 'black',
                'dnodes': False,
                'dlabels': False,
                'font_size': 15,
                'node_shape': 'o'
                }
    # fill missing kwargs with defaults
    for key, value in defaults.items():
        if key not in kwargs:
            kwargs[key] = value
    if kwargs['fig'] == []:
        fig = plt.figure()
    else:
        fig = kwargs['fig']
    if kwargs['ax'] == []:
        ax = fig.add_subplot(111)
    else:
        ax = kwargs['ax']
    clrlist = []
    cold = pyu.coldict()
    # html color or string
    if kwargs['color'][0] != '#':
        # named color : resolve through the color dictionary
        clrlist.append(cold[kwargs['color']])
    else:
        # bug fix: `color` was referenced before assignment — read the
        # requested html color from kwargs first
        color = kwargs['color']
        if color == '#FFFFF0':
            color = '#00000F'
        clrlist.append(color)
    ecmap = clr.ListedColormap(clrlist)
    U = self.Gs.edges(kwargs['edlist'])
    # ue = (np.ones(2 * len(kwargs['edlist']))).astype('int').tolist()
    ue = np.ones(len(U), dtype='int').tolist()
    if len(U) > 0:
        Z = nx.draw_networkx_edges(self.Gs, self.Gs.pos, edgelist=U,
                                   edge_color=ue, edge_cmap=ecmap,
                                   alpha=kwargs['alpha'], width=kwargs['width'], fig=fig, ax=ax)
        try:
            fig = Z.figure
            ax = Z.axes
        except:
            pass
    if kwargs['dlabels']:
        # printedlist
        # nodelist = self.ed2nd(edlist)
        fig, ax = self.show_nodes(ndlist=kwargs['edlist'], dlabels=kwargs['dlabels'],
                                  color='b', font_size=kwargs['font_size'],
                                  node_shape=kwargs['node_shape'], fig=fig, ax=ax)
    if kwargs['dnodes']:
        fig, ax = self.show_nodes(
            ndlist=kwargs['edlist'], color='b', fig=fig, ax=ax)
    return fig, ax
def show_layer(self, name, edlist=[], alpha=1, width=0,
               color='black', dnodes=False, dthin=False,
               dlabels=False, font_size=15, fGHz=[], fig=[], ax=[]):
    """ show the segments which belong to a given slab layer

    Parameters
    ----------
    name : string
        slab name (key of self.name)
    edlist : list
        restriction of the layer segment list (default [] : whole layer)
    alpha : float
        transparency
    width : int
        if width = 0 width depends on slab property
    color : string
        default 'black'
    dnodes : boolean
        display nodes (default False)
    dthin : boolean
        display thin (default False)
    dlabels : boolean
        display labels (default False)
    font_size : int
        default 15
    fGHz : list or float
        if non empty, derive the color from the slab at that frequency

    Returns
    -------
    fig, ax

    """
    if fig == []:
        fig = plt.figure()
    if ax == []:
        ax = fig.add_subplot(111)

    if edlist == []:
        edlist = self.name[name]
    else:
        # intersect layer edge list with local zone edge list (in
        # function argument)
        a1 = np.array(self.name[name])
        a2 = np.array(edlist)
        edlist = list(np.intersect1d(a1, a2))

    if self.display['thin']:
        fig, ax = self.show_segment(edlist=edlist,
                                    alpha=1,
                                    width=1,
                                    color=color,
                                    dlabels=dlabels,
                                    font_size=font_size, fig=fig, ax=ax)
    else:
        slab = self.sl[name]
        if width == 0:
            linewidth = slab['linewidth'] / 3.
        else:
            linewidth = width
        if fGHz == []:
            color = slab['color']
        else:
            if (name != 'METAL') & (name != 'METALIC'):
                # bug fix : tocolor is a method and was not called ;
                # same usage as in showGs subseg handling
                color = slab.tocolor(fGHz)
            else:
                color = 'black'

        fig, ax = self.show_segment(edlist=edlist, alpha=1,
                                    width=linewidth, color=color, dnodes=dnodes,
                                    dlabels=dlabels, font_size=font_size, fig=fig, ax=ax)
    return fig, ax
def _showGi(self, **kwargs):
    """ show graph of interactions Gi

    Left panel : Gi drawn with a spring layout.
    Right panel : Gi drawn over the structure graph (showG('s')).

    Parameters
    ----------
    seed : float
        seed of the numpy random generator (spring layout reproducibility)
    alpha : float
        node transparency
    sig : list
        list of signatures (sequence of Gi nodes format)
    cycles : list
        [cystart, cyend] : start/stop cycles of Gt used to draw arrows
    ninter : int
        interaction (edge) index in Gi.edges()
    inter : tuple
        interaction edge (pair of Gi nodes)
    node_size : int
    fontsize : int
    labels : boolean

    See Also
    --------
    Signatures.siginter

    """
    defaults = {'seed':1,
                'alpha':0.4,
                'sig':[],
                'cycles':[],
                'ninter':0,
                'node_size':30,
                'fontsize':18,
                'labels':False,
                'inter':[]}

    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]

    edges = self.Gi.edges()
    cy = kwargs['cycles']
    if cy!=[]:
        # centroid positions of the start / stop cycles in Gt
        pstart = self.Gt.pos[cy[0]]
        pstop = self.Gt.pos[cy[1]]
    if kwargs['sig']!=[]:
        # highlight the consecutive-edge chain of each signature
        lsig = kwargs['sig']
        edgelist = []
        startlist = []
        stoplist = []
        # head/tail positions of each signature, accumulated as (2,N)
        phe_start = np.array([])
        phe_stop = np.array([])
        phe_start.shape = (2,0)
        phe_stop.shape = (2,0)
        for sig in lsig:
            edgelist = edgelist + list(zip(sig[0:-1],sig[1:]))
            if cy!=[]:
                p1 = np.array(self.Gi.pos[sig[0]])[:,None]
                p2 = np.array(self.Gi.pos[sig[-1]])[:,None]
                phe_start=np.hstack((phe_start,p1))
                phe_stop=np.hstack((phe_stop,p2))
    elif kwargs['inter']!=[]:
        # highlight the output edges of a given interaction edge,
        # labelled with their transition probability
        edinter = kwargs['inter']
        outlist = self.Gi[edinter[0]][edinter[1]]['output']
        outprob = outlist.values()
        edgelist = [(edinter[1],x) for x in outlist]
        dprob = dict(zip(edgelist,[str(x) for x in outprob]))
    elif kwargs['ninter']!=[]:
        # NOTE(review): 'ninter' defaults to 0 and 0 != [] is True,
        # so this branch always fires when neither 'sig' nor 'inter'
        # is given — confirm this default is intentional
        edinter = [ e for e in edges][kwargs['ninter']]
        outlist = self.Gi[edinter[0]][edinter[1]]['output']
        outprob = outlist.values()
        edgelist = [(edinter[1],x) for x in outlist]
        dprob = dict(zip(edgelist,[str(x) for x in outprob]))
    else:
        # NOTE(review): if ever reached, `edinter`, `edgelist` and
        # `dprob` would be unbound in the drawing code below
        pass

    ns = kwargs['node_size']
    np.random.seed(kwargs['seed'])
    fig = plt.figure(figsize=(20,10))
    ax1 = plt.subplot(121)
    # left panel : spring layout of Gi
    pos = nx.spring_layout(self.Gi)
    # nodes grouped by tuple length : 1 (red), 2 (blue), 3 (green)
    nx.draw_networkx_nodes(self.Gi,pos,nodelist=[x for x in self.Gi.nodes() if len(x)==1],
                           node_color='r',node_size=ns,ax=ax1,alpha=kwargs['alpha'])
    nx.draw_networkx_nodes(self.Gi,pos,nodelist=[x for x in self.Gi.nodes() if len(x)==2],
                           node_color='b',node_size=ns,ax=ax1,alpha=kwargs['alpha'])
    nx.draw_networkx_nodes(self.Gi,pos,nodelist=[x for x in self.Gi.nodes() if len(x)==3],
                           node_color='g',node_size=ns,ax=ax1,alpha=kwargs['alpha'])
    nx.draw_networkx_edges(self.Gi,pos,edgelist=self.Gi.edges(),width=.1,edge_color='k',arrow=False,ax=ax1)
    if (kwargs['sig']==[]):
        # selected interaction edge in green
        nx.draw_networkx_edges(self.Gi,pos,edgelist=[edinter],width=2,edge_color='g',arrow=False,ax=ax1)
    # highlighted edges in red
    nx.draw_networkx_edges(self.Gi,pos,edgelist=edgelist,width=2,edge_color='r',arrow=False,ax=ax1)
    ax2 = plt.subplot(122)
    # right panel : Gi overlaid on the structure graph
    fig,ax2 = self.showG('s',aw=1,ax=ax2)
    nx.draw_networkx_nodes(self.Gi,self.Gi.pos,nodelist=[x for x in self.Gi.nodes() if len(x)==1],
                           node_color='r',node_size=ns,ax=ax2,alpha=kwargs['alpha'])
    nx.draw_networkx_nodes(self.Gi,self.Gi.pos,nodelist=[x for x in self.Gi.nodes() if len(x)==2],
                           node_color='b',node_size=ns,ax=ax2,alpha=kwargs['alpha'])
    nx.draw_networkx_nodes(self.Gi,self.Gi.pos,nodelist=[x for x in self.Gi.nodes() if len(x)==3],
                           node_color='g',node_size=ns,ax=ax2,alpha=kwargs['alpha'])
    nx.draw_networkx_edges(self.Gi,self.Gi.pos,edgelist=self.Gi.edges(),width=.1,edge_color='k',arrow=False,ax=ax2)
    if kwargs['labels']:
        # NOTE(review): draw_networkx_labels expects a dict for
        # `labels`, not a list — confirm this call behaves as intended
        nx.draw_networkx_labels(self.Gi,self.Gi.pos,labels=[str(x) for x in self.Gi.nodes()],ax=ax2,fontsize=kwargs['fontsize'])
    if (kwargs['sig']==[]):
        nx.draw_networkx_edges(self.Gi,self.Gi.pos,edgelist=[edinter],width=2,edge_color='g',arrow=False,ax=ax2)
    nx.draw_networkx_edges(self.Gi,self.Gi.pos,edgelist=edgelist,width=2,edge_color='r',arrow=False,ax=ax2)
    if (kwargs['sig']==[]):
        # annotate highlighted edges with transition probabilities
        nx.draw_networkx_edge_labels(self.Gi,self.Gi.pos,edge_labels=dprob,ax=ax2,fontsize=kwargs['fontsize'])
    if cy!=[]:
        # arrows : start cycle centroid -> signature heads,
        # signature tails -> stop cycle centroid
        ptstart = pstart[:,None]*np.ones(phe_start.shape[1])[None,:]
        ptstop = pstop[:,None]*np.ones(phe_start.shape[1])[None,:]
        plu.displot(ptstart,phe_start,ax=ax2,arrow=True)
        plu.displot(phe_stop,ptstop,ax=ax2,arrow=True)
    return(fig, ax1)
def _showGt(self, ax=[], roomlist=[], mode='indoor'):
    """ show topological graph Gt

    Each cycle polygon (except cycle 0) is filled with a color that
    depends on the selected mode.

    Parameters
    ----------
    ax : matplotlib axes
        if not an Axes instance, the current figure/axes are used
    roomlist : list
        list of room numbers (currently unused in the body)
    mode : string
        'area'   : red if signed area < 0 else green
        'start'  : blue if first vnode < 0 else yellow
        'indoor' : green if indoor else blue
        'open'   : green if isopen
    """
    if not isinstance(ax, plt.Axes):
        fig = plt.gcf()
        ax = fig.gca()
    else:
        # bug fix : `fig` was left undefined when a valid Axes was
        # passed, causing a NameError in poly.plot(fig=fig, ...)
        fig = ax.figure

    G = self.Gt

    for k, nc in enumerate(G.node.keys()):
        # cycle 0 is the outside cycle : no polygon to draw
        if nc != 0:
            poly = G.node[nc]['polyg']
            a = poly.signedarea()

            if mode == 'area':
                if a < 0:
                    poly.plot(color='red', alpha=0.5, fig=fig, ax=ax)
                else:
                    poly.plot(color='green', alpha=0.5, fig=fig, ax=ax)

            if mode == 'start':
                if poly.vnodes[0] < 0:
                    poly.plot(color='blue', alpha=0.5, fig=fig, ax=ax)
                else:
                    poly.plot(color='yellow', alpha=0.5, fig=fig, ax=ax)

            if mode == 'indoor':
                if G.node[nc]['indoor']:
                    poly.plot(color='green', alpha=0.5, fig=fig, ax=ax)
                else:
                    poly.plot(color='blue', alpha=0.5, fig=fig, ax=ax)

            if mode == 'open':
                if G.node[nc]['isopen']:
                    poly.plot(color='green', alpha=0.5, fig=fig, ax=ax)

    ax.axis('scaled')
def showGs(self, **kwargs):
    """ show structure graph Gs

    Parameters
    ----------
    ndlist : np.array
        set of nodes to be displayed (default [] : all points)
    edlist : np.array
        set of edges to be displayed (default [] : all segments)
    roomlist : list
        default []
    axis :
        axis limits, default [] : 'scaled'
    width : int
        default 2
    fGHz : float
        if non empty, slab colors are frequency dependent
    show : boolean
        default False
    furniture : boolean
        default False

    display parameters are defined in the display dictionnary

    Returns
    -------
    fig, ax

    See Also
    --------
    pylayers.gis.layout.showG

    """
    defaults = {'ndlist': [],
                'edlist': [],
                'roomlist': [],
                'axis': [],
                'width': 2,
                'fGHz': [],
                'show': False,
                'furniture': False,
                }

    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]

    # keep extra kwargs apart (not used below, preserved for callers)
    args = {}
    for k in kwargs:
        if k not in defaults:
            args[k] = kwargs[k]

    if 'fig' not in kwargs:
        fig = plt.figure()
    else:
        fig = kwargs['fig']

    if 'ax' not in kwargs:
        ax = fig.add_subplot(111)
    else:
        ax = kwargs['ax']

    if self.display['clear']:
        ax.cla()

    # display overlay image
    if self.display['overlay']:
        # imok : Image is OK
        imok = False
        if len(self.display['overlay_file'].split('http:')) > 1:
            img_file = urlopen(self.display['overlay_file'])
            # bug fix : the previous code referenced an undefined
            # `im` ; buffer the HTTP payload in a seekable object
            # as required by Image.open
            from io import BytesIO
            im = BytesIO(img_file.read())
            image = Image.open(im)
            imok = True
        else:
            if self.display['overlay_file'] != '':
                image = Image.open(os.path.join(
                    pro.basename, pro.pstruc['DIRIMAGE'], self.display['overlay_file']))
                imok = True
        if imok:
            if 'v' in self.display['overlay_flip']:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            if 'h' in self.display['overlay_flip']:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            ax.imshow(image, extent=self.display[
                'overlay_axis'], alpha=self.display['alpha'], origin='lower')

    # default node list : all points (negative Gs indices)
    if kwargs['ndlist'] == []:
        tn = np.array(list(self.Gs.node.keys()))
        u = np.nonzero(tn < 0)[0]
        ndlist = tn[u]
    else:
        # bug fix : ndlist was left undefined when supplied by caller
        ndlist = kwargs['ndlist']

    # default edge list : all segments (positive Gs indices)
    if kwargs['edlist'] == []:
        tn = self.Gs.node.keys()
        # bug fix : materialize the list (a lazy filter object could
        # only be consumed once)
        edlist = [x for x in tn if x > 0]
    else:
        edlist = kwargs['edlist']

    if self.display['nodes']:
        dlabels = self.display['ndlabel']
        fig, ax = self.show_nodes(
            ndlist, size=30, color='k', dlabels=dlabels, node_shape='s', fig=fig, ax=ax)

    if self.display['isonb']:
        if hasattr(self, 'lsss'):
            seg = [x for x in self.Gs.nodes() if x > 0]
            # mark segments which carry more than one iso segment
            # (previous version trapped errors into an ipdb session ;
            #  let a genuine failure propagate instead)
            psseg = np.array([[self.Gs.pos[x][0], self.Gs.pos[x][1]] for x in seg
                              if len(self.Gs.node[x]['iso']) > 1])
            [ax.text(psseg[x, 0] + 0.2, psseg[x, 1] + 0.2, '+',
                     fontdict={'size': 8}, ha='center') for x in range(len(psseg))]

    if self.display['transition']:
        try:
            # draw a double arrow along the normal of each segment
            # flagged as a transition
            segwtrans = [y for y in [x for x in self.Gs.nodes() if x > 0] if self.Gs.node[
                y]['transition']]
            posseg = np.array([self.Gs.pos[x] for x in segwtrans])
            normseg = np.array([self.Gs.node[x]['norm']
                                for x in segwtrans])[:, :2]
            b1 = (posseg - normseg / 2)
            b2 = (posseg + normseg / 2)
            [ax.annotate('', xy=b1[x],
                         xycoords='data',
                         xytext=b2[x],
                         textcoords='data',
                         arrowprops={'arrowstyle': '<->'})
             for x in range(len(segwtrans))]
        except:
            pass
    slablist = self.name.keys()
    if self.display['edges']:
        dlabels = self.display['edlabel']
        font_size = self.display['fontsize']
        dnodes = self.display['ednodes']
        dthin = self.display['thin']
        alpha = self.display['alpha']
        for nameslab in self.name:
            color = self.sl[nameslab]['color']
            # NOTE(review): this rebinds `edlist` to the layer segment
            # list ; the subseg intersection below then uses the last
            # layer's list — preserved as-is, confirm it is intended
            edlist = self.name[nameslab]
            fig, ax = self.show_layer(nameslab, edlist=edlist, alpha=alpha,
                                      dthin=dthin, dnodes=dnodes, dlabels=dlabels,
                                      color=color,
                                      font_size=font_size,
                                      width=kwargs['width'],
                                      fGHz=kwargs['fGHz'],
                                      fig=fig, ax=ax)

    if self.display['subseg']:
        dico = self.subseg()
        for k in dico.keys():
            if kwargs['fGHz'] == []:
                color = self.sl[k]['color']
            else:
                if (k != 'METAL') & (k != 'METALIC'):
                    # bug fix : `fGHz` was referenced as a bare name ;
                    # it only exists inside kwargs here
                    color = self.sl[k].tocolor(kwargs['fGHz'])
                else:
                    color = 'black'
            edlist2 = []
            for ts in dico[k]:
                edlist2.append(ts[0])
            edlist3 = list(set(edlist2).intersection(set(edlist)))
            fig, ax = self.show_segment(
                edlist=edlist3, color=color, alpha=1.0, width=2, fig=fig, ax=ax)

    if self.display['scaled']:
        ax.axis('scaled')

    ax.set_title(self.display['title'])

    if kwargs['furniture']:
        if 'lfur' in self.__dict__:
            for fur1 in self.lfur:
                if fur1.Matname == 'METAL':
                    fig, ax = fur1.show(fig, ax)
        else:
            print("Warning : no furniture file loaded")

    for nr in kwargs['roomlist']:
        ncy = self.Gr.node[nr]['cycle']
        fig, ax = self.Gt.node[ncy]['polyg'].plot(fig=fig, ax=ax)
    if kwargs['axis'] == []:
        ax.axis('scaled')
    else:
        ax.axis(kwargs['axis'])

    if kwargs['show']:
        plt.show()

    return fig, ax
def build(self, graph='tvirw', verbose=False, difftol=0.15, multi=False):
    """ build graphs

    Parameters
    ----------
    graph : string composed of
        't' : Gt
        'v' : Gv
        'i' : Gi
        'r' : Gr
        'w' : Gw
    verbose : boolean
    difftol : float
        diffraction tolerance
    multi : boolean
        enable multi processing for outputGi

    Notes
    -----
    This function builds all the graphs associated with the Layout.
    Warning : by default the layout is saved (dumpw) after each build

    """
    # make sure the layout has a boundary before building anything
    if not self.hasboundary:
        self.boundary()
    # to save graph Gs
    self.lbltg.extend('s')

    Buildpbar = pbar(verbose, total=5, desc='Build Layout', position=0)
    if verbose:
        Buildpbar.update(1)
    if 't' in graph:
        self.buildGt(difftol=difftol, verbose=verbose, tqdmpos=1)
        self.lbltg.extend('t')
        if verbose:
            Buildpbar.update(1)
    if 'v' in graph:
        self.buildGv(verbose=verbose, tqdmpos=1)
        self.lbltg.extend('v')
        if verbose:
            Buildpbar.update(1)
    if 'i' in graph:
        self.buildGi(verbose=verbose, tqdmpos=1)
        if not multi:
            self.outputGi(verbose=verbose, tqdmpos=1)
        else:
            self.outputGi_mp()
        self.lbltg.extend('i')
        if verbose:
            Buildpbar.update(1)

    # store the md5 hash of the layout file on node 0 of Gt
    filelay = pyu.getlong(self._filename, pro.pstruc['DIRLAY'])
    # context manager guarantees the file handle is released
    with open(filelay, 'rb') as fd:
        _hash = hashlib.md5(fd.read()).hexdigest()
    self.Gt.add_node(0, hash=_hash)
    # There is a dumpw after each build
    self.dumpw()
    self.isbuilt = True
    if verbose:
        Buildpbar.update(1)
def dumpw(self):
    """ write a dump of the built graphs

    Notes
    -----
    't' : Gt
    'r' : Gr
    's' : Gs
    'v' : Gv
    'i' : Gi

    Graphs are pickled under struc/gpickle/<layout name>/ in the
    project directory.

    """
    # create layout directory
    # bug fix : use splitext so that `dirname` is defined whatever
    # the layout file extension (.ini, .lay, ...), and only the
    # extension is stripped (not every occurrence of the suffix)
    dirname = os.path.splitext(self._filename)[0]
    path = os.path.join(pro.basename, 'struc', 'gpickle', dirname)
    if not os.path.isdir(path):
        os.mkdir(path)
    for g in self.lbltg:
        try:
            gname = 'G' + g
            write_gpickle(getattr(self, gname), os.path.join(
                path, 'G' + g + '.gpickle'))
        except:
            raise NameError(
                'G' + g + ' graph cannot be saved, probably because it has not been built')
    # save auxiliary structures attached to Gt :
    # ddiff (diffraction points), lnss (diffraction on sub-segments),
    # dca, and the matrix m when they exist
    if 't' in self.lbltg:
        if hasattr(self, 'ddiff'):
            write_gpickle(getattr(self, 'ddiff'),
                          os.path.join(path, 'ddiff.gpickle'))
        if hasattr(self, 'lnss'):
            write_gpickle(getattr(self, 'lnss'),
                          os.path.join(path, 'lnss.gpickle'))
    if hasattr(self, 'dca'):
        write_gpickle(getattr(self, 'dca'), os.path.join(path, 'dca.gpickle'))
    if hasattr(self, 'm'):
        write_gpickle(getattr(self, 'm'), os.path.join(path, 'm.gpickle'))
def dumpr(self, graphs='stvirw'):
    """ read given graphs from their pickled dump

    Parameters
    ----------
    graphs : string
        's' : Gs
        't' : Gt
        'r' : Gr
        'v' : Gv
        'i' : Gi

    Notes
    -----
    .gpickle files are stored under the struc directory of the project
    specified by the $BASENAME environment variable

    """
    # bug fix : use splitext so `dirname` is always defined whatever
    # the layout file extension (.ini, .lay, ...)
    dirname = os.path.splitext(self._filename)[0]
    path = os.path.join(pro.basename, 'struc', 'gpickle', dirname)
    for g in graphs:
        try:
            gname = 'G' + g
            filename = os.path.join(path, 'G' + g + '.gpickle')
            G = read_gpickle(filename)
            setattr(self, gname, G)
            self.lbltg.extend(g)
        except:
            # best effort : a missing graph is reported, not fatal
            print("Warning Unable to read graph G" + g)
            pass

    if 's' in graphs:
        # update self.name : map each slab name to its segment list
        lseg = [x for x in self.Gs.node if x > 0]
        for name in self.name:
            self.name[name] = [
                x for x in lseg if self.Gs.node[x]['name'] == name]
        # refresh the numpy arrays derived from Gs
        self.g2npy()

    # optional auxiliary structures (absent dumps get safe defaults)
    filediff = os.path.join(path, 'ddiff.gpickle')
    if os.path.isfile(filediff):
        ddiff = read_gpickle(filediff)
        setattr(self, 'ddiff', ddiff)
    else:
        self.ddiff = {}
    filelnss = os.path.join(path, 'lnss.gpickle')
    if os.path.isfile(filelnss):
        lnss = read_gpickle(filelnss)
        setattr(self, 'lnss', lnss)
    else:
        self.lnss = []
    filedca = os.path.join(path, 'dca.gpickle')
    if os.path.isfile(filedca):
        dca = read_gpickle(filedca)
        setattr(self, 'dca', dca)
    filem = os.path.join(path, 'm.gpickle')
    if os.path.isfile(filem):
        setattr(self, 'm', read_gpickle(filem))
def polysh2geu(self, poly):
    """ transform sh.Polygon into geu.Polygon

    Parameters
    ----------
    poly : sh.Polygon

    Returns
    -------
    P : geu.Polygon
        polygon carrying the ordered vnodes of Gs located on its
        exterior ring

    """
    Gsnodes = np.array(self.Gs.nodes())
    # get node coordinates
    nodept = [self.Gs.pos[i] for i in Gsnodes]
    # transform into shapely points
    shpt = [sh.Point(pt) for pt in nodept]
    # Create a thin ring around the exterior to avoid taking points
    # inside the polygon ; this helps to avoid polygons inside polygons
    uvn = np.where([poly.exterior.buffer(1e-3).contains(p)
                    for p in shpt])[0]
    vnodes = Gsnodes[uvn]
    # transform vnodes to an ordered cycle
    # NOTE ! Using a cycle basis is MANDATORY because some extra
    # vnodes can be picked up during the contain process above
    S = nx.subgraph(self.Gs, vnodes)
    cycle = nx.cycle_basis(S)
    if len(cycle) > 1:
        # keep the basis cycle whose length is closest to len(vnodes)
        lc = np.array([len(c) for c in cycle])
        dif = abs(lc - len(vnodes))
        ud = np.where(dif == min(dif))[0]
        # bug fix : `cycle` is a plain list and cannot be indexed by
        # an ndarray ; take the first best-matching cycle
        cycle = cycle[ud[0]]
    else:
        cycle = cycle[0]
    # make sure the cycle starts on a point (negative index)
    if cycle[0] > 0:
        cycle = np.roll(cycle, -1)
    pos = [self.Gs.pos[c] for c in cycle if c < 0]
    # create a new polygon with correct vnodes and correct points
    # (a former bare except dropped into ipdb here and could then
    #  raise NameError on the unbound result ; errors now propagate)
    P = geu.Polygon(p=pos, vnodes=cycle)
    return P
def getangles(self, poly, unit='rad', inside=True):
    """ find angles of a polygon

    Parameters
    ----------
    poly : geu.Polygon or sh.Polygon
    unit : str
        'deg' : degree values
        'rad' : radian values
    inside : boolean
        True  : compute the inside angles of the cycle
                (a.k.a. the interior of the polygon)
        False : compute the outside angles of the cycle
                (a.k.a. the exterior of the polygon)

    Returns
    -------
    (u, a)
    u : int (Np)
        point number
    a : float (Np)
        associated angle to the point

    Notes
    -----
    http://www.mathopenref.com/polygonexteriorangles.html

    TODO : This function should be moved in geomutil.py (NOT USED)

    """
    if isinstance(poly, sh.Polygon):
        # bug fix : polysh2geu is a method of this class, it was
        # previously called as a free function (NameError)
        poly = self.polysh2geu(poly)

    cycle = poly.vnodes
    # keep point nodes only (negative Gs indices)
    upt = cycle[cycle < 0]

    # point coordinates (2 x Np) — see OSM bug fix
    pt = self.pt[:, self.iupnt[-upt]]

    # enforce counter-clockwise orientation
    if geu.SignedArea(pt) < 0:
        upt = upt[::-1]
        pt = pt[:, ::-1]

    ptroll = np.roll(pt, 1, axis=1)

    # unit edge vectors, with the first edge repeated at the end so
    # every point gets a (previous, next) pair
    v = pt - ptroll
    v = np.hstack((v, v[:, 0][:, None]))
    vn = v / np.sqrt(np.sum((v) * (v), axis=0))

    v0 = vn[:, :-1]
    v1 = vn[:, 1:]

    cross = np.cross(v0.T, v1.T)
    dot = np.sum(v0 * v1, axis=0)

    # interior angle at each vertex from the signed turn angle
    ang = np.arctan2(cross, dot)
    uneg = ang < 0
    ang[uneg] = -ang[uneg] + np.pi
    ang[~uneg] = np.pi - ang[~uneg]

    if not inside:
        ang = 2 * np.pi - ang

    if unit == 'deg':
        return upt, ang * 180 / np.pi
    elif unit == 'rad':
        return upt, ang
def pltlines(self, lines, fig=[], ax=[], color='r', alpha=1):
    """ plot shapely lines with a given color and transparency

    Parameters
    ----------
    lines : iterable of shapely lines
    fig : matplotlib figure
    ax : figure axis
    color : string
    alpha : float
        transparency

    See Also
    --------
    pylayers.gis.layout.Layout.plot

    """
    if fig == []:
        fig = plt.gcf()
    if ax == []:
        ax = plt.gca()

    coords = np.array([line.xy for line in lines])
    for xy in coords:
        ax.plot(xy[0, :], xy[1, :], color=color, alpha=alpha)

    plt.axis(self.ax)
    plt.draw()
def pltpoly(self, poly, fig=[], ax=[], color='r', alpha=0.2):
    """ plot one polygon or an iterable of polygons

    Parameters
    ----------
    poly : polygon or iterable of polygons
    fig : matplotlib figure
    ax : figure axis
    color : string
    alpha : float
        transparency

    TODO : To be deplaced in an other class
    """
    if fig == []:
        fig = plt.gcf()
    if ax == []:
        ax = plt.gca()

    # accept either an iterable of polygons or a single polygon
    try:
        patches = [PolygonPatch(q, alpha=alpha, color=color) for q in poly]
    except:
        patches = [PolygonPatch(q, alpha=alpha, color=color) for q in [poly]]
    for patch in patches:
        ax.add_patch(patch)

    plt.axis(self.ax)
    plt.draw()
def pltvnodes(self, vn, fig=[], ax=[]):
    """ plot vnodes as red markers annotated with their node index

    Parameters
    ----------
    vn : list of Gs nodes
    fig : matplotlib figure
    ax : figure axis

    Returns
    -------
    fig, ax
    """
    if fig == []:
        fig = plt.gcf()
    if ax == []:
        ax = plt.gca()

    if len(vn) > 0:
        pts = np.array([self.Gs.pos[n] for n in vn])
        ax.plot(pts[:, 0], pts[:, 1], 'or')
        for k, p in enumerate(pts):
            ax.text(p[0], p[1], vn[k])

    return fig, ax
def updateshseg(self):
    """ update shapely segments

    Build a shapely LineString for every segment of Gs and store the
    result in self._shseg. This function is called at the beginning
    of buildGt.

    See Also
    --------
    buildGt
    """
    self._shseg = {}
    for seg in self.Gs.nodes():
        if seg > 0:
            # termination points of the segment
            cn = self.Gs.node[seg]['connect']
            self._shseg[seg] = sh.LineString(
                (self.Gs.pos[cn[0]], self.Gs.pos[cn[1]]))
def _triangle_old(self, poly_surround, poly_holes=[], mesh_holes=False):
    """ perform a Delaunay partitioning on shapely polygons

    Parameters
    ----------
    poly_surround : sh.Polygon
        A single polygon to be partitionned
    poly_holes : list of sh.Polygon
        A list of polygons contained inside poly_surround ; they are
        considered as holes
    mesh_holes : bool
        If True make the delaunay partition of poly_holes
        else : only partition poly_surround and treat poly_holes as holes

    Returns
    -------
    T : dict
        dictionnary from the triangle.triangulate library
        T.keys()
            ['segment_markers',
             'segments',
             'holes',
             'vertices',
             'vertex_markers',
             'triangles'
             ]

    Notes
    -----
    uses the triangle library

    """
    if not isinstance(poly_surround, list):
        poly_surround = [poly_surround]

    lP = poly_surround + poly_holes

    vertices = np.ndarray(shape=(2, 0))
    segments = np.ndarray(shape=(2, 0), dtype='int')
    holes = np.ndarray(shape=(2, 0))
    segcpt = 0
    for p in lP:
        # exterior ring without the repeated closing point
        pts = np.array(p.exterior.xy)[:, :-1]
        vertices = np.hstack((vertices, pts))
        nbv = pts.shape[1]
        # ring segments (0,1)...(nbv-1,0), shifted by the running offset
        segments = np.hstack((segments, np.array(
            [np.arange(nbv), np.mod(range(1, nbv + 1), nbv)], dtype='int') + segcpt))
        segcpt = segcpt + nbv
        if not mesh_holes:
            # mark each polygon centroid as a hole
            holes = np.hstack((holes, np.array(p.centroid.xy)))
    if not mesh_holes:
        C = {'vertices': vertices.T, 'segments': segments.T, 'holes': holes.T}
    else:
        C = {'vertices': vertices.T, 'segments': segments.T}
    # bug fix : removed an unconditional `import ipdb ;
    # ipdb.set_trace()` debug breakpoint that blocked every call
    T = triangle.triangulate(C, 'pa')
    return T
def _merge_polygons(self, lP):
    """ merge triangles (polygon objects) into convex polygons

    Greedy scheme : pop a polygon, try to merge it with each
    neighbour sharing an edge ; a merge is kept only if the union
    stays convex, otherwise the popped polygon is kept as is.

    Parameters
    ----------
    lP : list
        list of polygons to be merged (consumed in place by pop)

    Returns
    -------
    lMP : list
        list of merged polygons
    """
    lMP = []
    # MERGE POLYGONS
    # move from delaunay triangles to convex polygons
    while lP != []:
        p = lP.pop(0)
        # restrict search to polygons touching p along a segment
        # (a LineString intersection means a shared edge)
        restp = [(ix, x) for ix, x in enumerate(lP)
                 if isinstance(p.intersection(x), sh.LineString)]
        conv = False
        pold = p
        for ip2, p2 in restp:
            # '+' is assumed to be the union operator of the project
            # Polygon class — TODO confirm
            p = p + p2
            if p.isconvex():
                # convex merge succeeded : requeue the merged polygon
                # so it can keep growing on later iterations
                lP.pop(ip2)
                lP.insert(0, p)
                conv = True
                break
            else:
                # revert to the polygon before the failed merge
                p = pold
        if restp == [] and conv == True:
            # NOTE(review): restp == [] implies the for loop never ran
            # and conv is still False, so this branch looks unreachable
            # — confirm the intended condition
            lMP.append(p)
        if not conv:
            if pold not in lMP:
                lMP.append(pold)
        if len(lP) == 0:
            # queue exhausted : keep the last polygon if not stored yet
            if p not in lMP:
                lMP.append(p)
    return lMP
def _triangle(self, holes=[], vnodes=[], bplot=False):
    """ Delaunay partitioning on shapely polygons

    Parameters
    ----------
    holes : ndarray
        if holes == [] : the merge is applied on the interior of the
        layout (indoor)
        if holes == np.ndarray (centroids of polygons) : indoor is
        discarded and delaunay is applied on outdoor
    vnodes : list
        Gs nodes to triangulate (default [] : all Gs nodes)
    bplot : boolean
        plot the resulting triangulation

    Returns
    -------
    T : dict
        dictionnary from the triangle.triangulate library with keys
        ['segment_markers', 'segments', 'holes', 'vertices',
         'vertex_markers', 'triangles']
    map_vertices : ndarray
        negative Gs point indices, in the order used by T['vertices']

    Notes
    -----
    This method uses the `triangle` library.
    NOTE(review): mutable default arguments (holes=[], vnodes=[]) are
    never mutated here but remain an anti-pattern ; also `holes == []`
    is a numpy broadcast when holes is an ndarray — confirm callers
    only pass [] or a non-empty ndarray.
    """
    # this means Delaunay is applied on exterior
    # and inside polygons will be discarded
    segbounds = []
    ptbounds = []
    if holes == []:
        # remove air segments around layout
        pass
    if vnodes == []:
        vnodes = self.Gs.nodes()
    # find termination points of segments of layout
    # (networkx != 1.10 : adjacency via G[x] ; 1.10 : nx.neighbors)
    if nx.__version__ != '1.10':
        seg = np.array([self.Gs[x] for x in vnodes
                        if x > 0
                        and x not in segbounds])
    else:
        seg = np.array([nx.neighbors(self.Gs, x) for x in vnodes
                        if x > 0
                        and x not in ptbounds])
    # get vertices/points of layout (negative Gs indices)
    ivertices = np.array([(x, self.Gs.pos[x][0], self.Gs.pos[x][1]) for x in vnodes
                          if x < 0
                          and x not in ptbounds])
    # map_vertices : points negative index (Np,)
    map_vertices = ivertices[:, 0].astype('int')
    # vertices : coordinates (Np x 2)
    vertices = ivertices[:, 1:]
    sorter = np.argsort(map_vertices)
    # mapping between Gs graph segments and triangle segment indices
    segments = sorter[np.searchsorted(map_vertices, seg, sorter=sorter)]
    if holes == []:
        C = {'vertices': vertices, 'segments': segments}
    else:
        C = {'vertices': vertices, 'segments': segments, 'holes': holes}
    # switches : presumably 'p' (planar straight line graph) and
    # 'a' (area constraint) — see triangle library documentation
    T = triangle.triangulate(C, 'pa')
    if bplot:
        import triangle.plot as plot
        ax = plt.gca()
        plot.plot(ax, **T)
        ax = plt.gca()
        ax.get_xaxis().set_visible(True)
        ax.get_yaxis().set_visible(True)
        plt.show()
    return T, map_vertices
def buildGt(self, check=True,difftol=0.01,verbose=False,tqdmpos=0):
""" build graph of convex cycles
Parameters
----------
check : boolean
difftol : float
verbose : boolean
tqdmpos : progressbar
todo :
- add an option to only take outside polygon
=> pass to self._triangle a hole coreesponding to centroid of
polygon except those of boundary ( see buildGtold )
"""
# 1. Do a Delaunay triangulation
# build a list of triangle polygons : lTP
# vnodes refers to the nodes of Gs
# if vnodes == 0 it means this is a created
# segment which is tagged as _AIR
###
# if verbose :
# Gtpbar = tqdm.tqdm(total=100., desc='BuildGt',position=0)
# pbar_awloop = tqdm.tqdm(total=100., desc ='airwalls loop',leave=False,position=1)
Gtpbar = pbar(verbose,total=100., desc ='BuildGt',position=tqdmpos)
pbartmp = pbar(verbose,total=100., desc ='Triangulation',leave=True,position=tqdmpos+1)
T, map_vertices = self._triangle()
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
ptri = T['vertices'][T['triangles']]
# List of Triangle Polygons
pbartmp = pbar(verbose,total=100.,
desc ='Transfer polygons list',
leave=True,
position=tqdmpos+1)
lTP = [geu.Polygon(x) for x in ptri]
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
# update vnodes of Polygons
pbartmp = pbar(verbose,total=100.,
desc ='Update Polygons vnodes',
leave=True,
position=tqdmpos+1)
#
# p is a polygon
# get_points(p) : get points from polygon
# this is for limiting the search region for large Layout
#
[ polygon.setvnodes_new(self.get_points(polygon), self) for polygon in lTP ]
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
# 2.add air walls to triangle poly
###
# luaw : list of tuples
# ( polygon , array of _AIR segments)
pbartmp = pbar(verbose,total=100.,
desc ='Buiild list of airwalls',
leave=True,
position=tqdmpos+1)
luaw = [(p, np.where(p.vnodes == 0)[0]) for p in lTP]
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
#
# For a triangle polygon the number of vnodes
# creates new _AIR segments
#
cpt = 1./(len(luaw)+1)
_airseg = []
pbartmp = pbar(verbose,total=100., desc ='Add airwalls',leave=True,position=tqdmpos+1)
for p, uaw in luaw:
# for each vnodes == 0, add an _AIR
if verbose :
pbartmp.update(100.*cpt)
for aw in uaw:
modpt = len(p.vnodes)
_airseg.append(self.add_segment(p.vnodes[np.mod(aw - 1, modpt)],
p.vnodes[
np.mod(aw + 1, modpt)], name='_AIR',
z=(0, 40000000),
verbose=False))
# update polygon segments with new added airwalls
p.setvnodes_new(self.get_points(p),self)
if verbose:
Gtpbar.update(100./12.)
pbartmp = pbar(verbose,total=100., desc ='Update Graph',leave=True,position=tqdmpos+1)
tri = T['triangles']
nbtri = len(T['triangles'])
# temporary name/node_index of triangles
MT = -np.arange(1, nbtri + 1)
# 3. Create a temporary graph
# where : positive nodes (>0) are triangles segments
# negative nodes (<0) are triangles centroids
# edges link triangle centroids to their respective segments
# Ex represent list of points in Gs corresponging to segments
#[pt_head pt_tail]
E0 = map_vertices[tri[:, 1:]]
E1 = map_vertices[tri[:, :2]]
E2 = map_vertices[tri[:, 0::2]]
# from [pt_tail pt_head] get segment id in Gs
n0 = [self.numseg(e[0], e[1]) for e in E0]
n1 = [self.numseg(e[0], e[1]) for e in E1]
n2 = [self.numseg(e[0], e[1]) for e in E2]
# creation of a temporary graph
G = nx.Graph()
G.add_edges_from(zip(n0, MT))
G.add_edges_from(zip(n1, MT))
G.add_edges_from(zip(n2, MT))
# 4. search in the temporary graph
###
# nodes of degree 2 :
# - they correspond to Gs segments that link to triangle centroid
# - their neighbors are the triangles centroids
# find nodes of degree 2 (corresponding to segments linked to a
# triangle centroid)
rn = []
rn.extend([un for un in n0 if nx.degree(G, un) == 2])
rn.extend([un for un in n1 if nx.degree(G, un) == 2])
rn.extend([un for un in n2 if nx.degree(G, un) == 2])
rn = np.unique(rn)
# determine the neighbors of those segments (the 2 connected triangles
# centroids)
# v1.1 neigh = [nx.neighbors(G, un) for un in rn]
#neigh = [ dict(G[un]).keys() for un in rn ]
neigh = [[n for n in nx.neighbors(G,un)] for un in rn ]
# store into networkx compliant format
uE = [(neigh[un][0], neigh[un][1], {'segment': [
rn[un]] + self.Gs.node[rn[un]]['iso']}) for un in range(len(rn))]
iuE = {rn[un]: [-neigh[un][0], -neigh[un][1]]
for un in range(len(rn))}
# delete temporary graph
del G
# create graph Gt
self.Gt = nx.Graph(name='Gt')
self.Gt.add_edges_from(uE)
self.Gt = nx.relabel_nodes(self.Gt, lambda x: -x)
# add polyg to nodes
# add indoor to nodes
# add isopen to nodes
nno = [(n, {'polyg': lTP[n - 1], 'indoor':True, 'isopen':True})
for n in self.Gt.nodes()]
self.Gt.add_nodes_from(nno)
self.Gt.pos = {}
self.Gt.pos.update({n: np.array(
self.Gt.node[n]['polyg'].centroid.xy).squeeze() for n in self.Gt.nodes()})
# self.Gtpos = {-MT[i]:pMT[i] for i in xrange(len(MT))}
# plt.figure()
# # G=nx.Graph()
# # G.add_edges_from(E0)
# # G.add_edges_from(E1)
# # G.add_edges_from(E2)
_airseg = np.array(_airseg)
_airseg = _airseg[_airseg != np.array(None)].astype('int')
_airseg = np.unique(_airseg)
#
# Mikado like progression for simplification of a set of convex polygons
#
# Loop over AIR segments
#
mapoldcy = {c: c for c in self.Gt.nodes()}
# self.showG('st',aw=1)
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
Nairseg = len(_airseg)
cpt = 1./(Nairseg+1)
pbartmp = pbar(verbose,total=100., desc ='Mikado',leave=True,position=tqdmpos+1)
for a in _airseg:
if verbose:
pbartmp.update(100.*cpt)
#
# n0,n1 : cycle number
#
n0, n1 = iuE[a]
found = False
while not found:
nn0 = mapoldcy[n0]
if n0 == nn0:
found = True
else:
n0 = nn0
found = False
while not found:
nn1 = mapoldcy[n1]
if n1 == nn1:
found = True
else:
n1 = nn1
p0 = self.Gt.node[n0]['polyg']
p1 = self.Gt.node[n1]['polyg']
# Merge polygon
P = p0 + p1
# If the new Polygon is convex update Gt
#
if geu.isconvex(P):
# updates vnodes of the new merged polygon
P.setvnodes_new(self.get_points(P),self)
# update edge
n0s = n0
n1s = n1
# get segments information from cycle n0
dne = dict(self.Gt[n0])
# remove connection to n0 to avoid a cycle being
# connected to itself
# v1.1 self.Gt[n1].pop(n0)
dict(self.Gt[n1]).pop(n0)
# add information from adjacent cycle n1
dne.update(dict(self.Gt[n1]))
# list of items of the merged dictionnary
ine = dne.items()
# update n0 with the new merged polygon
self.Gt.add_node(n0, polyg=P)
# connect new cycle n0 to neighbors
# for x in ine:
# if x[0]!=n0:
# ncy = x[0]
# dseg = x[1]
# # a link between cycles already exists
# if self.Gt.has_edge(n0,ncy):
# dseg_prev = self.Gt.edge[n0][ncy]
# dseg['segment']=list(set(dseg['segment']+dseg_prev['segment']))
# printn0,ncy,dseg['segment']
# self.Gt.add_edge(n0,ncy,segment=dseg['segment'])
self.Gt.add_edges_from([(n0, x[0], x[1])
for x in ine if x[0] != n0])
# remove old cycle n1 n
self.Gt.remove_node(n1)
# update pos of the cycle with merged polygon centroid
self.Gt.pos[n0] = np.array((P.centroid.xy)).squeeze()
self.Gt.pos.pop(n1)
# delete _air segment a
# do not apply g2npy
self.del_segment(a, verbose=False, g2npy=False)
mapoldcy[n1] = n0
# fig,a=self.showG('st',aw=1)
# plt.show()
######
# fix renumbering Gt nodes
if verbose:
Gtpbar.update(100./12.)
pbartmp = pbar(verbose,total=100., desc ='Update Gs ncy',leave=True,position=tqdmpos+1)
pos = self.Gt.pos
nl = {c: uc + 1 for uc, c in enumerate(self.Gt.nodes())}
self.Gt = nx.relabel_nodes(self.Gt, nl)
self.Gt.pos = {}
self.Gt.pos = {nl[n]: pos[n] for n in nl}
self._updGsncy()
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
#
# add cycle 0 to boundaries segments
# cycle 0 is necessarily outdoor
#
self.Gt.add_node(0, indoor=False)
for s in self.segboundary:
self.Gs.node[s]['ncycles'].append(0)
#
# boundary adjascent cycles
#
#adjcyair = np.array(map(lambda x: filter(lambda y: y != 0,
# self.Gs.node[x]['ncycles'])[0], self.segboundary))
adjcyair = np.array([[n for n in self.Gs.node[s]['ncycles'] if n!=0]
for s in self.segboundary]).ravel()
# connect cycles separated by air wall to cycle 0
for cy, seg in zip(adjcyair, self.segboundary):
self.Gt.node[cy]['indoor'] = False
self.Gt.node[cy]['isopen'] = True
self.Gt.add_edge(0, cy, segment=[seg])
#
#
#
if check:
# print("check len(ncycles) == 2",)
nodes = [i for i in self.Gs.nodes() if i > 0]
cncy = np.array([len(self.Gs.node[i]['ncycles']) for i in nodes])
ucncyl = np.where(cncy < 2)[0]
ucncym = np.where(cncy > 2)[0]
assert len(ucncyl) == 0, "Some segments are connected to LESS than 2 cycles" + \
str(np.array(nodes)[ucncyl])
assert len(ucncym) == 0, "Some segments are connected to MORE than 2 cycles" + \
str(np.array(nodes)[ucncym])
# print("passed")
# self.degree is updated in g2npy
# self.degree has to be called before determination of diffraction points
# which relies of the full determination of the degree of each point of Gs
# including the corner point with degree 0 ( only connected to _AIR)
self.g2npy()
# find diffraction points : updating self.ddiff
tqdmkwargs={'total':100.,'desc':'Find Diffractions','position':1}
self._find_diffractions(difftol=difftol,verbose=verbose,tqdmkwargs=tqdmkwargs)
if verbose:
Gtpbar.update(100./12.)
# print('find diffraction...Done 8/12')
pbartmp = pbar(verbose,total=100., desc ='Diffraction on airwalls',leave=True,position=tqdmpos+1)
#
# explanation of lnss
#
# list of diffraction point involving different segment
# list of diffraction point involving subsegment ( = iso segments)
# needs checking height in rays.to3D for constructing the 3D ray
#
self.lnss = [x for x in self.ddiff if len(set(self.Gs[x]).intersection(set(self.lsss))) > 0]
#set(nx.neighbors(self.Gs, x)).intersection(set(self.lsss))) > 0]
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
#
# VIII - Construct the list of interactions associated to each cycle
#
# Interaction labeling convention
#
# tuple (npoint,) : Diffraction on point npoint
# tuple (nseg,ncycle) : Reflection on nseg toward cycle ncycle
# tuple (nseg,cy0,cy1) : Transmission from cy0 to cy1 through nseg
#
# At that stage the diffraction points are not included
# not enough information available.
# The diffraction points are not known yet
tqdmkwargs={'total':100.,'desc':'List of interactions','position':1}
self._interlist(verbose=verbose,tqdmkwargs=tqdmkwargs)
if verbose:
Gtpbar.update(100./12.)
#
# dca : dictionnary of cycles which have an air wall
#
pbartmp = pbar(verbose,total=100., desc ='Build dca',leave=True,position=tqdmpos+1)
self.dca = {}
for seg, d in self.Gs.node.items():
if seg > 0:
if ((d['name'] == 'AIR') or d['name'] == '_AIR'):
cy = d['ncycles']
try:
self.dca[cy[0]].append(cy[1])
except:
self.dca[cy[0]] = [cy[1]]
try:
self.dca[cy[1]].append(cy[0])
except:
self.dca[cy[1]] = [cy[0]]
if verbose:
# print('build dca...Done 11/12')
pbartmp.update(100.)
Gtpbar.update(100./12.)
#
# indoor property is spread by contagion
#
pbartmp = pbar(verbose,total=100., desc ='Indoor properties',leave=False,position=tqdmpos+1)
visited = [0]
#v1.1 to_visit = nx.neighbors(self.Gt, 0)
to_visit = list(dict(self.Gt[0]).keys())
law = self.name['_AIR'] + self.name['AIR']
while len(to_visit) > 0:
# get current cycle
cur_cy = to_visit.pop()
# get neighbors of current_cycle
# v1.1 neighbors = nx.neighbors(self.Gt, cur_cy)
neighbors = self.Gt[cur_cy].keys()
# get neighbors separated by an air_wall
neighbors_aw = [x for x in neighbors
if (len(self.Gt[cur_cy][x]['segment'])==1 and
self.Gt[cur_cy][x]['segment'][0] in law
)
]
# get not visited neighbors_aw
nv_neighbors_aw = [
x for x in neighbors_aw if x not in (visited + to_visit)]
# not visited neighbors air wall separated cycles are outdoor cycle
for x in nv_neighbors_aw:
self.Gt.node[x]['indoor'] = False
self.Gt.node[x]['isopen'] = True
# extend to_visit to not visited neighbors
to_visit.extend(nv_neighbors_aw)
visited.append(cur_cy)
if verbose:
pbartmp.update(100.)
Gtpbar.update(100./12.)
self.g2npy()
def _visual_check(self, fontsize=18):
    """ visual checking of graphs

    Draw a 2x2 figure: the structure graph Gs with indoor (red) and
    outdoor (green) cycle polygons, the diffraction points, the
    topological graph Gt and the visibility graph Gv, then save it
    as 'visual_check.pdf'.

    Parameters
    ----------
    fontsize : int
        font size intended for the (currently commented-out) axis titles
    """
    fig, axs = plt.subplots(2, 2, figsize=(10, 10))
    plt.subplots_adjust(left=0,
                        right=1.0,
                        bottom=0,
                        top=1,
                        wspace=0,
                        hspace=0)
    if hasattr(self, 'Gs') and hasattr(self, 'Gt'):
        ax = axs[0, 0]
        self.showG('s', aw=1, ax=ax, fig=fig)
        # cycle polygons, split on the 'indoor' flag (cycle 0 excluded)
        indoor = [self.Gt.node[p]['polyg']
                  for p in self.Gt.nodes() if p != 0 and self.Gt.node[p]['indoor']]
        outdoor = [self.Gt.node[p]['polyg']
                   for p in self.Gt.nodes() if p != 0 and not self.Gt.node[p]['indoor']]
        self.pltpoly(indoor, color='r', ax=ax, fig=fig)
        self.pltpoly(outdoor, color='g', ax=ax, fig=fig)
        ax = axs[0, 1]
        f, ax = self.showG('s', aw=1, ax=ax, fig=fig)
        if hasattr(self, 'ddiff'):
            # overlay diffraction points on the structure graph
            diffpos = np.array([self.Gs.pos[x] for x in self.ddiff.keys()])
            ax.scatter(diffpos[:, 0], diffpos[:, 1], s=130)
        #ax.set_title('Diffraction points')
        ax = axs[1, 0]
        f, ax = self.showG('st', aw=1, ax=ax, fig=fig)
        #ax.set_title('$\mathcal{G}_t$',fontsize=fontsize)
        # fix: set_axis_off is a method; the original accessed the
        # attribute without calling it, which was a silent no-op
        ax.set_axis_off()
        if hasattr(self, 'Gv'):
            ax = axs[1, 1]
            f, ax = self.showG('sv', aw=1, ax=ax, fig=fig)
            #ax.set_title('$\mathcal{G}_v$',fontsize=fontsize)
            ax.set_axis_off()
        else:
            print('no Gv found. Yet computed ?')
    plt.savefig('visual_check.pdf')
    #plt.tight_layout()
    # axs[2,1].remove()
def _delaunay(self, poly, polyholes=[]):
    """ make a Delaunay partitioning of a polygon

    If polyholes == []
        if a cycle is non convex
        1- find its polygon
        2- partition polygon into convex polygons (Delaunay)
        3- try to merge partitioned polygons in order to obtain
           the minimal number of convex polygons
    If polyholes != []
        polygon poly contains holes (polyholes)

    This method returns a partitioning of the polygon poly
    into several convex polygons (voronoi).

    Parameters
    ----------
    poly : sh.Polygon
    polyholes : list of sh.Polygon

    Returns
    -------
    ncpol : list
        list of new created geu.Polygons

    Notes
    -----
    The algorithm updates the Gt nodes and edges created in self.buildGt
    by adding new nodes and new _AIR segments.

    NOTE(review): `polyholes=[]` is a mutable default argument; it is
    rebound but never mutated here, so it is harmless in practice.

    Called In
    ---------
    pylayers.gis.layout.buildGt

    See Also
    --------
    pylayers.gis.layout.buildGt
    pylayers.gis.layout.add_segment
    pylayers.gis.layout.del_segment
    pylayers.util.geomutil.Polygon
    sp.spatial.Delaunay
    """
    # exterior ring vertices of poly as an (N, 2) array
    pucs = np.array(poly.exterior.xy).T
    # keep all convex points (in + out) to build a Delaunay triangulation
    if polyholes != []:
        if not isinstance(polyholes, list):
            polyholes = [polyholes]
        for ph in polyholes:
            # sum up polyholes to their gathered polygones
            pucsh = np.array(ph.exterior.xy).T
            pucs = np.vstack((pucs, pucsh))
    if len(pucs) != 0:
        ####
        # perform a Delaunay Partioning
        ####
        trid = sp.spatial.Delaunay(pucs)
        tri = trid.simplices
        polys = []
        naw = []
        popo = []
        for t in tri:
            ts = geu.Polygon(pucs[t])
            # check if the new polygon is contained into
            # the original polygon (non guarantee by Delaunay)
            try:
                C0 = poly.contains(ts)
            except:
                # NOTE(review): debugging leftover — drops into the
                # IPython debugger when shapely raises on a triangle
                from IPython.core.debugger import Tracer
                Tracer()()
            if polyholes == []:
                C = [False]
                I = 0
            else:
                # does the triangle overlap one of the holes ?
                C = [isinstance(ii.intersection(ts), sh.Polygon)
                     for ii in polyholes]
            popo.append(ts)
            # keep the triangle if poly contains it and no polyhole does
            if C0 and (not np.any(C)):
                cp = ts
                cp.setvnodes(self)
                # vnodes == 0 marks triangle edges with no existing Gs segment
                uaw = np.where(cp.vnodes == 0)[0]
                lvn = len(cp.vnodes)
                for i in uaw:
                    # keep track of created airwalls, because some
                    # of them will be destroyed in step 3.
                    naw.append(self.add_segment(
                        cp.vnodes[np.mod(i - 1, lvn)],
                        cp.vnodes[np.mod(i + 1, lvn)], name='_AIR'))
                polys.append(cp)
        #
        # 3. merge Delaunay triangulation in order to obtain
        #    the larger convex polygons partitioning
        #
        # re-add the parts of poly not covered by the kept triangles
        diff = poly.difference(sh.MultiPolygon(polys))
        if isinstance(diff, sh.Polygon):
            diff = sh.MultiPolygon([diff])
        if isinstance(diff, sh.MultiPolygon):
            for d in diff:
                extra = geu.Polygon(d)
                extra.setvnodes(self)
                polys.append(extra)
        cpolys = []
        nbpolys = len(polys)
        # greedy merging : pop a polygon and try to merge it with a
        # neighbour sharing an edge as long as the union stays convex
        while polys != []:
            p = polys.pop(0)
            for ip2, p2 in enumerate(polys):
                conv = False
                inter = p.intersection(p2)
                # if 2 triangles have a common segment
                pold = p
                if isinstance(inter, sh.LineString):
                    p = p + p2
                    if p.isconvex():
                        polys.pop(ip2)
                        polys.insert(0, p)
                        conv = True
                        break
                    else:
                        p = pold
            # NOTE(review): conv/pold are only bound inside the for loop;
            # a single-polygon input would raise NameError here — presumably
            # never happens in practice, TODO confirm
            if not conv:  # else:
                if pold not in cpolys:
                    cpolys.append(pold)
            if len(polys) == 0:
                cpolys.append(p)
        # 4. ensure the correct vnode numerotation of the polygons
        # and remove unecessary airwalls
        # ncpol : new created polygons
        ncpol = []
        vnodes = []
        for p in cpolys:
            interpoly = poly.intersection(p)
            if isinstance(interpoly, sh.MultiPolygon):
                raise AttributeError('multi polygon encountered')
            else:
                try:
                    ptmp = geu.Polygon(interpoly)
                    # ptmp = self.polysh2geu(interpoly)
                except:
                    # NOTE(review): debugging leftover (ipdb)
                    import ipdb
                    ipdb.set_trace()
            ptmp.setvnodes(self)
            ncpol.append(ptmp)
            vnodes.extend(ptmp.vnodes)
        # if no polyholes
        if polyholes == []:
            # 4bis
            # Check if all the original area is covered
            # sometimes, area surrounded by 2 new airwalls is not found
            # the following code re-add it.
            cpdiff = poly.difference(cascaded_union(cpolys))
            if isinstance(cpdiff, sh.Polygon):
                cpdiff = sh.MultiPolygon([cpdiff])
            if isinstance(cpdiff, sh.MultiPolygon):
                for cp in cpdiff:
                    ptmp = geu.Polygon(cp)
                    ptmp.setvnodes(self)
                    ncpol.append(ptmp)
                    vnodes.extend(ptmp.vnodes)
        # delete the temporary airwalls that no kept polygon uses
        daw = filter(lambda x: x not in vnodes, naw)
        for d in daw:
            self.del_segment(d, verbose=False, g2npy=False)
        self.g2npy()
        return ncpol
def _updGsncy(self):
    """ update Gs ncycles using Gt information

    Update graph Gs segments with their 2 cycles information:
    reset a void 'ncycles' list on every Gs node, then fill it from
    the vnodes of each Gt cycle polygon, and finally mirror shared
    segments into the 'segment' attribute of Gt edges.

    See Also
    --------
    pylayers.gis.layout.buildGt
    pylayers.gis.layout.convexify
    """
    # reset the cycle list of every Gs node (points and segments)
    for node in self.Gs.node:
        self.Gs.node[node]['ncycles'] = []
    # loop over all cycles, skipping the outside cycle 0
    for cy in [c for c in self.Gt.nodes() if c != 0]:
        # vnodes : points (negative) and segments (positive) of the cycle
        for vn in self.Gt.node[cy]['polyg'].vnodes:
            if vn == 0:
                pdb.set_trace()
            cylist = self.Gs.node[vn]['ncycles']
            if cy not in cylist:
                cylist.append(cy)
                # a segment (vn > 0) can separate at most 2 cycles
                if vn > 0:
                    if len(self.Gs.node[vn]['ncycles']) > 2:
                        print(vn, self.Gs.node[vn]['ncycles'])
                        logger.warning(
                            'A segment cannot relate more than 2 cycles')
    # propagate shared segments onto the corresponding Gt edge
    for seg in self.Gs.node:
        if seg > 0:
            cys = self.Gs.node[seg]['ncycles']
            if len(cys) > 1:
                edge_segs = self.Gt[cys[0]][cys[1]]['segment']
                if seg not in edge_segs:
                    edge_segs.append(seg)
def _addoutcy(self, check=False):
    """ add outside cycle (absorbant region index 0)

    Probably used in a future version of buildGt, managing the
    upcoming inifile.  Appends cycle 0 to the 'ncycles' list of every
    segment belonging to a convex-mask polygon in self.macvx.

    Parameters
    ----------
    check : Boolean
        when True, assert every segment is connected to exactly 2 cycles

    Notes
    -----
    # if ncycles is a list which has only one element then the adjascent
    # cycle is the outside region (cycle 0)
    """
    # collect every segment (> 0) of the convex masks
    seg0 = []
    for macvx in self.macvx:
        seg0 += [i for i in macvx.vnodes if i > 0]
    # register cycle 0 on each boundary segment
    # (was a side-effect list comprehension; a plain loop is clearer)
    for i in seg0:
        self.Gs.node[i]['ncycles'].append(0)
    if check:
        print("check len(ncycles) == 2",)
        nodes = [i for i in self.Gs.nodes() if i > 0]
        cncy = np.array([len(self.Gs.node[i]['ncycles']) for i in nodes])
        ucncyl = np.where(cncy < 2)[0]
        ucncym = np.where(cncy > 2)[0]
        assert len(ucncyl) == 0, "Some segments are connected to LESS than 2 cycles" + \
            str(np.array(nodes)[ucncyl])
        assert len(ucncym) == 0, "Some segments are connected to MORE than 2 cycles" + \
            str(np.array(nodes)[ucncym])
        print("passed")
def _interlist(self, nodelist=[], verbose=False, tqdmkwargs={}):
    """ Construct the list of interactions associated to each cycle

    Parameters
    ----------
    nodelist : list
        list of Gt nodes (cycles) for which interactions have to be found
    verbose : bool
        display a tqdm progress bar
    tqdmkwargs : dict
        keyword arguments forwarded to tqdm.tqdm

    Notes
    -----
    If self.typ == 'indoor', interactions are listed for every cycle;
    otherwise indoor cycles get an empty interaction list.

    Interaction labeling convention

    tuple (npoint,)        : Diffraction on point npoint
    tuple (nseg,ncycle)    : Reflection on nseg toward cycle ncycle
    tuple (nseg,cy0,cy1)   : Transmission from cy0 to cy1 through nseg

    At that stage the diffraction points are not included:
    not enough information available yet.

    See Also
    --------
    pylayers.gis.layout.buildGt
    pylayers.gis.layout._convex_hull
    """
    if tqdmkwargs == {}:
        tqdmkwargs = {'total': 100.,
                      'desc': 'list of interactions',
                      'position': 0}
    if nodelist == []:
        nodelist = self.Gt.nodes()
    elif not isinstance(nodelist, list):
        nodelist = [nodelist]
    # for all cycles k (node of Gt)
    if verbose:
        cpt = 1. / (len(nodelist) + 1.)
        # fix: the options dict must be expanded as keyword arguments;
        # tqdm.tqdm(tqdmkwargs) treated the dict as the iterable to wrap
        pbar = tqdm.tqdm(**tqdmkwargs)
    for k in nodelist:
        if verbose:
            pbar.update(100. * cpt)
        if k != 0:
            if self.typ == 'indoor' or not self.Gt.node[k]['indoor']:
                #vnodes = self.Gt.node[k]['vnodes']
                vnodes = self.Gt.node[k]['polyg'].vnodes
                ListInteractions = []
                for inode in vnodes:
                    if inode > 0:  # segments
                        cy = set(self.Gs.node[inode]['ncycles'])
                        name = self.Gs.node[inode]['name']  # segment name
                        #
                        # Reflexion occurs on segment different
                        # from AIR and ABSORBENT  (segment number, cycle)
                        #
                        if ((name != '_AIR') & (name != 'AIR') & (name != 'ABSORBENT')):
                            ListInteractions.append((inode, k))
                        #
                        # Transmission requires 2 cycles separated by a
                        # segment which is different from METAL and ABSORBENT
                        #
                        # (segment number, cycle in , cycle out )
                        if len(cy) == 2:
                            if ('METAL' not in name) & ('ABSORBENT' not in name):
                                ncy = list(cy.difference({k}))[0]
                                ListInteractions.append((inode, k, ncy))
                                ListInteractions.append((inode, ncy, k))
                    else:  # points
                        pass
                # add list of interactions of a cycle
                self.Gt.add_node(k, inter=ListInteractions)
            else:
                self.Gt.add_node(k, inter=[])
def _convex_hull(self, mask):
    """ add air walls to the layout enveloppe in self.Gs
        in order for the hull of the Layout to be convex

    Parameters
    ----------
    mask : Polygon

    Returns
    -------
    polys : list of geu.Polygon
        new polygons of the convex hull

    Notes
    -----
    This is a post processing of buildGt.

    See Also
    --------
    pylayers.gis.layout._interlist
    """
    # 1 - Find differences between the convex hull and the Layout contour
    #     The result of the difference are polygons
    masku = cascaded_union(mask)
    ch = masku.convex_hull
    P = ch.difference(masku)
    polys = []
    if isinstance(P, sh.MultiPolygon):
        for p in P:
            # keep only non-degenerate difference polygons
            if p.area > 1e-3:
                polys.append(geu.Polygon(p))
                polys[-1].setvnodes(self)
    lncy = []
    for p in polys:
        # p.coorddeter()
        # NOTE(review): np.where returns a tuple here (no [0] index),
        # so `for aw in uaw` iterates once with aw = the whole index
        # array, and `p.vnodes[aw - 1][0]` only uses the first match.
        # Presumably there is at most one missing segment per polygon —
        # TODO confirm intended behaviour.
        uaw = np.where(p.vnodes == 0)
        for aw in uaw:
            # 2 - non existing segments are created as airwalls
            awid = self.add_segment(
                p.vnodes[aw - 1][0], p.vnodes[aw + 1][0], name='AIR')
            p.vnodes[aw] = awid
    # U = cascaded_union([mask]+polys)
    # self.macvx = geu.Polygon(U)
    # self.macvx.setvnodes(self)
    return polys
def buildGv(self, show=False, verbose=False, tqdmpos=0):
    """ build visibility graph

    Parameters
    ----------
    show : boolean
        default False
    verbose : boolean
    tqdmpos : int
        progress bar row position

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('TA-Office.lay')
    >>> L.buildGt()
    >>> Ga = L.buildGr()
    >>> L.buildGv()

    Notes
    -----
    This method exploits cycles convexity: inside a convex cycle every
    pair of non-aligned segments see each other.  A per-cycle graph Gv
    is built, composed into self.Gv, and also kept in self.dGv.
    """
    if not hasattr(self, 'ddiff'):
        self.ddiff = {}
    Gvpbar = pbar(verbose, total=100., desc='build Gv', position=tqdmpos)
    self.Gv = nx.Graph(name='Gv')
    #
    # loop over convex cycles (nodes of Gt)
    #
    self.dGv = {}  # dict of per-cycle Gv graphs
    cpt = 1. / (len(self.Gt.node) + 1.)
    for icycle in self.Gt.node:
        if verbose:
            Gvpbar.update(100. * cpt)
        if icycle != 0:
            #
            # If indoor or outdoor all visibility are calculated
            # If outdoor only visibility between iso = 'AIR' and '_AIR'
            # are calculated
            #
            polyg = self.Gt.node[icycle]['polyg']
            # take a single segment between 2 points
            vnodes = polyg.vnodes
            # list of index of points in vnodes
            unodes = np.where(vnodes < 0)[0]
            # list of positions of an incomplete list of segments
            # used rule : after a point there is always a segment
            useg = np.mod(unodes + 1, len(vnodes))
            # list of points
            npt = [x for x in vnodes if x < 0]
            # full list of segments
            nseg_full = [x for x in vnodes if x > 0]
            # nseg : incomplete list of segments
            #
            # if mode outdoor and cycle is indoor only
            # the part above the building (AIR and _AIR) is considered
            if ((self.typ == 'outdoor') and (self.Gt.node[icycle]['indoor'])):
                nseg = [x for x in nseg_full if ((self.Gs.node[x]['name'] == 'AIR') or (self.Gs.node[x]['name'] == '_AIR'))]
            else:
                nseg = vnodes[useg]
            # diffraction points of this cycle
            ndiff = [x for x in npt if x in self.ddiff.keys()]
            #
            # Create the per-cycle visibility graph
            #
            Gv = nx.Graph(name='Gv')
            #
            # in convex case :
            #
            #    i) every non aligned segments see each other
            #
            for nk in combinations(nseg, 2):
                # look up segment endpoints in the geometry arrays
                nk0 = self.tgs[nk[0]]
                nk1 = self.tgs[nk[1]]
                tahe0 = self.tahe[:, nk0]
                tahe1 = self.tahe[:, nk1]
                pta0 = self.pt[:, tahe0[0]]
                phe0 = self.pt[:, tahe0[1]]
                pta1 = self.pt[:, tahe1[0]]
                phe1 = self.pt[:, tahe1[1]]
                # aligned segments cannot see each other
                aligned = geu.is_aligned4(pta0, phe0, pta1, phe1)
                if not aligned:
                    # exclude segments touching the outside cycle 0
                    if ((0 not in self.Gs.node[nk[0]]['ncycles']) and
                            (0 not in self.Gs.node[nk[1]]['ncycles'])):
                        # get the iso segments of both nk[0] and nk[1]
                        if ((self.typ == 'indoor') or (not self.Gt.node[icycle]['indoor'])):
                            l0 = [nk[0]] + self.Gs.node[nk[0]]['iso']
                            l1 = [nk[1]] + self.Gs.node[nk[1]]['iso']
                        else:
                            l0 = [nk[0]]
                            l1 = [nk[1]]
                        # connect every (iso) segment pair
                        for vlink in product(l0, l1):
                            Gv.add_edge(vlink[0], vlink[1])
            #
            # Handle diffraction points
            #
            #    ii)  all non adjascent valid diffraction points see each other
            #    iii) all valid diffraction points see segments non aligned
            #         with adjascent segments
            #
            # diffraction only if indoor, or if the cycle is outdoor
            #
            if ((self.typ == 'indoor') or (not self.Gt.node[icycle]['indoor'])):
                # diffraction points valid for this cycle
                ndiffvalid = [x for x in ndiff if icycle in self.ddiff[x][0]]
                # non adjascent segments of vnodes see valid diffraction
                # points
                for idiff in ndiffvalid:
                    # segments adjacent to the valid diffraction point
                    nsneigh = [x for x in self.Gs[idiff] if x in nseg_full]
                    # segvalid : not adjascent segment
                    seen_from_neighbors = []
                    #
                    # point to point
                    #
                    for npoint in ndiffvalid:
                        if npoint != idiff:
                            Gv.add_edge(idiff, npoint)
                    #
                    # All the neighbor segments in visibility which are not
                    # connected to cycle 0 and not adjacent to idiff
                    #
                    for x in nsneigh:
                        neighbx = [y for y in Gv[x]
                                   if 0 not in self.Gs.node[y]['ncycles']
                                   and y not in nsneigh]
                        seen_from_neighbors += neighbx
                    for ns in seen_from_neighbors:
                        Gv.add_edge(idiff, ns)
            #
            # Graph Gv composition
            #
            self.Gv = nx.compose(self.Gv, Gv)
            self.dGv[icycle] = Gv
def buildGi(self, verbose=False, tqdmpos=0):
    """ build graph of interactions

    Notes
    -----
    For each node of graph Gv creates
    5 different nodes associated to the same segment

    (np,)          D
    (ns,cy0)       R -> cy0
    (ns,cy1)       R -> cy1
    (ns,cy0,cy1)   T 0->1
    (ns,cy1,cy0)   T 1->0

    Gi is an oriented Graph (DiGraph)
    """
    Gipbar = pbar(verbose, total=100., desc='Build Gi', position=tqdmpos)
    if verbose:
        Gipbar.update(0.)
    self.Gi = nx.DiGraph(name='Gi')
    self.Gi.pos = {}
    #
    # 1) Create nodes of Gi and their positions
    #
    # diffraction node  (D,)
    # reflexion node    (R,cy0)
    # transmission node (T,cy0,cy1)
    #
    cpt = 100. / (len(self.Gv.node) + 1)
    pbartmp = pbar(verbose, total=100., desc='Create Gi nodes', position=tqdmpos + 1)
    for n in self.Gv.node:
        if verbose:
            pbartmp.update(cpt)
        if n < 0:  # D : diffraction on a point
            self.Gi.add_node((n,))
            self.Gi.pos[(n,)] = self.Gs.pos[n]
        if n > 0:  # R | T : reflection / transmission on a segment
            cy = self.Gs.node[n]['ncycles']
            name = self.Gs.node[n]['name']
            assert(len(cy) == 2)
            cy0 = cy[0]
            cy1 = cy[1]
            # the two endpoints of segment n
            nei = list(dict(self.Gs[n]).keys())  # get neighbor
            np1 = nei[0]
            np2 = nei[1]
            p1 = np.array(self.Gs.pos[np1])
            p2 = np.array(self.Gs.pos[np2])
            # offset direction used to place R/T nodes on either side
            # of the segment midpoint (for display purposes)
            l = p1 - p2
            nl = np.dot(l, l)
            ln = l / nl
            delta = nl / 10.
            # On AIR or ABSORBENT there is no reflection
            if ((name != '_AIR') & (name != 'AIR') & (name != 'ABSORBENT')):
                self.Gi.add_node((n, cy0))
                self.Gi.pos[(n, cy0)] = tuple(self.Gs.pos[n] + ln * delta)
                self.Gi.add_node((n, cy1))
                self.Gi.pos[(n, cy1)] = tuple(self.Gs.pos[n] - ln * delta)
            # Through METAL or ABSORBENT there is no transmission
            # except if n has a subsegment
            if (name != 'METAL') & (name != 'ABSORBENT'):
                self.Gi.add_node((n, cy0, cy1))
                self.Gi.add_node((n, cy1, cy0))
                self.Gi.pos[(n, cy0, cy1)] = tuple(
                    self.Gs.pos[n] + ln * delta / 2.)
                self.Gi.pos[(n, cy1, cy0)] = tuple(
                    self.Gs.pos[n] - ln * delta / 2.)
    #
    # 2) Establishing link between interactions
    #
    # Loop over all Gt nodes cy
    #
    #    if cy > 0
    #       calculates vnodes of cycles
    #       for all node of vnodes
    #
    iprint = 0
    if verbose:
        Gipbar.update(33.)
    cpt = 100. / (len(self.Gt.node) + 1)
    pbartmp = pbar(verbose, total=100., desc='Create Gi nodes', position=tqdmpos + 1)
    for cy in self.Gt.node:
        if verbose:
            pbartmp.update(cpt)
        # for all >0 convex cycles
        if cy > 0:
            vnodes = self.Gt.node[cy]['polyg'].vnodes
            npt = []
            #
            # find all diffraction points involved in the cycle cy
            #
            for x in vnodes:
                if x < 0:
                    if x in self.ddiff:
                        for y in self.ddiff[x][0]:
                            if y == cy:
                                npt.append(x)
            nseg = [k for k in vnodes if k > 0]
            # all segments and diffraction points of the cycle
            vnodes = nseg + npt
            for nstr in vnodes:
                if nstr in self.Gv.nodes():
                    # li1 : list of interactions carried by nstr in cycle cy
                    li1 = []
                    if nstr > 0:
                        # output cycle : cy -> cyo1
                        cyo1 = self.Gs.node[nstr]['ncycles']
                        cyo1 = [x for x in cyo1 if x != cy][0]
                        # R , Tin , Tout
                        if cyo1 > 0:
                            if (nstr, cy) in self.Gi.nodes():
                                li1.append((nstr, cy))         # R
                            if (nstr, cy, cyo1) in self.Gi.nodes():
                                li1.append((nstr, cy, cyo1))   # T cy -> cyo1
                            if (nstr, cyo1, cy) in self.Gi.nodes():
                                li1.append((nstr, cyo1, cy))   # T cyo1 -> cy
                        else:
                            # adjacent cycle is outside : only R possible
                            if (nstr, cy) in self.Gi.nodes():
                                li1 = [(nstr, cy)]
                    else:
                        # D : diffraction interaction
                        li1 = [(nstr,)]
                    # list of entities in visibility of nstr
                    lneighb = list(dict(self.Gv[nstr]).keys())
                    # restrict to entities belonging to the same cycle
                    lneighcy = [x for x in lneighb if x in vnodes]
                    for nstrb in lneighcy:
                        if nstrb in self.Gv.nodes():
                            # li2 : interactions carried by the visible entity
                            li2 = []
                            if nstrb > 0:
                                cyo2 = self.Gs.node[nstrb]['ncycles']
                                cyo2 = [x for x in cyo2 if x != cy][0]
                                if cyo2 > 0:
                                    if (nstrb, cy) in self.Gi.nodes():
                                        li2.append((nstrb, cy))
                                    if (nstrb, cy, cyo2) in self.Gi.nodes():
                                        li2.append((nstrb, cy, cyo2))
                                    if (nstrb, cyo2, cy) in self.Gi.nodes():
                                        li2.append((nstrb, cyo2, cy))
                                else:
                                    if (nstrb, cy) in self.Gi.nodes():
                                        li2 = [(nstrb, cy)]
                            else:
                                li2 = [(nstrb,)]
                            # connect compatible interaction pairs ;
                            # tuple length encodes the type :
                            # 1 = D , 2 = R , 3 = T
                            for i1 in li1:
                                for i2 in li2:
                                    if (i1[0] != i2[0]):
                                        if ((len(i1) == 2) & (len(i2) == 2)):
                                            # R <-> R (both directions)
                                            self.Gi.add_edge(i1, i2)
                                            self.Gi.add_edge(i2, i1)
                                        if ((len(i1) == 2) & (len(i2) == 3)):
                                            # R -> T : same incoming cycle
                                            if i1[1] == i2[1]:
                                                self.Gi.add_edge(i1, i2)
                                        if ((len(i1) == 3) & (len(i2) == 2)):
                                            # T -> R : output cycle matches
                                            if i1[2] == i2[1]:
                                                self.Gi.add_edge(i1, i2)
                                        if ((len(i1) == 3) & (len(i2) == 3)):
                                            # T -> T : chained cycles
                                            if i1[2] == i2[1]:
                                                self.Gi.add_edge(i1, i2)
                                            if i2[2] == i1[1]:
                                                self.Gi.add_edge(i2, i1)
                                        if ((len(i1) == 1) & (len(i2) == 3)):
                                            # D -> T
                                            if i2[1] == cy:
                                                self.Gi.add_edge(i1, i2)
                                        if ((len(i1) == 3) & (len(i2) == 1)):
                                            # T -> D
                                            if i1[2] == cy:
                                                self.Gi.add_edge(i1, i2)
                                        if ((len(i1) == 1) & (len(i2) == 2)):
                                            # D -> R
                                            self.Gi.add_edge(i1, i2)
                                        if ((len(i1) == 2) & (len(i2) == 1)):
                                            # R -> D
                                            self.Gi.add_edge(i1, i2)
                                        if ((len(i1) == 1) & (len(i2) == 1)):
                                            # D -> D
                                            self.Gi.add_edge(i1, i2)
    if verbose:
        Gipbar.update(66.)
    # updating the list of interactions of a given cycle
    pbartmp = pbar(verbose, total=100.,
                   desc='update interraction list',
                   leave=False,
                   position=tqdmpos + 1)
    for c in self.Gt.node:
        if verbose:
            pbartmp.update(cpt)
        if c != 0:
            vnodes = self.Gt.node[c]['polyg'].vnodes
            # NOTE(review): `npt` is the leftover value from the LAST
            # iteration of the previous `for cy` loop ; it looks like the
            # diffraction points of cycle c should be recomputed here —
            # TODO confirm against upstream pylayers
            for k in npt:
                self.Gt.node[c]['inter'] += [(k,)]
    if verbose:
        Gipbar.update(100.)
    # cleaning deadend Gi
    #    if outdoor, for all nodes of Gi
    #       if not diffraction
    #          if termination cycle is indoor
    #          or if starting point is indoor
    #             then delete interaction
    ldelete = []
    if self.typ == 'outdoor':
        for k in list(dict(self.Gi.node).keys()):
            # R and T (tuples of length 2 or 3)
            if len(k) > 1:
                segtype = self.Gs.node[k[0]]['name']
                if ((segtype != 'AIR') and (segtype != '_AIR')):
                    cyend = k[-1]
                    if self.Gt.node[cyend]['indoor']:
                        ldelete.append(k)
                    if len(k) == 3:
                        cystart = k[1]
                        if self.Gt.node[cystart]['indoor']:
                            ldelete.append(k)
    self.Gi.remove_nodes_from(ldelete)
    # build adjacency matrix of Gi graph
    self.Gi_A = nx.adjacency_matrix(self.Gi)
    # store list of nodes of Gi (for keeping order)
    self.Gi_no = self.Gi.nodes()
def filterGi(self, situ='outdoor'):
    """ filter Gi to manage indoor/outdoor situations

    Keep only the transmissions and reflections involving outdoor
    cycles, and the diffractions listed in self.ldiffout.

    Not called
    """
    # outdoor cycles of Gt
    cycles = np.array(self.Gt.nodes())
    mask_out = np.where([not self.Gt.node[c]['indoor'] for c in cycles])
    out_cycles = cycles[mask_out]
    inters = self.Gi.nodes()
    # transmissions (nseg, cy_in, cy_out) : both cycles must be outdoor
    keep_T = [it for it in inters
              if ((len(it) == 3) and it[0] > 0)
              and ((it[1] in out_cycles) and (it[2] in out_cycles))]
    # reflections (nseg, cy) : cycle must be outdoor
    keep_R = [it for it in inters
              if ((len(it) == 2) and it[0] > 0)
              and (it[1] in out_cycles)]
    # diffractions (npt,) : point must be in the outdoor diffraction list
    keep_D = [it for it in inters
              if it[0] < 0 and (it in self.ldiffout)]
    kept = keep_T + keep_R + keep_D
    sub = nx.subgraph(self.Gi, kept)
    sub.pos = {it: self.Gi.pos[it] for it in self.Gi.nodes()}
    self.Gi = sub
    self.Gi.pos = sub.pos
def outputGi(self, verbose=False, tqdmpos=0.):
    """ filter output of Gi edges

    Parameters
    ----------
    verbose : boolean
    tqdmpos : float
        progress bar row position

    Notes
    -----
    Let assume a sequence (nstr0,nstr1,{nstr2A,nstr2B,...}) in a signature.
    This function checks whether this sequence is feasible or not,
    whatever the type of nstr0 and nstr1.
    The feasible outputs from nstr0 to nstr1 are stored in an output field
    of edge (nstr0,nstr1).

    See Also
    --------
    pylayers.util.cone.Cone.from2seg
    pylayers.util.cone.Cone.belong_seg
    """
    assert('Gi' in self.__dict__)
    oGipbar = pbar(verbose, total=100., leave=False, desc='OutputGi', position=tqdmpos)
    # loop over all edges of Gi
    Nedges = len(self.Gi.edges())
    cpt = 100. / Nedges
    for k, e in enumerate(self.Gi.edges()):
        # extract both termination interactions nodes
        if verbose:
            oGipbar.update(cpt)
        i0 = e[0]
        i1 = e[1]
        nstr0 = i0[0]
        nstr1 = i1[0]
        # list of authorized outputs. Initialized void
        output = []
        # nstr1 : segment number of central interaction
        if nstr1 > 0:
            # central interaction is a segment
            pseg1 = self.seg2pts(nstr1).reshape(2, 2).T
            # list all potential successors of interaction i1
            i2 = list(dict(self.Gi[i1]).keys())
            # create a Cone object
            cn = cone.Cone()
            # if starting from segment
            if nstr0 > 0:
                pseg0 = self.seg2pts(nstr0).reshape(2, 2).T
                # if nstr0 and nstr1 are connected segments
                if (len(np.intersect1d(self.Gs[nstr0], self.Gs[nstr1])) == 0):
                    # from 2 not connected segment
                    cn.from2segs(pseg0, pseg1)
                else:
                    # from 2 connected segments
                    cn.from2csegs(pseg0, pseg1)
            # if starting from a point
            else:
                pt = np.array(self.Gs.pos[nstr0])
                cn.fromptseg(pt, pseg1)
            # candidate diffraction points among successors
            ipoints = [x for x in i2 if len(x) == 1]
            #   i0      i1     i2[x]
            # Avoid to have the same diffaction point after reflection
            # exemple : (-10,),(245,12),(-10,) impossible
            #  nstr0    nstr1
            if nstr0 < 0:
                ipoints = [x for x in ipoints if x[0] != nstr0]
            pipoints = np.array([self.Gs.pos[ip[0]] for ip in ipoints]).T
            # candidate successor segments (first element of R/T tuples)
            isegments = np.unique(np.array([s for s in [n[0] for n in i2]
                                            if s > 0]))
            # if nstr0 and nstr1 are adjescent segments remove nstr0 from
            # potential next interaction
            # Fix 01/2017
            # This is not always True if the angle between
            # the two adjascent segments is < pi/2
            nb_nstr0 = self.Gs[nstr0]
            nb_nstr1 = self.Gs[nstr1]
            common_point = np.intersect1d(nb_nstr0, nb_nstr1)
            if len(common_point) == 1:
                # the two segments share one endpoint : check their angle
                num0 = [x for x in nb_nstr0 if x != common_point]
                num1 = [x for x in nb_nstr1 if x != common_point]
                p0 = np.array(self.Gs.pos[num0[0]])
                p1 = np.array(self.Gs.pos[num1[0]])
                pc = np.array(self.Gs.pos[common_point[0]])
                v0 = p0 - pc
                v1 = p1 - pc
                v0n = v0 / np.sqrt(np.sum(v0 * v0))
                v1n = v1 / np.sqrt(np.sum(v1 * v1))
                # obtuse angle : nstr0 cannot be its own successor
                if np.dot(v0n, v1n) <= 0:
                    isegments = np.array([x for x in isegments if x != nstr0])
            # there are one or more segments
            if len(isegments) > 0:
                points = self.seg2pts(isegments)
                pta = points[0:2, :]
                phe = points[2:, :]
                # add difraction points
                # WARNING Diffraction points are added only if a segment is seen
                # it should be the case in 99% of cases
                if len(ipoints) > 0:
                    isegments = np.hstack((isegments, np.array(ipoints)[:, 0]))
                    pta = np.hstack((pta, pipoints))
                    phe = np.hstack((phe, pipoints))
                # i1 : interaction T
                if len(i1) == 3:
                    typ, prob = cn.belong_seg(pta, phe)
                # i1 : interaction R --> mirror candidates through pseg1
                if len(i1) == 2:
                    Mpta = geu.mirror(pta, pseg1[:, 0], pseg1[:, 1])
                    Mphe = geu.mirror(phe, pseg1[:, 0], pseg1[:, 1])
                    typ, prob = cn.belong_seg(Mpta, Mphe)
                ########
                # SOMETIMES PROBA IS 0 WHEREAS SEG IS SEEN
                ########
                # keep segment with prob above a threshold
                utypseg = typ != 0
                isegkeep = isegments[utypseg]
                # dict {numint : proba}
                dsegprob = {k: v for k, v in zip(isegkeep, prob[utypseg])}
                # keep successor interactions whose segment survived
                output = [x for x in i2 if x[0] in isegkeep]
                probint = [dsegprob[x[0]] for x in output]
                # dict interaction : proba
                dintprob = {k: v for k, v in zip(output, probint)}
                # keep all segment above nstr1 and in Cone if T
                # keep all segment below nstr1 and in Cone if R
            # NOTE(review): when len(isegments) == 0, dintprob keeps the
            # value from a previous loop iteration (or is unbound on the
            # first one) — TODO confirm this branch cannot occur in
            # practice
        else:
            # central interaction is a point (nstr1 < 0)
            # 1) Simple approach
            #    output interactions are all visible interactions
            # 2) TO BE DONE
            #    output of the diffraction points exploring
            #    right of ISB / right of RSB, wedge cone, incident cone
            output = self.Gi[(nstr1,)]
            nout = len(output)
            probint = np.ones(nout)  # temporary
            dintprob = {k: v for k, v in zip(output, probint)}
        self.Gi.add_edge(i0, i1, output=dintprob)
def outputGi_new(self, verbose=False, tqdmpos=0.):
    """ filter output of Gi edges

    Sparse-matrix variant of ``outputGi``: it reads the precomputed
    structures ``Gi_A``/``Gi_no`` (interaction adjacency) and
    ``s2pc``/``p2pc``/``s2pu``/``sgsg`` (segment/point geometry) instead of
    querying NetworkX, for multiprocessing purposes.

    Parameters
    ----------
    verbose : bool
        display a progress bar while iterating over the edges of Gi
    tqdmpos : float
        vertical position handed to the progress bar

    Notes
    -----
    Let assume a sequence (nstr0,nstr1,{nstr2A,nstr2B,...}) in a signature.
    This function checks whether this sequence is feasible or not
    , whatever the type of nstr0 and nstr1.
    The feasible outputs from nstr0 to nstr1 are stored in an output field of
    edge (nstr0,nstr1)

    See Also
    --------
    pylayers.util.cone.Cone.from2seg
    pylayers.util.cone.Cone.belong_seg
    """
    def Gspos(n):
        # Position of a Gs node: for a segment (n > 0) return the midpoint
        # of its two endpoints; for a point (n < 0) return its coordinates.
        if n > 0:
            return np.mean(self.s2pc[n].toarray().reshape(2, 2), axis=0)
        else:
            return self.p2pc[-n].toarray()
    #s2pc = self.s2pc.toarray()
    #s2pu = self.s2pu.toarray()
    #p2pc = self.p2pc.toarray()
    #A = self.Gi_A.toarray()
    assert('Gi' in self.__dict__)
    oGipbar = pbar(verbose, total=100., leave=False, desc='OutputGi', position=tqdmpos)
    # loop over all edges of Gi
    # NOTE(review): cpt divides by the edge count — an empty Gi would raise
    # ZeroDivisionError here; verify Gi is built before calling.
    Nedges = len(self.Gi.edges())
    cpt = 100. / Nedges
    # print "Gi Nedges :",Nedges
    for k, e in enumerate(self.Gi.edges()):
        # if (k%100)==0:
        # print"edge : ",k
        # extract both termination interactions nodes
        if verbose:
            oGipbar.update(cpt)
        i0 = e[0]  # first interaction
        i1 = e[1]  # central interaction
        nstr0 = i0[0]
        nstr1 = i1[0]
        # list of authorized outputs. Initialized void
        output = []
        # nstr1 : segment number of central interaction
        if nstr1 > 0:
            # central interaction is a segment
            # pseg1 = self.s2pc[nstr1,:].toarray().reshape(2, 2).T
            pseg1 = self.s2pc[nstr1, :].toarray().reshape(2, 2).T
            # pseg1 = self.s2pc[nstr1,:].data.reshape(2, 2).T
            # pseg1o = self.seg2pts(nstr1).reshape(2, 2).T
            # create a Cone object
            cn = cone.Cone()
            # if starting from segment
            if nstr0 > 0:
                # pseg0 = self.s2pc[nstr0,:].toarray().reshape(2, 2).T
                pseg0 = self.s2pc[nstr0, :].toarray().reshape(2, 2).T
                # pseg0 = self.s2pc[nstr0,:].data.reshape(2, 2).T
                # pseg0o = self.seg2pts(nstr0).reshape(2, 2).T
                # if nstr0 and nstr1 are connected segments
                if self.sgsg[nstr0, nstr1] == 0:
                    # from 2 not connected segment
                    cn.from2segs(pseg0, pseg1)
                else:
                    # from 2 connected segments
                    cn.from2csegs(pseg0, pseg1)
            # if starting from a point
            else:
                pt = Gspos(nstr0)[0, :]
                # pt = np.array(self.Gs.pos[nstr0])
                cn.fromptseg(pt, pseg1)
            # list all potential successors of interaction i1
            # (row of the interaction adjacency matrix Gi_A)
            ui2 = self.Gi_no.index(i1)
            ui = np.where(self.Gi_A[ui2, :].toarray() != 0)[1]
            i2 = [self.Gi_no[u] for u in ui]
            # i2 = nx.neighbors(self.Gi, i1)
            # how to find neighbors without network
            # ngi=L.Gi.nodes()
            # A=nx.adjacency_matrix(L.Gi)
            # inter = ngi[10]
            # u = ngi.index(inter)
            # ui = A[u,:].indices
            # neigh_inter = np.array([ngi[u] for u in ui])
            # diffraction successors are 1-tuples (points)
            ipoints = [x for x in i2 if len(x) == 1]
            #ipoints = filter(lambda x: len(x) == 1, i2)
            # pipoints = np.array([self.Gs.pos[ip[0]] for ip in ipoints]).T
            pipoints = np.array([Gspos(ip[0]) for ip in ipoints]).T
            # filter tuple (R | T)
            #istup = filter(lambda x : type(eval(x))==tuple,i2)
            # map first argument segment number
            #isegments = np.unique(map(lambda x : eval(x)[0],istup))
            # isegments = np.unique(
            #     filter(lambda y: y > 0, map(lambda x: x[0], i2)))
            isegments = np.unique([x[0] for x in i2 if x[0] > 0])
            # if nstr0 and nstr1 are adjescent segment remove nstr0 from
            # potential next interaction
            # Fix 01/2017
            # This is not always True if the angle between
            # the two adjascent segments is < pi/2
            # nb_nstr0 = self.Gs.neighbors(nstr0)
            # nb_nstr1 = self.Gs.neighbors(nstr1)
            # nb_nstr0 = np.array([self.s2pu[nstr0,0],self.s2pu[nstr0,1]])
            # nb_nstr1 = np.array([self.s2pu[nstr1,0],self.s2pu[nstr1,1]])
            # nb_nstr0 = self.s2pu[nstr0,:].toarray()[0]
            # nb_nstr1 = self.s2pu[nstr1,:].toarray()[0]
            # first interaction is a point
            if nstr0 < 0:
                nb_nstr0 = [nstr0]
            else:
                nb_nstr0 = self.s2pu[nstr0, :].toarray()[0, :]
            nb_nstr1 = self.s2pu[nstr1, :].toarray()[0, :]
            # common_point = np.intersect1d(nb_nstr0,nb_nstr1)
            common_point = np.array([x for x in nb_nstr0 if x in nb_nstr1])
            #print(common_point)
            # if len(common_point) == 1:
            # pdb.set_trace()
            if common_point.any():
                # nstr0 and nstr1 share an endpoint: drop nstr0 from the
                # successors only when the segments make an angle >= pi/2
                num0 = [x for x in nb_nstr0 if x != common_point]
                num1 = [x for x in nb_nstr1 if x != common_point]
                p0 = Gspos(num0[0])[0, :]
                p1 = Gspos(num1[0])[0, :]
                pc = Gspos(common_point[0])[0, :]
                v0 = p0 - pc
                v1 = p1 - pc
                v0n = v0 / np.sqrt(np.sum(v0 * v0))
                v1n = v1 / np.sqrt(np.sum(v1 * v1))
                if np.dot(v0n, v1n) <= 0:
                    isegments = np.array([x for x in isegments if x != nstr0])
                # [ x for x in rle if x != nstr0, isegments))
            # there are one or more segments
            # if len(isegments) > 0:
            if isegments.any():
                li1 = len(i1)
                # points = self.s2pc[isegments,:].toarray().T
                points = self.s2pc[isegments, :].toarray().T
                # points = self.s2pc[isegments,:].data.reshape(4,len(isegments))
                # pointso = self.seg2pts(isegments)
                pta = points[0:2, :]
                phe = points[2:, :]
                # add difraction points
                # WARNING Diffraction points are added only if a segment is seen
                # it should be the case in 99% of cases
                if len(ipoints) > 0:
                    isegments = np.hstack(
                        (isegments, np.array(ipoints)[:, 0]))
                    pta = np.hstack((pta, pipoints))
                    phe = np.hstack((phe, pipoints))
                # cn.show()
                # if i0 == (38,79) and i1 == (135,79,23):
                #     printi0,i1
                #     import ipdb
                #     ipdb.set_trace()
                # i1 : interaction T
                if li1 == 3:
                    typ, prob = cn.belong_seg(pta, phe)
                    # if bs.any():
                    #    plu.displot(pta[:,bs],phe[:,bs],color='g')
                    # if ~bs.any():
                    #    plu.displot(pta[:,~bs],phe[:,~bs],color='k')
                # i1 : interaction R --> mirror
                elif li1 == 2:
                    Mpta = geu.mirror(pta, pseg1[:, 0], pseg1[:, 1])
                    Mphe = geu.mirror(phe, pseg1[:, 0], pseg1[:, 1])
                    typ, prob = cn.belong_seg(Mpta, Mphe)
                    # printi0,i1
                    # if ((i0 == (6, 0)) & (i1 == (7, 0))):
                    #    pdb.set_trace()
                    # if bs.any():
                    #    plu.displot(pta[:,bs],phe[:,bs],color='g')
                    # if ~bs.any():
                    #    plu.displot(pta[:,~bs],phe[:,~bs],color='m')
                    # plt.show()
                    # pdb.set_trace())
                ########
                # SOMETIMES PROBA IS 0 WHEREAS SEG IS SEEN
                ###########
                # # keep segment with prob above a threshold
                # isegkeep = isegments[prob>0]
                # # dict {numint : proba}
                # dsegprob = {k:v for k,v in zip(isegkeep,prob[prob>0])}
                # 4 lines are replaced by
                # keep segment with prob above a threshold
                # NOTE(review): typ/prob are only bound when li1 is 2 or 3;
                # a diffraction central interaction (li1 == 1) would raise
                # NameError here — confirm such edges cannot reach this branch.
                utypseg = typ != 0
                isegkeep = isegments[utypseg]
                # dict {numint : proba}
                dsegprob = {k: v for k, v in zip(isegkeep, prob[utypseg])}
                #########
                # output = [ x for x in rle if x[0] in isegkeep, i2)
                output = [x for x in i2 if x[0] in isegkeep]
                # probint = map(lambda x: dsegprob[x[0]], output)
                probint = [dsegprob[x[0]] for x in output]
                # dict interaction : proba
                dintprob = {k: v for k, v in zip(output, probint)}
                # keep all segment above nstr1 and in Cone if T
                # keep all segment below nstr1 and in Cone if R
        else:
            # central interaction is a point
            # 1) Simple approach
            # output interaction are all visible interactions
            # 2) TO BE DONE
            #
            # output of the diffraction points
            # exploring
            # b
            #          + right of ISB
            #          + right of RSB
            #
            #  + using the wedge cone
            #  + using the incident cone
            #
            # output = nx.neighbors(self.Gi, (nstr1,))
            uout = self.Gi_no.index((nstr1,))
            ui = np.where(self.Gi_A[uout, :].toarray() != 0)[1]
            output = [self.Gi_no[u] for u in ui]
            nout = len(output)
            probint = np.ones(nout)  # temporarybns
            dintprob = {k: v for k, v in zip(output, probint)}
        # NOTE(review): if nstr1 > 0 and no segment was kept, dintprob is
        # unbound and the bare except silently skips the edge — this hides
        # real failures of add_edge as well.
        try:
            self.Gi.add_edge(i0, i1, output=dintprob)
        except:
            pass
def outputGi_mp(self):
    """ filter output of Gi edges (multiprocessing driver)

    Publishes the sparse layout structures as module-level globals so that
    forked worker processes inherit them, then maps ``outputGi_func`` over
    the edges of Gi and merges the resulting (i0, i1, output) edges back
    into Gi.

    Notes
    -----
    Let assume a sequence (nstr0,nstr1,{nstr2A,nstr2B,...}) in a signature.
    This function checks whether this sequence is feasible or not
    , whatever the type of nstr0 and nstr1.
    The feasible outputs from nstr0 to nstr1 are stored in an output field of
    edge (nstr0,nstr1)

    See Also
    --------
    pylayers.util.cone.Cone.from2seg
    pylayers.util.cone.Cone.belong_seg
    """
    # assert('Gi' in self.__dict__)
    # oGipbar=pbar(verbose,total=100.,leave=False,desc='OutputGi',position=tqdmpos)
    # # loop over all edges of Gi
    # Nedges = len(self.Gi.edges())
    # cpt = 100./Nedges
    # print "Gi Nedges :",Nedges
    e = self.Gi.edges()
    #Gi_no = [self.Gi_no]*len(e)
    # densify sparse matrix
    #aGi_A = self.Gi_A.toarray()
    #ap2pc = self.p2pc.toarray()
    #asgsg = self.sgsg.toarray()
    #as2pc = self.s2pc.toarray()
    #as2pu = self.s2pu.toarray()
    # Globals below are the read-only state shared with the worker
    # processes (inherited at fork time instead of being pickled per task).
    global Gi_A
    global Gi_no
    global p2pc
    global sgsg
    global s2pc
    global s2pu
    Gi_A = self.Gi_A
    Gi_no = self.Gi_no
    p2pc = self.p2pc
    sgsg = self.sgsg
    s2pc = self.s2pc
    s2pu = self.s2pu
    #Gi_A = [aGi_A]*len(e)
    #p2pc = [ap2pc]*len(e)
    #s2pc = [as2pc]*len(e)
    #s2pu = [as2pu]*len(e)
    #sgsg = [asgsg]*len(e)
    pool = Pool(cpu_count())
    # multiprocessing style
    #Z=zip(e, Gi_no, Gi_A, p2pc, sgsg, s2pc, s2pu)
    #res = pool.map(outputGi_func,Z)
    # each work item is a 1-tuple holding one Gi edge
    # NOTE(review): the module-level definition of outputGi_func appears to
    # be commented out in this file — verify it exists before calling,
    # otherwise this map raises NameError.
    Z = zip(e)
    res = pool.map(outputGi_func, Z)
    self.Gi.add_edges_from(res)
    # res = pool.map(outputGi_func_test,e)
    # print('e')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,Gi_no)
    # print('no')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,Gi_A)
    # print('A')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,Gspos)
    # print('pos')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,sgsg)
    # print('sgsg')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,s2pc)
    # print('s2pc')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,s2pu)
    # print('s2pu')
    # time.sleep(1)
    # res = pool.map(outputGi_func_test,Z)
    # print('Z')
#def outputGi_func(arg):
# if (k%100)==0:
# print"edge : ",k
# extract both termination interactions nodes
#for k in arg:
# Z=arg*arg
# e=arg[0]
# s2pc=arg[1]
# Gs=arg[2]
# Gi=arg[3]
# i0 = e[0]
# i1 = e[1]
# nstr0 = i0[0]
# nstr1 = i1[0]
# print(i0,i1)
# for k in range(1000):
# y=k*k
# # list of authorized outputs. Initialized void
# output = []
# # nstr1 : segment number of central interaction
# if nstr1 > 0:
# # central interaction is a segment
# pseg1 = np.array(s2pc[nstr1,:].todense()).reshape(2, 2).T
# # create a Cone object
# cn = cone.Cone()
# # if starting from segment
# if nstr0 > 0:
# pseg0 = np.array(s2pc[nstr0,:].todense()).reshape(2, 2).T
# # if nstr0 and nstr1 are connected segments
# if (len(np.intersect1d(nx.neighbors(Gs, nstr0), nx.neighbors(Gs, nstr1))) == 0):
# # from 2 not connected segment
# cn.from2segs(pseg0, pseg1)
# else:
# # from 2 connected segments
# cn.from2csegs(pseg0, pseg1)
# # if starting from a point
# else:
# pt = np.array(Gs.pos[nstr0])
# cn.fromptseg(pt, pseg1)
# # list all potential successors of interaction i1
# i2 = nx.neighbors(Gi, i1)
# ipoints = [x for x in i2 if len(x)==1 ]
# #ipoints = [ x for x in rle if len(x) == 1, i2)
# pipoints = np.array([Gs.pos[ip[0]] for ip in ipoints]).T
# # filter tuple (R | T)
# #istup = filter(lambda x : type(eval(x))==tuple,i2)
# # map first argument segment number
# #isegments = np.unique(map(lambda x : eval(x)[0],istup))
# isegments = np.unique(
# filter(lambda y: y > 0, map(lambda x: x[0], i2)))
# # if nstr0 and nstr1 are adjescent segment remove nstr0 from
# # potential next interaction
# # Fix 01/2017
# # This is not always True if the angle between
# # the two adjascent segments is < pi/2
# nb_nstr0 = Gs.neighbors(nstr0)
# nb_nstr1 = Gs.neighbors(nstr1)
# common_point = np.intersect1d(nb_nstr0,nb_nstr1)
# if len(common_point) == 1:
# num0 = [x for x in nb_nstr0 if x != common_point]
# num1 = [x for x in nb_nstr1 if x != common_point]
# p0 = np.array(Gs.pos[num0[0]])
# p1 = np.array(Gs.pos[num1[0]])
# pc = np.array(Gs.pos[common_point[0]])
# v0 = p0-pc
# v1 = p1-pc
# v0n = v0/np.sqrt(np.sum(v0*v0))
# v1n = v1/np.sqrt(np.sum(v1*v1))
# if np.dot(v0n,v1n)<=0:
# isegments = np.array([ x for x in isegments if x != nstr0 ])
# # [ x for x in rle if x != nstr0, isegments))
# # there are one or more segments
# if len(isegments) > 0:
# points = np.array(s2pc[isegments,:].todense()).T
# pta = points[0:2, :]
# phe = points[2:, :]
# # add difraction points
# # WARNING Diffraction points are added only if a segment is seen
# # it should be the case in 99% of cases
# if len(ipoints) > 0:
# isegments = np.hstack(
# (isegments, np.array(ipoints)[:, 0]))
# pta = np.hstack((pta, pipoints))
# phe = np.hstack((phe, pipoints))
# # cn.show()
# # if i0 == (38,79) and i1 == (135,79,23):
# # printi0,i1
# # import ipdb
# # ipdb.set_trace()
# # i1 : interaction T
# if len(i1) == 3:
# typ, prob = cn.belong_seg(pta, phe)
# # if bs.any():
# # plu.displot(pta[:,bs],phe[:,bs],color='g')
# # if ~bs.any():
# # plu.displot(pta[:,~bs],phe[:,~bs],color='k')
# # i1 : interaction R --> mirror
# if len(i1) == 2:
# Mpta = geu.mirror(pta, pseg1[:, 0], pseg1[:, 1])
# Mphe = geu.mirror(phe, pseg1[:, 0], pseg1[:, 1])
# typ, prob = cn.belong_seg(Mpta, Mphe)
# # printi0,i1
# # if ((i0 == (6, 0)) & (i1 == (7, 0))):
# # pdb.set_trace()
# # if bs.any():
# # plu.displot(pta[:,bs],phe[:,bs],color='g')
# # if ~bs.any():
# # plu.displot(pta[:,~bs],phe[:,~bs],color='m')
# # plt.show()
# # pdb.set_trace())
# ########
# # SOMETIMES PROBA IS 0 WHEREAS SEG IS SEEN
# ###########
# # # keep segment with prob above a threshold
# # isegkeep = isegments[prob>0]
# # # dict {numint : proba}
# # dsegprob = {k:v for k,v in zip(isegkeep,prob[prob>0])}
# # 4 lines are replaced by
# # keep segment with prob above a threshold
# utypseg = typ != 0
# isegkeep = isegments[utypseg]
# # dict {numint : proba}
# dsegprob = {k: v for k, v in zip(isegkeep, prob[utypseg])}
# #########
# # output = [ x for x in rle if x[0] in isegkeep, i2)
# output = [x for x in i2 if x[0] in isegkeep]
# # probint = map(lambda x: dsegprob[x[0]], output)
# probint = [dsegprob[x[0]] for x in output]
# # dict interaction : proba
# dintprob = {k: v for k, v in zip(output, probint)}
# # keep all segment above nstr1 and in Cone if T
# # keep all segment below nstr1 and in Cone if R
# else:
# # central interaction is a point
# # 1) Simple approach
# # output interaction are all visible interactions
# # 2) TO BE DONE
# #
# # output of the diffraction points
# # exploring
# # b
# # + right of ISB
# # + right of RSB
# #
# # + using the wedge cone
# # + using the incident cone
# #
# output = nx.neighbors(Gi, (nstr1,))
# nout = len(output)
# probint = np.ones(nout) # temporarybns
# dintprob = {k: v for k, v in zip(output, probint)}
# return(i0,i1,dintprob)
#self.Gi.add_edge(i0, i1, output=dintprob)
def intercy(self, ncy, typ='source'):
    """ return the list of interactions seen from a cycle

    Parameters
    ----------
    ncy : int
        cycle number (Project -> save project)
    typ : string
        'source' : keep T interactions whose source cycle is ncy
        'target' : keep T interactions whose target cycle is ncy
        'all'    : keep every T interaction

    Returns
    -------
    lR : list of tuple
        reflection interactions (seg, cy) attached to cycle ncy
    lT : list of tuple
        transmission interactions (seg, cy_in, cy_out) filtered per `typ`
    lD : list of tuple
        diffraction interactions (pt,) whose wedge sees cycle ncy

    Notes
    -----
    This method is called at the beginning of signature evaluation in order
    to get the starting and ending interactions. It exploits the
    information contained in the graph Gi.
    """
    # list of interactions (Gi nodes are 1/2/3-tuples for D/R/T)
    lint = self.Gi.node
    # list of tuple interactions (R|T)
    lR = [x for x in lint if len(x) == 2]
    lT = [x for x in lint if len(x) == 3]
    # visible R : source cycle is ncy
    lR = [x for x in lR if x[1] == ncy]
    if typ == 'source':
        lT = [x for x in lT if x[1] == ncy]
    if typ == 'target':
        lT = [x for x in lT if x[2] == ncy]
    # typ == 'all' : lT is kept unfiltered
    # Finding the diffraction points
    # Diffraction points are different from indoor cycle and outdoor
    # cycles
    #
    # TODO check wedge validity.
    #
    vnodes = self.Gt.node[ncy]['polyg'].vnodes
    vpoints = [x for x in vnodes if x < 0]
    lD = []
    for x in vpoints:
        if x in self.ddiff:
            # ddiff[x][0] lists the cycles seen by the wedge at point x
            for y in self.ddiff[x][0]:
                if y == ncy:
                    lD.append((x,))
    # indoor = self.Gt.node[ncy]['indoor']
    # if indoor:
    #     lD = map(lambda y : (y,),filter(lambda x : x in
    #                                     self.ldiffin,vpoints))
    # else:
    #     lD = map(lambda y : (y,),filter(lambda x : x in
    #                                     self.ldiffout,vpoints))
    return lR, lT, lD
def show(self, **kwargs):
    """ show layout

    Plot the layout slab by slab (air walls filtered out), highlight
    degree-1 points in red and degree-4 points in green, and, when the
    layout carries a geographic `extent`, export the figure as a KMZ
    overlay.

    See also
    --------
    showG
    """
    defaults = {'show': True,
                'fig': [],
                'ax': [],
                'nodes': False,
                'edges': True,
                'labels': False,
                'alphan': 1.0,
                'alphae': 1.0,
                'width': 2,
                'node_color': 'w',
                'edge_color': 'k',
                'node_size': 200,
                'font_size': 30,
                'nodelist': [],
                'figsize': (5, 5),
                'mode': 'cycle',
                }
    for key, value in defaults.items():
        if key not in kwargs:
            kwargs[key] = value
    # collect air-wall segments : they are not displayed
    lair = []
    if 'AIR' in self.name:
        lair = self.name['AIR']
    if '_AIR' in self.name:
        lair = lair + self.name['_AIR']
    #
    # tsg : list of segment index for mapping with self.tahe
    segfilt = [x for x in self.tsg if x not in lair]
    # get the association between segment and nx edges
    edges = self.Gs.edges()
    Ne = len(edges)
    # segments = np.array(edges)[:,0]
    # segments are >0 index so max is necessarily
    # a segment number whatever the order
    segments = np.array([max(x) for x in edges])
    dse = {k: v for k, v in zip(segments, range(Ne))}
    # FIX: the original wrapped a Python-3 `map` object in np.array, which
    # yields a 0-d object array (the index pairs were never materialized);
    # a list comprehension builds the [idx-1, idx] pairs correctly.
    edfilt = list(
        np.ravel(np.array([[dse[x] - 1, dse[x]] for x in segfilt])))
    # edgelist is to be understood as edges of Graph and not segments of
    # Layout
    if hasattr(self, 'extent'):
        fig, ax = gkml.gearth_fig(self.extent, self.extent_c)
    else:
        # FIX: plt.gca() returns an Axes; a Figure is expected here
        # (fig is later forwarded to showG and used for savefig)
        fig = plt.gcf()
        ax = plt.gca()
    fig, ax = self.showG('s', nodes=False, edgelist=edfilt, fig=fig, ax=ax)
    # display degree 1 nodes (dangling points) in red
    if 1 in self.degree:
        ldeg1 = list(self.degree[1])
        print(ldeg1)
        fig, ax = self.showG('s',
                             fig=fig,
                             ax=ax,
                             nodelist=ldeg1,
                             edges=kwargs['edges'],
                             nodes=kwargs['nodes'],
                             node_size=kwargs['node_size'],
                             node_color='r')
    # display degree 4 nodes in green
    if 4 in self.degree:
        ldeg4 = list(self.degree[4])
        fig, ax = self.showG('s',
                             fig=fig,
                             ax=ax,
                             nodelist=ldeg4,
                             edges=kwargs['edges'],
                             nodes=kwargs['nodes'],
                             node_size=kwargs['node_size'],
                             node_color='g')
    # geo-referenced layout : dump the figure into a kmz overlay
    if hasattr(self, 'extent'):
        pnglayout = 'kmllayout.png'
        kmzlayout = 'kmzlayout.kmz'
        fig.savefig(pnglayout, transparent=True, format='png')
        gkml.make_kml(self.extent,
                      figs=[pnglayout],
                      kmzfile=kmzlayout,
                      name='Layout')
    # if k==1:
    #    fig,ax = self.showG('s',fig=fig,ax=ax,nodelist=ldeg,edges=False,nodes=True,node_size=50,node_color='c')
    # if k==4:
    #    fig,ax = self.showG('s',fig=fig,ax=ax,nodelist=ldeg,nodes=False,node_size=50,node_color='b')
def showG(self, graph='s', **kwargs):
    """ show the different graphs

    Parameters
    ----------
    graph : char
        't' : Gt 'r' : Gr 's' : Gs 'v' : Gv 'i' : Gi
    fig : matplotlib figure
        []
    ax : matplotlib axes
        []
    show : boolean
        False
    nodes : boolean
        False
    edges : boolean
        True
    airwalls | aw : boolean
        display airwalls (False)
    subseg : boolean
        display subsegments (False)
    slab : boolean
        display color and width of slabs (False)
    labels : boolean | list
        display graph labels (False)
        if list precise label of which graph to display
        (e.g. ['t'])
    alphan : float
        transparency of nodes (1.0)
    alphae : float
        transparency of edges (1.0)
    width : float
        line width (2)
    node_color : string
        'w'
    posnode_color : string
        positive node color ('k')
    negnode_color : string
        negative node color ('b')
    edge_color : string
        'k'
    node_size : float
        20
    font_size : float
        15
    nodelist : list
        list of nodes to be displayed (all)
    edgelist : list
        list of edges to be displayed (all)
    mode : string
        'cycle' | 'none' | 'room'
    alphacy : string
        transparency of cycles (0.8)
    colorcy :
        '#abcdef'
    linter : list
        list of interaction for Gi
        ['RR','TT','RT','TR','RD','DR','TD','DT','DD']
    show0 : boolean
        If true display connection to cycle 0 of Gt (False)
    eded : boolean
        True
    ndnd : boolean
        True
    nded : boolean
        True
    overlay : boolean
    diffraction : boolean
        False

    Examples
    --------

    .. plot::
        :include-source:

        >>> from pylayers.gis.layout import *
        >>> import matplotlib.pyplot as plt
        >>> L = Layout('TA-Office.lay')
        >>> L.dumpr()
        >>> fig = plt.figure(figsize=(10,10))
        >>> ax = fig.add_subplot(221)
        >>> fig,ax = L.showG('s',fig=fig,ax=ax)
        >>> tis = plt.title("Gs")
        >>> ax = fig.add_subplot(222)
        >>> fig,ax = L.showG('t',fig=fig,ax=ax)
        >>> tit = plt.title("Gt")
        >>> ax = fig.add_subplot(223)
        >>> fig,ax = L.showG('r',fig=fig,ax=ax)
        >>> tic = plt.title("Gr")
        >>> ax = fig.add_subplot(224)
        >>> fig,ax = L.showG('v',fig=fig,ax=ax)
        >>> tiv = plt.title("Gv")
        >>> plt.show()

    See Also
    --------
    pylayers.util.graphutil.draw
    """
    defaults = {'show': False,
                'fig': [],
                'ax': [],
                'nodes': [],
                'edges': True,
                'sllist': [],
                'airwalls': False,
                'aw': [],
                'subseg': False,
                'slab': True,
                'labels': False,
                'alphan': 1.0,
                'alphae': 1.0,
                'width': 2,
                'node_color': 'w',
                'edge_color': '',
                'node_size': 20,
                'font_size': 15,
                'nodelist': [],
                'edgelist': [],
                'figsize': (8, 8),
                'mode': 'nocycle',
                'alphacy': 0.8,
                'colorcy': '#abcdef',
                'lvis': ['nn', 'ne', 'ee'],
                'linter': ['RR', 'TT', 'RT', 'TR', 'RD', 'DR', 'TD', 'DT', 'DD'],
                'show0': False,
                'axis': True,
                'overlay': False,
                'diffraction': False
                }
    for key, value in defaults.items():
        if key not in kwargs:
            kwargs[key] = value
    # 'aw' is an alias of 'airwalls'
    if kwargs['aw'] != []:
        kwargs['airwalls'] = kwargs['aw']
    if 'graph' in kwargs:
        graph = kwargs['graph']
    # get color dictionnary from pyutil
    cold = pyu.coldict()
    # normalize 'labels' to a list/str of graph letters
    if isinstance(kwargs['labels'], list):
        labels = kwargs['labels']
    elif kwargs['labels'] == True:
        labels = ['s', 't', 'v', 'i', 'w']
    elif isinstance(kwargs['labels'], str):
        labels = kwargs['labels']
    else:
        labels = []
    # normalize 'nodes' the same way
    if isinstance(kwargs['nodes'], list):
        dis_nodes = kwargs['nodes']
    elif kwargs['nodes'] == True:
        dis_nodes = ['s', 't', 'v', 'i', 'w']
    elif isinstance(kwargs['nodes'], str):
        dis_nodes = kwargs['nodes']
    else:
        dis_nodes = []
    #
    # s : structure graph
    #
    if 's' in graph:
        # not efficient
        G = self.Gs
        # lss = [ x for x in self.Gs.nodes if # self.Gs.node[x].has_key('ss_name')]
        # lss = [ x for x in lss if len(self.Gs.node[x]['ss_name'])>0 ]
        # keep track of segments already printed
        nodelistbkup = kwargs['nodelist']
        edgelistbkup = kwargs['edgelist']
        widthbkup = kwargs['width']
        nodecolbkup = kwargs['edge_color']
        try:
            sllist = [kwargs['sllist'].pop()]
        except:
            sllist = list(dict(self.name).keys())
        #
        # Draw segment slab per slab with proper linewidth and color
        #
        for lmat in sllist:
            #print(lmat)
            lseg = self.name[lmat]
            if lseg != []:
                lseg2 = [np.where(np.array(self.Gs.edges()) == i)[0] for i in lseg]
                kwargs['edgelist'] = []
                for y in lseg2:
                    kwargs['edgelist'] = kwargs['edgelist'] + list(y)
                #kwargs['edgelist'] = list(reduce(lambda x, y: list(x) + list(y), lseg2))
                if kwargs['slab']:
                    if self.sl[lmat]['color'][0] == "#":
                        kwargs['edge_color'] = self.sl[lmat]['color']
                    else:
                        kwargs['edge_color'] = cold[self.sl[lmat]['color']]
                    kwargs['width'] = self.sl[lmat]['linewidth']
                else:
                    kwargs['edge_color'] = 'k'
                    kwargs['width'] = 1
                if 's' in labels:
                    kwargs['labels'] = True
                else:
                    kwargs['labels'] = False
                if 's' in dis_nodes:
                    kwargs['nodes'] = True
                else:
                    kwargs['nodes'] = False
                kwargs['fig'], kwargs['ax'] = gru.draw(G, **kwargs)
        # restore the caller's kwargs after the per-slab mutations
        kwargs['nodelist'] = nodelistbkup
        kwargs['width'] = widthbkup
        kwargs['edge_color'] = nodecolbkup
        kwargs['edgelist'] = edgelistbkup
        if kwargs['subseg']:
            #
            # Display doors and windows subsegments with a slight offset
            #
            cold = pyu.coldict()
            d = self.subseg()
            for ss in list(dict(d).keys()):
                color = cold[self.sl[ss]['color']]
                for ns in d[ss]:
                    norm = self.Gs.node[ns[0]]['norm']
                    # v1.1 np1, np2 = self.Gs.neighbors(ns[0])
                    np1, np2 = self.Gs[ns[0]]
                    x = np.array(
                        [self.Gs.pos[np1][0], self.Gs.pos[np2][0]])
                    y = np.array(
                        [self.Gs.pos[np1][1], self.Gs.pos[np2][1]])
                    xoff = (1 + ns[1]) * 0.05 * norm[0]
                    yoff = (1 + ns[1]) * 0.05 * norm[1]
                    kwargs['ax'].plot(x + xoff, y + yoff,
                                      linewidth=2, color=color)
    #
    # t : graph of cycles
    #
    if 't' in graph:
        G = self.Gt
        if not kwargs['show0']:
            # filter out the 0 cycle
            nodes = list(G.nodes())
            edges = list(G.edges())
            nodf = [x for x in nodes if x != 0]
            edf = [x for x in np.arange(len(edges)) if ((edges[x][0] != 0) &
                                                        (edges[x][1] != 0))
                   ]
            kwargs['nodelist'] = nodf
            kwargs['edgelist'] = edf
        else:
            kwargs['nodelist'] = G.nodes()
            kwargs['edgelist'] = np.arange(len(G.edges()))
        if kwargs['edge_color'] == '':
            kwargs['edge_color'] = 'r'
        if 't' in labels:
            kwargs['labels'] = True
        else:
            kwargs['labels'] = False
        if 't' in dis_nodes:
            kwargs['nodes'] = True
        else:
            kwargs['nodes'] = False
        fig, ax = gru.draw(G, **kwargs)
        kwargs['fig'] = fig
        kwargs['ax'] = ax
    #
    # r : graph of rooms
    #
    if 'r' in graph:
        G = self.Gr
        if kwargs['edge_color'] == '':
            kwargs['edge_color'] = 'g'
        # draw the structure first as a background
        kwargs['fig'], kwargs['ax'] = gru.draw(self.Gs,
                                               nodes=False, edges=True, alphacy=1.,
                                               fig=kwargs['fig'], ax=kwargs['ax'], labels=False)
        if 'r' in labels:
            kwargs['labels'] = True
        else:
            kwargs['labels'] = False
        if 'r' in dis_nodes:
            kwargs['nodes'] = True
        else:
            kwargs['nodes'] = False
        fig, ax = gru.draw(G, **kwargs)
        kwargs['fig'] = fig
        kwargs['ax'] = ax
    #
    #  v : visibility graph
    #  In blue : segment segment
    #  In red  : point point (Diffraction)
    #  In green : point segment
    #
    if 'v' in graph:
        G = self.Gv
        G.pos = {}
        # nodes of Gv are nodes of Gs
        G.pos.update(self.Gs.pos)
        if kwargs['edge_color'] == '':
            kwargs['edge_color'] = 'm'
        edges = list(G.edges())
        rle = range(len(edges))
        # partition edges by endpoint sign : seg-seg / pt-pt / pt-seg
        eded = [x for x in rle if (edges[x][0] > 0) & (edges[x][1] > 0)]
        ndnd = [x for x in rle if (edges[x][0] < 0) & (edges[x][1] < 0)]
        nded = [x for x in rle if (((edges[x][0] < 0) & (edges[x][1] > 0)) |
                                   ((edges[x][0] > 0) & (edges[x][1] < 0)))]
        if 'v' in labels:
            kwargs['labels'] = True
        else:
            kwargs['labels'] = False
        if 'v' in dis_nodes:
            kwargs['nodes'] = True
        else:
            kwargs['nodes'] = False
        if 'ee' in kwargs['lvis']:
            kwargs['edgelist'] = eded
            kwargs['edge_color'] = 'blue'
            kwargs['node_size'] = 200
            kwargs['fig'], kwargs['ax'] = gru.draw(G, **kwargs)
        if 'nn' in kwargs['lvis']:
            kwargs['edgelist'] = ndnd
            kwargs['edge_color'] = 'red'
            kwargs['fig'], kwargs['ax'] = gru.draw(G, **kwargs)
        if 'ne' in kwargs['lvis']:
            kwargs['edgelist'] = nded
            kwargs['edge_color'] = 'green'
            kwargs['fig'], kwargs['ax'] = gru.draw(G, **kwargs)
    #
    # i :  interaction graph
    #
    if 'i' in graph:
        G = self.Gi
        if kwargs['edge_color'] == '':
            kwargs['edge_color'] = 'k'
        #
        # Parsing the type of interactions
        #
        edges = list(G.edges())
        # range len edges
        rle = range(len(edges))
        # classify edges by (len of tail, len of head) interaction tuples :
        # 1 = D(iffraction), 2 = R(eflection), 3 = T(ransmission)
        DD = [x for x in rle if ((len(edges[x][0]) == 1) &
                                 (len(edges[x][1]) == 1))]
        RR = [x for x in rle if ((len(edges[x][0]) == 2) &
                                 (len(edges[x][1]) == 2))]
        TT = [x for x in rle if ((len(edges[x][0]) == 3) &
                                 (len(edges[x][1]) == 3))]
        RT = [x for x in rle if ((len(edges[x][0]) == 2) &
                                 (len(edges[x][1]) == 3))]
        TR = [x for x in rle if ((len(edges[x][0]) == 3) &
                                 (len(edges[x][1]) == 2))]
        RD = [x for x in rle if ((len(edges[x][0]) == 2) &
                                 (len(edges[x][1]) == 1))]
        TD = [x for x in rle if ((len(edges[x][0]) == 3) &
                                 (len(edges[x][1]) == 1))]
        DR = [x for x in rle if ((len(edges[x][0]) == 1) &
                                 (len(edges[x][1]) == 2))]
        DT = [x for x in rle if ((len(edges[x][0]) == 1) &
                                 (len(edges[x][1]) == 3))]
        tabcol = ['b', 'g', 'r', 'm', 'c', 'orange',
                  'purple', 'maroon', 'purple', 'k'][::-1]
        li = []
        if 'i' in labels:
            kwargs['labels'] = True
        else:
            kwargs['labels'] = False
        # NOTE(review): this checks 'v' in dis_nodes although we are in the
        # 'i' branch — possibly a copy-paste slip; verify intended behavior.
        if 'v' in dis_nodes:
            kwargs['nodes'] = True
        else:
            kwargs['nodes'] = False
        for inter in kwargs['linter']:
            # eval maps the name ('RR', 'TT', ...) to the local list above
            if len(eval(inter)) > 0:
                li.append(inter)
                kwargs['edgelist'] = eval(inter)
                # ndlist = map(lambda x: edges[x][0],kwargs['edgelist'])+\
                #          map(lambda x: edges[x][1],kwargs['edgelist'])
                #ndlist = map(lambda x: edges[x][0], kwargs['edgelist']) +\
                #    map(lambda x: edges[x][1], kwargs['edgelist'])
                ndlist = [edges[x][0] for x in kwargs['edgelist']] + [edges[x][1] for x in kwargs['edgelist']]
                # keep only unique interaction
                unique = []
                [unique.append(it) for it in ndlist if it not in unique]
                kwargs['nodelist'] = unique
                kwargs['edge_color'] = tabcol.pop()
                kwargs['fig'], kwargs['ax'] = gru.draw(G, **kwargs)
        legtxt = ['Gs'] + li
        # plt.legend(legtxt)
    #
    # w :  waypoint graph
    #
    if 'w' in graph:
        G = self.Gw
        if kwargs['edge_color'] == '':
            kwargs['edge_color'] = 'k'
        kwargs['fig'], kwargs['ax'] = gru.draw(self.Gs,
                                               nodes=False, edges=True, alphacy=1.,
                                               fig=kwargs['fig'], ax=kwargs['ax'], labels=False)
        if 'w' in labels:
            kwargs['labels'] = True
        else:
            kwargs['labels'] = False
        fig, ax = gru.draw(G, **kwargs)
        kwargs['fig'] = fig
        kwargs['ax'] = ax
    args = {'fig': kwargs['fig'], 'ax': kwargs['ax'], 'show': False}
    if len(kwargs['edgelist']) == 0:
        if kwargs['mode'] == 'cycle':
            for k, ncy in enumerate(list(dict(self.Gt.node).keys())):
                if k != 0:
                    fig, ax = self.Gt.node[ncy]['polyg'].plot(
                        alpha=kwargs['alphacy'], color=kwargs['colorcy'], **args)
                    args['fig'] = fig
                    args['ax'] = ax
        if kwargs['mode'] == 'room':
            # NOTE(review): dict(self.Gr.node.keys()) looks suspicious
            # (dict() over a keys view) — verify against the 'cycle' branch
            # which uses dict(self.Gt.node).keys().
            for k, nro in enumerate(list(dict(self.Gr.node.keys()))):
                if k != 0:
                    fig, ax = self.Gr.node[nro]['cycle'].show(**args)
                    args['fig'] = fig
                    args['ax'] = ax
    kwargs['ax'].axis('scaled')
    if not kwargs['axis']:
        kwargs['ax'].axis('off')
    if kwargs['overlay']:
        imok = False
        if self.display['overlay_file'] != '':
            image = Image.open(os.path.join(
                pro.basename, pro.pstruc['DIRIMAGE'], self.display['overlay_file']))
            imok = True
        if imok:
            if 'v' in self.display['overlay_flip']:
                print("flip v")
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            if 'h' in self.display['overlay_flip']:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
                print("flip h")
            plt.axis()
            kwargs['ax'].imshow(np.array(image), extent=self.display[
                'overlay_axis'], alpha=self.display['alpha'], origin='lower')
    if kwargs['diffraction']:
        # overlay diffraction points (red) and subsegment points (blue)
        if len(self.ddiff.keys()) > 0:
            pt = np.array([self.Gs.pos[x] for x in self.ddiff.keys()])
            pta = np.array([self.Gs.pos[x] for x in self.lnss])
            kwargs['ax'].scatter(pt[:, 0], pt[:, 1], c='r', s=75)
            if len(self.lnss) > 0:
                kwargs['ax'].scatter(pta[:, 0], pta[:, 1], c='b', s=20)
    if kwargs['show']:
        plt.show()
    return kwargs['fig'], kwargs['ax']
def _showGv(self, **kwargs):
    """ show graph Gv (visibility)

    Parameters
    ----------
    show : boolean
        call plt.show() at the end
    ax : matplotlib axes or []
        axes to draw into ([] creates a new figure)
    nodes : boolean
        display nodes
    eded, ndnd, nded : boolean
        display seg-seg (blue), pt-pt (red) and pt-seg (green) edges
    linewidth : int
        edge line width

    Returns
    -------
    ax : axes instance
    """
    defaults = {'show': False,
                'ax': [],
                'nodes': False,
                'eded': True,
                'ndnd': True,
                'nded': True,
                'linewidth': 2,
                }
    # mirror every option onto self, falling back on the defaults,
    # and make sure kwargs carries the resolved values
    for opt, fallback in defaults.items():
        if opt in kwargs:
            setattr(self, opt, kwargs[opt])
        else:
            setattr(self, opt, fallback)
            kwargs[opt] = fallback
    if kwargs['ax'] == []:
        fig = plt.figure()
        ax = fig.gca()
    else:
        ax = kwargs['ax']
    # split Gv nodes into points (negative) and segments (positive)
    allnodes = np.array(self.Gv.nodes())
    uneg = list(allnodes[np.nonzero(allnodes < 0)[0]])
    upos = list(allnodes[np.nonzero(allnodes > 0)[0]])
    if kwargs['nodes']:
        nx.draw_networkx_nodes(self.Gv, self.Gs.pos, nodelist=upos,
                               node_color='blue', node_size=300, alpha=0.3)
        nx.draw_networkx_nodes(self.Gv, self.Gs.pos, nodelist=uneg,
                               node_color='red', node_size=300, alpha=0.3)
        nx.draw_networkx_labels(self.Gv, self.Gs.pos)
    ndnd, nded, eded = gru.edgetype(self.Gv)
    # draw each edge family in its own color, in the original order
    for flag, elist, col in (('eded', eded, 'blue'),
                             ('ndnd', ndnd, 'red'),
                             ('nded', nded, 'green')):
        if kwargs[flag]:
            nx.draw_networkx_edges(self.Gv, self.Gs.pos,
                                   edgelist=elist, edge_color=col, width=2)
    if kwargs['show']:
        plt.show()
    return ax
def waypointGw(self, nroom1, nroom2):
    """ get the waypoint between room1 and room2

    Parameters
    ----------
    nroom1
    nroom2

    Examples
    --------

    >>> from pylayers.gis.layout import *
    >>> L = Layout('TA-Office.lay')
    >>> L.build()

    Notes
    -----
    nodes of Gw are no longer room number
    """
    # shortest path in the waypoint graph ...
    path = nx.dijkstra_path(self.Gw, nroom1, nroom2)
    # ... together with the coordinates of each hop
    coords = [tuple(self.Gw.pos[node]) for node in path]
    return (path, coords)
def thwall(self, offx, offy):
    """ Create a list of wall tuples (Transit.world format )

    Parameters
    ----------
    offx
    offy

    Returns
    -------
    walls : list of wall tuples  (Transit format)

    Examples
    --------

    >>> from pylayers.gis.layout import *
    >>> L = Layout('DLR.lay')
    >>> walls = L.thwall(0,0)
    """
    offset = np.array([offx, offy])
    walls = []
    for nd in list(dict(self.Gs.node).keys()):
        # only segments (positive Gs nodes) yield walls
        if nd <= 0:
            continue
        #v1.1 nb = self.Gs.neighbors(nd)
        nb = list(dict(self.Gs[nd]).keys())
        pta = self.Gs.pos[nb[0]]
        phe = self.Gs.pos[nb[1]]
        pn = self.Gs.node[nd]['norm']
        name = self.Gs.node[nd]['name']
        transition = self.Gs.node[nd]['transition']
        sl = self.sl[name]
        thick = sum(sl['lthick'])
        # offset each endpoint by half the slab thickness along the normal
        half = np.array((pn[0], pn[1])) * thick / 2.
        p1 = np.array(pta) + half + offset
        p2 = np.array(phe) + half + offset
        p3 = np.array(phe) - half + offset
        p4 = np.array(pta) - half + offset
        # transitions (doors) and air walls are not physical walls
        if not transition and name != 'AIR':
            walls.append((tuple(p1), tuple(p2), tuple(p3), tuple(p4)))
    return walls
def ptin(self, pt=np.array((0, 0, 0))):
    """ Check whether a point lies inside the Layout bounding box.

    Parameters
    ----------
    pt : point (ndarray)
        only the first two coordinates (x, y) are used

    Returns
    -------
    boolean : True if inside the bounding box self.ax

    See Also
    --------
    ispoint
    """
    p2 = pt[:2]
    # self.ax holds (xmin, xmax, ymin, ymax)
    xlim = np.array(self.ax[:2])
    ylim = np.array(self.ax[2:])
    within_x = (p2[0] >= xlim[0]) and (p2[0] <= xlim[1])
    within_y = (p2[1] >= ylim[0]) and (p2[1] <= ylim[1])
    return (within_x & within_y)
def ptGs2cy(self, n=-1):
    """ Map a Gs node to its cycle number(s).

    Parameters
    ----------
    n : int
        Gs node ; a segment if n > 0, a point if n < 0

    Returns
    -------
    ncy : list of cycle numbers
        for a segment, the 'ncycles' attribute ; for a point, the
        union of the cycles of all adjacent segments
    """
    if n > 0:
        # segment node : cycles are stored directly on the node
        return self.Gs.node[n]['ncycles']
    # point node : gather cycles from every adjacent segment
    cycles = []
    for seg in list(dict(self.Gs[n]).keys()):
        cycles.extend(self.ptGs2cy(seg))
    return np.unique(cycles).tolist()
def isindoor(self, pt=np.array([0, 0])):
    """ Test whether a 2D point lies in an indoor cycle.

    Parameters
    ----------
    pt : np.array 1x2
        2d point

    Returns
    -------
    boolean
        True if the cycle containing pt is flagged indoor
    """
    cycle = self.pt2cy(pt)
    return self.Gt.node[cycle]['indoor']
def pt2cy(self, pt=np.array((0, 0))):
    """ Return the cycle number containing a point.

    Parameters
    ----------
    pt : point (ndarray)

    Returns
    -------
    ncy : cycle number

    Raises
    ------
    NameError
        when the point belongs to no cycle

    See Also
    --------
    Layout.cy2pt
    """
    point = sh.Point(pt[0], pt[1])
    for ncy in list(dict(self.Gt.node).keys()):
        if ncy > 0:
            poly = self.Gt.node[ncy]['polyg']
            # a point on the boundary (touches) or strictly inside
            # (contains) both count as belonging to the cycle
            if poly.touches(point) or poly.contains(point):
                return(ncy)
    raise NameError(str(pt) + " is not in any cycle")
def cy2pt(self, cy=0, h=1.2):
    """ Return a 3D point inside a given cycle.

    Parameters
    ----------
    cy : int
        cycle number
    h : float
        point height

    Returns
    -------
    point : nd.array
        3d point (cycle barycenter position at height h)

    Raises
    ------
    NameError
        when cy is not a node of Gt

    See Also
    --------
    Layout.pt2cy
    """
    if cy not in self.Gt.nodes():
        raise NameError("cycle " + str(cy) + " not in self.Gt")
    xy = np.array((self.Gt.pos[cy]))
    return np.hstack((xy, h))
def pt2ro(self, pt=np.array((0, 0))):
    """ Return the room number containing a point.

    Parameters
    ----------
    pt : point (ndarray)

    Returns
    -------
    nr : Room number

    Raises
    ------
    NameError
        when pt belongs to no room

    Notes
    -----
    If a room contains point pt this function returns the room number
    """
    ptsh = sh.Point(pt[0], pt[1])
    # BUG FIX: was `list(dict(self.Gr.node.keys()))` — building a dict
    # from a keys view of int keys raises TypeError at runtime; the
    # intent is to iterate over the node keys of Gr
    for nr in list(dict(self.Gr.node).keys()):
        if self.Gr.node[nr]['polyg'].contains(ptsh)\
                or self.Gr.node[nr]['polyg'].touches(ptsh):
            return(nr)
    raise NameError(str(pt) + " is not in any room")
def seg2ro(self, seg):
    """ Return the room number(s) whose cycle contains a segment.

    Parameters
    ----------
    seg : int
        segment number

    Returns
    -------
    rooms : list of room numbers (possibly empty)
    """
    rooms = []
    # BUG FIX: was `list(dict(self.Gr.node.keys()))` — dict() over a
    # keys view of int keys raises TypeError; iterate node keys instead
    for nr in list(dict(self.Gr.node).keys()):
        ncy = self.Gr.node[nr]['cycle']
        if seg in self.Gt.node[ncy]['cycle'].cycle:
            rooms.append(nr)
    return rooms
def room2segments(self, room):
    """ Return the sorted non-negative segment numbers of a room.

    Parameters
    ----------
    room : int

    Returns
    -------
    seg : sorted np.array of segment numbers (values >= 0)

    Raises
    ------
    NameError
        when room cannot be resolved through Gr/Gt
    """
    try:
        # old vnodes was there
        ncy = self.Gr.node[room]['cycle']
        seg = self.Gt.node[ncy].cycle
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and SystemExit ; message fixed ("is not in not on Gr")
    except (KeyError, AttributeError, TypeError):
        raise NameError(str(room) + " is not in Gr")
    u = np.where(seg >= 0)
    seg = seg[u]
    return np.sort(seg.tolist())
def room2nodes(self, room):
    """ Return the sorted point (negative) nodes of a room.

    Parameters
    ----------
    room : int

    Returns
    -------
    nod : sorted np.array of point numbers (values < 0)

    Raises
    ------
    NameError
        when room cannot be resolved through Gr/Gt
    """
    try:
        ncy = self.Gr.node[room]['cycle']
        nod = self.Gt.node[ncy].cycle
        #nod = self.Gt.node[self.Gr.node[room]['cycle']]['vnodes']
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and SystemExit ; message fixed ("is not in not on Gr")
    except (KeyError, AttributeError, TypeError):
        raise NameError(str(room) + " is not in Gr")
    u = np.where(nod < 0)
    nod = nod[u]
    return np.sort(nod.tolist())
def get_diffslab(self, npt, lz):
    """ Get the slabs associated to a diffraction point.

    Parameters
    ----------
    npt : int
        diffraction point number (node of Gs, must be in self.ddiff)
    lz : np.array
        candidate heights of the diffraction point

    Returns
    -------
    lseg : list (same length as lz) of lists of segment numbers
    lsl : list (same length as lz) of lists of slab names

    Notes
    -----
    As a diffraction point may involve iso segments, the nature of the
    diffraction interaction depends on the height : a segment x is
    retained at height z when z is in ]z_min(x), z_max(x)].
    Example output :
        [[443, 529], [444, 530]]
        [['WALL', 'WALL'], ['AIR', 'AIR']]
    """
    assert(npt in self.ddiff), logger.error('npt not a diffraction point')
    lcy = self.ddiff[npt][0]
    llz = len(lz)
    dz_seg = {z: [] for z in range(llz)}
    dz_sl = {z: [] for z in range(llz)}
    for cy in lcy:
        vn = set(self.Gt.node[cy]['polyg'].vnodes)
        # v1.1 lneig_pt = set(nx.neighbors(self.Gs,npt))
        lneig_pt = set(self.Gs[npt])
        # segments adjacent to npt belonging to cycle cy
        lseg = lneig_pt.intersection(vn)
        lseg_valid = [x for x in lseg if self.Gs.node[x]['name'] != '_AIR']
        for x in lseg_valid:
            # heights at which segment x spans the candidate z
            zsup = lz > self.Gs.node[x]['z'][0]
            zinf = lz <= self.Gs.node[x]['z'][1]
            uz = np.where(zsup & zinf)[0]
            # FIX: replaced a side-effecting list comprehension by a
            # plain loop
            for i in uz:
                dz_seg[i].append(x)
                dz_sl[i].append(self.Gs.node[x]['name'])
    # FIX: return lists as documented instead of dict views
    # (dict views are not indexable and compare unequal to lists)
    return list(dz_seg.values()), list(dz_sl.values())
def _find_diffractions(self, difftol=0.01, verbose=False, tqdmkwargs=None):
    """ Find the diffraction points of the Layout.

    Parameters
    ----------
    difftol : float
        tolerance in radians on the wedge-angle test
    verbose : bool
        display a tqdm progress bar
    tqdmkwargs : dict or None
        keyword arguments forwarded to tqdm
        (FIX: was a mutable default argument ``{}``)

    Returns
    -------
    None — updates self.ddiff {npt : ([ncy1, ncy2, ...], wedge_angle)}
    """
    if tqdmkwargs is None or tqdmkwargs == {}:
        tqdmkwargs = {'total': 100.,
                      'desc': 'find_diffractions'}
    # per-cycle polygon angles ; row 0 : point numbers, row 1 : angles
    dangles = {cy: np.array(geu.get_pol_angles(self.Gt.node[cy]['polyg']))
               for cy in self.Gt.nodes() if cy != 0}
    #
    # The candidate points for being diffraction points have degree 1 or 2
    # A point diffracts toward one or several cycles
    #
    lpnt = [x for x in self.Gs.node if (x < 0 and x not in self.degree[0])]
    self.ddiff = {}
    if verbose:
        cpt = 1. / (len(lpnt) + 1)
        pbar = tqdm.tqdm(tqdmkwargs)
    for k in lpnt:
        if verbose:
            pbar.update(100. * cpt)
        # list of cycles associated with point k
        lcyk = self.Gs.node[k]['ncycles']
        if len(lcyk) > 2:
            # Subgraph of connected cycles around k
            Gtk = nx.subgraph(self.Gt, lcyk)
            # ordered list of connections between cycles
            try:
                lccyk = nx.find_cycle(Gtk)
            except:
                # NOTE(review): debug residue — drops into pdb when
                # no cycle is found around k ; should probably raise
                pdb.set_trace()
            # list of segment neighbours of point k
            neigh = list(dict(self.Gs[k]).keys())
            # sega : air segments among the neighbours
            sega = [n for n in neigh if
                    (self.Gs.node[n]['name'] == 'AIR' or
                     self.Gs.node[n]['name'] == '_AIR')]
            sega_iso = [n for n in sega if len(self.Gs.node[n]['iso']) > 0]
            sega_eff = list(set(sega).difference(set(sega_iso)))
            # a sector is delimited by non-air segments
            nsector = len(neigh) - len(sega)
            dsector = {i: [] for i in range(nsector)}
            #
            # team building algo : walk the cycle ring around k and
            # group consecutive cycles separated only by effective air
            # segments into the same sector
            #
            ct = 0
            for ccy in lccyk:
                segsep = self.Gt[ccy[0]][ccy[1]]['segment']
                # filter only segments connected to point k (neigh)
                lvseg = [x for x in segsep if x in neigh]
                if len(lvseg) == 1 and (lvseg[0] in sega_eff):  # same sector
                    dsector[ct].append(ccy[1])
                else:  # change sector
                    ct = (ct + 1) % nsector
                    dsector[ct].append(ccy[1])
            # accumulate the wedge angle of each sector at point k
            dagtot = {s: 0 for s in range(nsector)}
            save = []
            for s in dsector:
                for cy in dsector[s]:
                    da = dangles[cy]
                    u = np.where(da[0, :].astype('int') == k)[0][0]
                    save.append((cy, da[1, u]))
                    dagtot[s] = dagtot[s] + da[1, u]
            # a sector whose aggregated angle exceeds pi diffracts
            for s in dagtot:
                if dagtot[s] > (np.pi + difftol):
                    self.ddiff[k] = (dsector[s], dagtot[s])
                    break
        else:
            # diffraction by half-plane detected
            if k in self.degree[1]:
                self.ddiff[k] = (lcyk, 2 * np.pi)
def buildGr(self):
    """ build the graph of rooms Gr

    Notes
    -----
    adjascent rooms are connected
    Gr is at startup a deep copy of Gt
    The difficulty here is to take into account the AIR transition
    segments
    """
    # start from Gt, drop the outside cycle (node 0) and all edges ;
    # edges are re-added only between indoor cycles sharing a real
    # transition (door) segment
    self.Gr = copy.deepcopy(self.Gt)
    self.Gr.remove_node(0)
    self.Gr.remove_edges_from(self.Gt.edges())
    for e in list(self.Gt.edges()):
        if ((not 0 in e) and
            (self.Gt.node[e[0]]['indoor']) and
                (self.Gt.node[e[1]]['indoor'])):
            seg = self.Gt[e[0]][e[1]]['segment']
            seg = np.unique(seg)
            # keep only transition segments which are not on the
            # layout boundary
            trans_seg = [n for n in seg
                         if (self.Gs.node[n]['transition'])
                         and n not in self.segboundary]
            if trans_seg != []:
                self.Gr.add_edge(e[0], e[1], segment=trans_seg)
    deg = dict(self.Gr.degree())
    #pdb.set_trace()
    # isolated rooms (no door to any other room) are removed
    self.Gr.remove_nodes_from([n for n in deg if deg[n] == 0])
def buildGw(self):
    """ build Graph of waypaths

    See Also
    --------
    buildGr

    Notes
    -----
    for all edges of Gr (adjascent room)
    if room1 and room2 have a common transition
    """
    self.Gw = nx.Graph(name='Gw')
    self.Gw.pos = {}
    d_id = max(self.Gr.nodes())  # for numerotation of Gw nodes
    d_id_index = d_id + 1
    for e in self.Gr.edges():  # iterator on Gr edges
        # room nodes of the waypoint graph
        self.Gw.add_node(e[0], room=e[0], door=False)
        self.Gw.add_node(e[1], room=e[1], door=False)
        Ids = self.Gr[e[0]][e[1]]['segment']
        # here is supposed that 2 room may have more than 1 door in common
        for Id in Ids:
            #v1.1 unode = self.Gs.neighbors(Id) # get edge number of common doors
            unode = list(dict(self.Gs[Id]).keys())  # get edge number of common doors
            up0 = self.Gs.pos[unode[0]]
            up1 = self.Gs.pos[unode[1]]
            name = self.Gs.node[Id]['name']
            pn = self.Gs.node[Id]['norm']
            sl = self.sl[name]
            # half thickness of the door slab plus a 0.2 m clearance
            thick = (sum(sl['lthick']) / 2.) + 0.2
            # for ""doors"" extra waypoints points are added
            # in front and back of the aperture.
            # this is not done for AIR slabs
            if 'AIR' not in name:
                # middle of the common door, pushed along +/- normal
                pdoor0 = (np.array(up0) + pn[:2] * thick +
                          np.array(up1) + pn[:2] * thick) / 2.
                pdoor1 = (np.array(up0) - pn[:2] * thick +
                          np.array(up1) - pn[:2] * thick) / 2.
                P0 = sh.Point(pdoor0)
                P1 = sh.Point(pdoor1)
                ep0 = self.Gr.pos[e[0]]
                ep1 = self.Gr.pos[e[1]]
                # assign each side of the door to the room whose
                # polygon contains it
                if self.Gr.node[e[0]]['polyg'].contains(P0):
                    upd0 = d_id_index
                    self.Gw.pos[upd0] = pdoor0
                    self.Gw.add_node(upd0, room=e[0], door=True)
                    self.Gw.add_edges_from([(e[0], upd0)])
                    d_id_index = d_id_index + 1
                    upd1 = d_id_index
                    self.Gw.pos[upd1] = pdoor1
                    self.Gw.add_node(upd1, room=e[1], door=True)
                    self.Gw.add_edges_from([(e[1], upd1)])
                    d_id_index = d_id_index + 1
                else:
                    upd0 = d_id_index
                    self.Gw.pos[upd0] = pdoor0
                    self.Gw.add_node(upd0, room=e[1], door=True)
                    self.Gw.add_edges_from([(e[1], upd0)])
                    d_id_index = d_id_index + 1
                    upd1 = d_id_index
                    self.Gw.pos[upd1] = pdoor1
                    self.Gw.add_node(upd1, room=e[0], door=True)
                    self.Gw.add_edges_from([(e[0], upd1)])
                    d_id_index = d_id_index + 1
                # connect the two sides of the door
                self.Gw.add_edges_from([(upd0, upd1)])
            else:
                # AIR aperture : rooms are directly connected
                self.Gw.add_edges_from([(e[0], e[1])])
    # room node positions inherited from Gr
    self.Gw.pos.update(self.Gr.pos)
def info(self):
    """ Print summary information about the Layout.

    Displays the file name, boundaries, point/segment counts and,
    when available, the sizes of the Gs, Gt and Gr graphs.
    """
    print("filestr : ", self._filename)
    try:
        print("filegeom : ", self.filegeom)
    # FIX: bare `except:` narrowed — only a missing attribute means
    # the geometry file was not generated
    except AttributeError:
        print("geomfile (.off) has not been generated")
    print("boundaries ", self.ax)
    print("number of Points :", self.Np)
    print("number of Segments :", self.Ns)
    print("number of Sub-Segments :", self.Nss)
    try:
        print("Gs Nodes : ", self.Gs.number_of_nodes())
        print("Gs Edges : ", self.Gs.number_of_edges())
    except AttributeError:
        print("no Gs graph")
    try:
        print("Gt Nodes : ", self.Gt.number_of_nodes())
        print("Gt Edges : ", self.Gt.number_of_edges())
        print("vnodes = Gt.node[Nc]['polyg'].vnodes")
        print("poly = Gt.node[Nc]['polyg']")
    except AttributeError:
        print("no Gt graph")
    try:
        print("Gr Nodes :", self.Gr.number_of_nodes())
        print("Gr Edges :", self.Gr.number_of_edges())
    except AttributeError:
        print("no Gr graph")
def facets3D(self, edlist, name='Layer', subseg=False):
    """ Create a geomview LIST file referencing 3D facets.

    Parameters
    ----------
    edlist : iterable of segment numbers
    name : string
        basename of the generated .list file
    subseg : boolean
        forwarded to facet3D
    """
    filestruc = pyu.getlong(name + '.list', pro.pstruc['DIRGEOM'])
    fos = open(filestruc, "w")
    fos.write("LIST{\n")
    for e in edlist:
        # facet3D returns 'void' when nothing was written for e
        facetfile = self.facet3D(e, subseg)
        if facetfile != 'void':
            fos.write('{<' + facetfile + "}\n")
    fos.write("}\n")
    fos.close()
def numseg(self, ta, he, first=True):
    """ Get the segment number(s) joining two point nodes.

    Parameters
    ----------
    ta : int < 0
        tail point node
    he : int < 0
        head point node
    first : Boolean
        if True returns only one among the several iso segments
        else returns a np.array of iso segments

    Returns
    -------
    nseg : int > 0 (or np.array when first is False)
        0 when (ta, he) is not a segment
    """
    # v1.1 nta = np.array(nx.neighbors(self.Gs, ta))
    # v1.1 nhe = np.array(nx.neighbors(self.Gs, he))
    nta = np.array(list(dict(self.Gs[ta]).keys()))
    nhe = np.array(list(dict(self.Gs[he]).keys()))
    # segments adjacent to both points
    nseg = np.intersect1d(nta, nhe)
    # FIX: was `if len(nseg > 0):` — the length of the boolean
    # comparison array, not the number of common segments
    if len(nseg) > 0:
        if first:
            return(nseg[0])
        else:
            return nseg
    else:
        return(0)
def isseg(self, ta, he):
    """ Test whether the pair of points (ta, he) forms a segment.

    Parameters
    ----------
    ta : int < 0
        tail point node
    he : int < 0
        head point node

    Returns
    -------
    boolean

    See Also
    --------
    editor.py
    """
    # transpose point numbering : Gs point node -> index in self.tahe
    upnt = [x for x in self.Gs.nodes() if x < 0]
    ita = np.nonzero(np.array(upnt) == ta)[0][0]
    ihe = np.nonzero(np.array(upnt) == he)[0][0]
    # a segment exists when (ita, ihe) appears, in either order,
    # among the tail/head index pairs
    for t, h in zip(self.tahe[0], self.tahe[1]):
        if (t == ita and h == ihe) or (t == ihe and h == ita):
            return True
    return False
def ispoint(self, pt, tol=0.05):
    """ Check whether pt coincides with an existing Layout point.

    Parameters
    ----------
    pt : point (2,1)
    tol : float
        distance tolerance in meters (default 0.05)

    Returns
    -------
    int
        the point number (< 0) of the closest matching point,
        or 0 when no layout point lies within tol of pt

    See Also
    --------
    pylayers.util.geomutil.Polygon.setvnodes
    """
    coords = np.array(list(self.Gs.pos.values())).T
    keys = np.array(list(self.Gs.pos.keys()))
    delta = coords - pt.reshape(2, 1)
    dist = np.sqrt(np.sum(delta * delta, axis=0))
    far = dist > tol
    if far.all():
        # every layout point differs from pt
        return(0)
    close = np.where(~far)[0]
    if len(close) == 1:
        return(keys[close][0])
    # several candidates : keep the nearest one
    nearest = np.where(dist[close] == min(dist[close]))[0]
    return(keys[close[nearest]][0])
def onseg(self, pt, tol=0.01):
    """ Segment number from point (deprecated).

    Return the segment number(s) which contain point pt.

    Parameters
    ----------
    pt : np.array(1x2)
    tol : float
        colinearity tolerance (default 0.01)

    Returns
    -------
    nbu : np.array
        indices of segments containing pt (empty array when none)
    """
    # FIX: np.array(dict_values) builds a useless 0-d object array
    # under Python 3 — the values must be materialized in a list
    pts = np.array(list(self.Gs.pos.values())).T  # structure points
    ke = np.array(list(dict(self.Gs.pos).keys()))  # point keys
    n = np.shape(pts)[1]
    nbu = np.array([])
    if (n > 0):
        num = np.arange(n)
        # candidate segments whose bounding box contains pt
        b = self.inbox(pt, tol)
        ta = self.tahe[0, b]
        he = self.tahe[1, b]
        nb = num[b]
        n = len(nb)
        p = np.outer(pt, np.ones(n))
        # vectors from tail to pt and from pt to head
        v1 = p - pts[:, ta]
        v2 = pts[:, he] - p
        nv1 = np.sqrt(v1[0, :] * v1[0, :] + v1[1, :] * v1[1, :])
        nv2 = np.sqrt(v2[0, :] * v2[0, :] + v2[1, :] * v2[1, :])
        v1n = v1 / nv1
        v2n = v2 / nv2
        # pt is on a segment when the two unit vectors are aligned
        ps = v1n[0, :] * v2n[0, :] + v1n[1, :] * v2n[1, :]
        u = abs(1. - ps) < tol
        nbu = nb[u]
    return nbu
def facet3D(self, e, subseg=False):
    """ Calculate a 3D facet (.off file) from a segment.

    Parameters
    ----------
    e : int
        segment number
    subseg : boolean
        if True, write one facet per sub-segment (default False)

    Returns
    -------
    filename : str
        name of the generated .off file, or 'void' when subseg is
        requested on a segment without sub-segments
    """
    # FIX: the body referenced an undefined name `s` (the parameter
    # is `e`) and an undefined slab dictionary `sl` — both raised
    # NameError on every call
    s = e
    sl = self.sl
    P1 = np.array(np.zeros(3), dtype=np.float64)
    P2 = np.array(np.zeros(3), dtype=np.float64)
    P3 = np.array(np.zeros(3), dtype=np.float64)
    P4 = np.array(np.zeros(3), dtype=np.float64)
    # v1.1 nebr = self.Gs.neighbors(s)
    nebr = list(dict(self.Gs[s]).keys())
    n1 = nebr[0]
    n2 = nebr[1]
    # 4 corners of the facet : the 2 endpoints at zmin and zmax
    P1[0:2] = np.array(self.Gs.pos[n1])
    P1[2] = self.Gs.node[s]['z'][0]
    P2[0:2] = np.array(self.Gs.pos[n2])
    P2[2] = self.Gs.node[s]['z'][0]
    P3[0:2] = np.array(self.Gs.pos[n2])
    P3[2] = self.Gs.node[s]['z'][1]
    P4[0:2] = np.array(self.Gs.pos[n1])
    P4[2] = self.Gs.node[s]['z'][1]
    cold = pyu.coldict()
    if subseg:
        nsseg = len(self.Gs.node[s]['ss_name'])
    else:
        nsseg = 0
    filename = 'fa' + str(s) + '.off'
    filestruc = pyu.getlong(filename, pro.pstruc['DIRGEOM'])
    fos = open(filestruc, "w")
    fos.write("OFF\n")
    fos.write("%d %d \n\n" % (1 + (nsseg + 1) * 4, nsseg + 1))
    fos.write("0.000 0.000 0.000\n")
    if subseg:
        try:
            for k, name in enumerate(self.Gs.node[s]['ss_name']):
                # sub-segment facets use their own z span
                P1[2] = self.Gs.node[s]['ss_z'][k][0]
                P2[2] = self.Gs.node[s]['ss_z'][k][0]
                P3[2] = self.Gs.node[s]['ss_z'][k][1]
                P4[2] = self.Gs.node[s]['ss_z'][k][1]
                fos.write("%6.3f %6.3f %6.3f \n" % (P1[0], P1[1], P1[2]))
                fos.write("%6.3f %6.3f %6.3f \n" % (P2[0], P2[1], P2[2]))
                fos.write("%6.3f %6.3f %6.3f \n" % (P3[0], P3[1], P3[2]))
                fos.write("%6.3f %6.3f %6.3f \n" % (P4[0], P4[1], P4[2]))
        except KeyError:
            print('no subsegment on ', s)
            return('void')
    else:
        name = self.Gs.node[s]['name']
        fos.write("%6.3f %6.3f %6.3f \n" % (P1[0], P1[1], P1[2]))
        fos.write("%6.3f %6.3f %6.3f \n" % (P2[0], P2[1], P2[2]))
        fos.write("%6.3f %6.3f %6.3f \n" % (P3[0], P3[1], P3[2]))
        fos.write("%6.3f %6.3f %6.3f \n" % (P4[0], P4[1], P4[2]))
    if subseg:
        for k, name in enumerate(self.Gs.node[s]['ss_name']):
            colname = sl[name]['color']
            colhex = cold[colname]
            col = pyu.rgb(colhex) / 255.
            fos.write("4 %i %i %i %i %6.3f %6.3f %6.3f 0.4\n" % (1 + 4 * k, 2 + 4 * k,
                                                                 3 + 4 * k, 4 + 4 * k, col[0], col[1], col[2]))
    else:
        name = self.Gs.node[s]['name']
        colname = sl[name]['color']
        colhex = cold[colname]
        col = pyu.rgb(colhex) / 255.
        fos.write("4 %i %i %i %i %6.3f %6.3f %6.3f 0.4\n" % (1, 2,
                                                             3, 4, col[0], col[1], col[2]))
    return(filename)
def geomfile(self, centered=False):
    """ create a .off geomview file

    Parameters
    ----------
    centered : Boolean
        if True the layout is centered around its center of gravity

    Returns
    -------
    pg : np.array
        applied center of gravity offset ([0, 0] when not centered)

    Notes
    -----
    The `.off` file can be vizualized through the show3 method

    Examples
    --------
    >>> from pylayers.gis.layout import *
    >>> L = Layout('DLR.lay')
    >>> pg = L.geomfile()
    """
    # calculate center of gravity
    if centered:
        pg = np.sum(self.pt, axis=1) / np.shape(self.pt)[1]
    else:
        pg = np.array([0, 0])
    # en : number of segments (positive Gs nodes)
    en = len(np.where(np.array(list(dict(self.Gs.node).keys())) > 0)[0])
    if en != self.Ns:
        logger.warning("wrong number of segments, consistency problem in layout")
    # d : dictionnary of layout sub segments
    d = self.subseg()
    # cen : total number of sub-segments
    cen = 0
    for k in d:
        lss = d[k]
        cen = cen + len(lss)
    if cen != self.Nss:
        logger.warning("wrong number of subsegments, consistency problem in layout")
    sl = self.sl
    #
    # Create a polygon for each segment and subsegment :
    # P1..P4 hold the 4 corners (columns) of each facet
    #
    P1 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    P2 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    P3 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    P4 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    ik = 0
    dikn = {}
    for i in list(dict(self.Gs.node).keys()):
        if i > 0:  # segment
            # air segments are not rendered
            if ((self.Gs.node[i]['name'] != 'AIR') and
                    (self.Gs.node[i]['name'] != '_AIR')):
                #v1.1 nebr = self.Gs.neighbors(i)
                nebr = list(dict(self.Gs[i]).keys())
                n1 = nebr[0]
                n2 = nebr[1]
                # corners : endpoints at z[0] then z[1]
                P1[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
                P1[2, ik] = self.Gs.node[i]['z'][0]
                P2[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
                P2[2, ik] = self.Gs.node[i]['z'][0]
                P3[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
                P3[2, ik] = self.Gs.node[i]['z'][1]
                P4[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
                P4[2, ik] = self.Gs.node[i]['z'][1]
                dikn[ik] = i
                ik = ik + 1
            else:
                # skipped air segment : one fewer rendered facet
                en = en - 1
    # sub-segment facets — d maps ss_name -> list of (seg, order)
    cpt = 0
    subseg = {}
    for k in d.keys():
        for l in d[k]:
            ids = l[0]
            subseg[cpt] = ids
            order = l[1]
            cpt = cpt + 1
            # v1.1 nebr = self.Gs.neighbors(l[0])
            nebr = list(dict(self.Gs[l[0]]).keys())
            n1 = nebr[0]
            n2 = nebr[1]
            # sub-segments use their own z span ss_z[order]
            P1[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
            P1[2, ik] = self.Gs.node[ids]['ss_z'][order][0]
            P2[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
            P2[2, ik] = self.Gs.node[ids]['ss_z'][order][0]
            P3[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
            P3[2, ik] = self.Gs.node[ids]['ss_z'][order][1]
            P4[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
            P4[2, ik] = self.Gs.node[ids]['ss_z'][order][1]
            dikn[ik] = l
            ik = ik + 1
    npt = 4 * (en + cen)
    _filename, ext = os.path.splitext(self._filename)
    _filegeom = _filename + '.off'
    self.filegeom = _filegeom
    filegeom = pyu.getlong(_filegeom, pro.pstruc['DIRGEOM'])
    fos = open(filegeom, "w")
    # OFF header : vertex count (+1 for a dummy origin vertex) and
    # facet count
    fos.write("OFF\n")
    fos.write("%d %d \n\n" % (npt + 1, en + cen))
    fos.write("0.000 0.000 0.000\n")
    for i in range(en + cen):
        fos.write("%6.3f %6.3f %6.3f \n" % (P1[0, i], P1[1, i], P1[2, i]))
        fos.write("%6.3f %6.3f %6.3f \n" % (P2[0, i], P2[1, i], P2[2, i]))
        fos.write("%6.3f %6.3f %6.3f \n" % (P3[0, i], P3[1, i], P3[2, i]))
        fos.write("%6.3f %6.3f %6.3f \n" % (P4[0, i], P4[1, i], P4[2, i]))
    cold = pyu.coldict()
    # one face record per facet, colored from its slab
    for i in range(en + cen):
        q = 4 * i
        if i < en:
            # plain segment facet
            ne = dikn[i]
            name = self.Gs.node[ne]['name']
        else:
            # sub-segment facet : dikn holds (seg, order)
            ne = dikn[i][0]
            order = dikn[i][1]
            name = self.Gs.node[ne]['ss_name'][order]
        colname = sl[name]['color']
        colhex = cold[colname]
        col = pyu.rgb(colhex) / 255.
        fos.write("4 %i %i %i %i %6.3f %6.3f %6.3f 0.4\n" % (q +
                                                             1, q + 2, q + 3, q + 4, col[0], col[1], col[2]))
    fos.close()
    return pg
def _show3(self, centered=False, newfig=False, opacity=1., ceil_opacity=1., show_ceil=False, cyid=False, **kwargs):
    """ mayavi 3D vizualisation

    Parameters
    ----------
    newfig : Boolean
        create a new mayavi Figure
    opacity : float ([0,1])
        set slab opacity
    ceil_opacity : float
    centered : Boolean
        if True the layout is centered around its center of gravity
    cyid : boolean
        display cycle number
    show_ceil: boolean
        display ceil or not

    Notes
    -----
    The `.off` file can be vizualized through the show3 method

    Examples
    --------
    .. plot::
        :include-source:
        >>> from pylayers.gis.layout import *
        >>> L = Layout()
    """
    #
    # calculate center of gravity of the layout
    #
    if centered:
        pg = np.sum(self.pt, axis=1) / np.shape(self.pt)[1]
    else:
        pg = np.array([0, 0])
    # en : number of segments (positive Gs nodes)
    en = len(np.where(np.array(list(dict(self.Gs.node).keys())) > 0)[0])
    if en != self.Ns:
        logger.warning(
            "wrong number of segment consistency problem in layout")
    # d : dictionnary of layout sub segments
    d = self.subseg()
    cen = 0
    for k in d:
        lss = d[k]
        cen = cen + len(lss)
    if cen != self.Nss:
        logger.warning(
            "wrong number of subsegment consistency problem in layout")
    sl = self.sl
    #
    # Create a 3D polygon for each segment and subsegment ;
    # P1..P4 hold the facet corners as columns
    #
    P1 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    P2 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    P3 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    P4 = np.array(np.zeros([3, en + cen], dtype=np.float64))
    ik = 0
    dikn = {}
    #
    # segments which are not _AIR or AIR
    #
    for i in list(dict(self.Gs.node).keys()):
        if i > 0:  # segment
            if ((self.Gs.node[i]['name'] != 'AIR') and
                    (self.Gs.node[i]['name'] != '_AIR')):
                #v1.1 nebr = self.Gs.neighbors(i)
                nebr = list(dict(self.Gs[i]).keys())
                n1 = nebr[0]
                n2 = nebr[1]
                # NOTE: corner ordering differs from geomfile —
                # here P1/P2 are endpoint n1 at z[0]/z[1]
                P1[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
                P1[2, ik] = self.Gs.node[i]['z'][0]
                P2[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
                P2[2, ik] = self.Gs.node[i]['z'][1]
                P3[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
                P3[2, ik] = self.Gs.node[i]['z'][1]
                P4[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
                P4[2, ik] = self.Gs.node[i]['z'][0]
                dikn[ik] = i
                ik = ik + 1
            else:
                # air segment skipped : one fewer rendered facet
                en = en - 1
    # sub-segment facets — d maps ss_name -> list of (seg, order)
    cpt = 0
    subseg = {}
    for k in d.keys():
        for l in d[k]:
            ids = l[0]
            subseg[cpt] = ids
            order = l[1]
            cpt = cpt + 1
            # v1.1 nebr = self.Gs.neighbors(l[0])
            nebr = list(dict(self.Gs[l[0]]).keys())
            n1 = nebr[0]
            n2 = nebr[1]
            P1[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
            P1[2, ik] = self.Gs.node[ids]['ss_z'][order][0]
            P2[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
            P2[2, ik] = self.Gs.node[ids]['ss_z'][order][0]
            P3[0:2, ik] = np.array(self.Gs.pos[n2]) - pg
            P3[2, ik] = self.Gs.node[ids]['ss_z'][order][1]
            P4[0:2, ik] = np.array(self.Gs.pos[n1]) - pg
            P4[2, ik] = self.Gs.node[ids]['ss_z'][order][1]
            dikn[ik] = l
            ik = ik + 1
    npt = 4 * (en + cen)
    npt_s = (en + cen)
    # vertex array : all P1 corners, then all P2, P3, P4
    points = np.hstack((P1[:, 0:npt_s], P2[:, 0:npt_s]))
    points = np.hstack((points, P3[:, 0:npt_s]))
    points = np.hstack((points, P4[:, 0:npt_s]))
    points = points.T
    # quad connectivity : facet i uses vertices i, i+npt_s, ...
    boxes = np.empty((int(npt / 4), 4), dtype='int')
    b = np.arange(int(npt / 4))
    boxes[:, 0] = b
    boxes[:, 1] = b + npt_s
    boxes[:, 2] = b + 2 * npt_s
    boxes[:, 3] = b + 3 * npt_s
    cold = pyu.coldict()
    # per-vertex color from the facet slab (replicated on the 4 corners)
    color = np.zeros((4 * (cen + en), 3))
    for i in range(en + cen):
        if i < en:
            ne = dikn[i]
            name = self.Gs.node[ne]['name']
        else:
            ne = dikn[i][0]
            order = dikn[i][1]
            name = self.Gs.node[ne]['ss_name'][order]
        colname = sl[name]['color']
        colhex = cold[colname]
        color[i, :] = pyu.rgb(colhex)
        color[i + npt_s, :] = pyu.rgb(colhex)
        color[i + 2 * npt_s, :] = pyu.rgb(colhex)
        color[i + 3 * npt_s, :] = pyu.rgb(colhex)
    # 4 extra vertices for the floor quad
    colname = sl['FLOOR']['color']
    colhex = cold[colname]
    colf = np.repeat((pyu.rgb(colhex))[np.newaxis, :], 4, axis=0)
    color = np.vstack((color, colf))
    # trick for correcting color assignement
    sc = tvtk.UnsignedCharArray()
    sc.from_array(color)
    # manage floor
    # if Gt doesn't exists
    try:
        self.ma.coorddeter()
        z = np.zeros(self.ma.xy.shape[1])
        F = np.vstack((self.ma.xy, z))
        tri = np.arange(len(z))
        meshf = tvtk.PolyData(points=F.T, polys=np.array([tri]))
        meshf.point_data.scalars = sc
        meshf.point_data.scalars.name = 'scalars'
        surff = mlab.pipeline.surface(meshf, opacity=opacity)
        mlab.pipeline.surface(mlab.pipeline.extract_edges(surff),
                              color=(0, 0, 0), )
    # otherwise : fall back to a rectangular floor spanning the
    # bounding box of the rendered points at the minimum z
    except:
        floorx = np.array((points[:, 0].min(), points[:, 0].max()))
        floory = np.array((points[:, 1].min(), points[:, 1].max()))
        zmin = np.min(points[:, 2])
        Pf = np.array([floorx[0], floory[0], zmin])
        Pf = np.vstack((Pf, np.array([floorx[0], floory[1], zmin])))
        Pf = np.vstack((Pf, np.array([floorx[1], floory[1], zmin])))
        Pf = np.vstack((Pf, np.array([floorx[1], floory[0], zmin])))
        points = np.vstack((points, Pf))
        bf = np.arange(npt, npt + 4)
        boxes = np.vstack((boxes, bf))
    mesh = tvtk.PolyData(points=points, polys=boxes)
    mesh.point_data.scalars = sc
    mesh.point_data.scalars.name = 'scalars'
    if newfig:
        mlab.clf()
        f = mlab.figure(bgcolor=(1, 1, 1))
    else:
        f = mlab.gcf()
        f.scene.background = (1, 1, 1)
    # disable rendering while the scene is being populated
    f.scene.disable_render = True
    surf = mlab.pipeline.surface(mesh, opacity=opacity)
    mlab.pipeline.surface(mlab.pipeline.extract_edges(surf),
                          color=(0, 0, 0), )
    f.children[-1].name = 'Layout ' + self._filename
    if show_ceil == True:
        if len(self.Gt.nodes()) != 0:
            # indoor cycles get a triangulated ceiling
            uin = [kn for kn in self.Gt.nodes() if self.Gt.node[kn]
                   ['indoor'] == True]
            ptc = np.ndarray(shape=(3, 0))
            boxc = np.ndarray(shape=(0, 3))
            cpt = 0
            for u in uin:
                p = self.Gt.node[u]['polyg']
                no = self.Gt.node[u]['polyg'].vnodes[
                    self.Gt.node[u]['polyg'].vnodes > 0]
                # ceiling height : first segment z-max that is not the
                # 40000000 sentinel value
                for n in no:
                    if self.Gs.node[n]['z'][1] != 40000000:
                        h = self.Gs.node[n]['z'][1]
                        break
                vert = {"vertices": np.array(p.exterior.xy).T}
                dt = triangle.triangulate(vert)
                nbpt = len(dt['vertices'])
                pt = np.vstack((dt['vertices'].T, [h] * nbpt))
                box = dt['triangles']
                ptc = np.hstack((ptc, pt))
                boxc = np.vstack((boxc, box + cpt))
                cpt = cpt + nbpt
            # manage Ceil color
            colname = sl['CEIL']['color']
            colhex = cold[colname]
            colf = np.repeat((pyu.rgb(colhex))[np.newaxis, :], cpt, axis=0)
            color = colf
            # trick for correcting color assignement
            sc = tvtk.UnsignedCharArray()
            sc.from_array(color)
            meshc = tvtk.PolyData(points=ptc.T, polys=boxc)
            meshc.point_data.scalars = sc
            meshc.point_data.scalars.name = 'scalars'
            mlab.pipeline.surface(
                meshc, opacity=ceil_opacity, reset_zoom=False)
    if cyid:
        if len(self.Gt.nodes()) > 0:
            pk = self.Gt.pos.keys()
            # NOTE(review): under Python 3 np.array(dict_values)
            # yields a 0-d object array — likely needs list() ; verify
            v = np.array(self.Gt.pos.values())
            [mlab.text3d(v[ik, 0], v[ik, 1], 0.5, str(k))
             for ik, k in enumerate(pk)]
    f.scene.disable_render = False
    return(f)
def show3(self, bdis=True, centered=True):
    """ Geomview display of the indoor structure.

    Parameters
    ----------
    bdis : boolean (default True)
        boolean display (call geomview if True)
    centered : boolean
        if True center the layout before display

    Returns
    -------
    pg when the viewer is launched, otherwise the geometry file name
    """
    pg = self.geomfile(centered=centered)
    filename = pyu.getlong(self.filegeom, pro.pstruc['DIRGEOM'])
    if not bdis:
        return(filename)
    # launch geomview in the background, discarding its stderr
    os.system("geomview -b 1 1 1 " + filename + " 2>/dev/null &")
    return(pg)
def signature(self, iTx, iRx):
    """ Determine signature between node iTx and node iRx

    Parameters
    ----------
    iTx : np.ndarray (a point) or int (a room number)
    iRx : np.ndarray (a point) or int (a room number)

    Returns
    -------
    sigarr : np.array (2 x N)
        row 0 : interaction identifier, row 1 : interaction type
        (1 : transmission tuple, -1 : diffraction point, 2 : reflection,
        0 : path separator)

    Warnings
    --------
    This a temporary function
    There is some algorithmic work to find the best way to determine signature
    T4 : limit the ndt to only edges and nodes in visibility from Tx
    """
    # Here we take all the vnodes >0 from the room
    #
    # Practically those list of nodes should depend on pTx , pRx
    #
    try:
        self.Gi
    except:
        raise NameError(
            'Interaction graph layout.Gi must be build before signature computation')
    # resolve a point argument into its room number
    if isinstance(iTx, np.ndarray):
        NroomTx = self.pt2ro(iTx)
    elif isinstance(iTx, int):
        NroomTx = iTx
    else:
        raise NameError('iTx must be an array or a room number')
    if isinstance(iRx, np.ndarray):
        NroomRx = self.pt2ro(iRx)
    elif isinstance(iRx, int):
        NroomRx = iRx
    else:
        raise NameError('iRx must be an array or a room number')
    if not self.Gr.has_node(NroomTx) or not self.Gr.has_node(NroomRx):
        raise AttributeError('Tx or Rx is not in Gr')
    #
    # .. todo:: modifier inter afin de ne pas retenir les points non diffractants
    #
    # interactions of the Tx and Rx cycles
    ndt = self.Gt.node[self.Gr.node[NroomTx]['cycle']]['inter']
    ndr = self.Gt.node[self.Gr.node[NroomRx]['cycle']]['inter']
    sigarr = np.array([]).reshape(2, 0)
    for nt in ndt:
        for nr in ndr:
            addpath = False
            if (type(nt) != type(nr)):
                try:
                    path = nx.dijkstra_path(self.Gi, nt, nr)
                    addpath = True
                except:
                    # no path between nt and nr in Gi
                    pass
            elif (nt != nr):
                try:
                    path = nx.dijkstra_path(self.Gi, nt, nr)
                    addpath = True
                except:
                    # no path between nt and nr in Gi
                    pass
            else:
                # same interaction : trivial single-node path
                addpath = True
                path = [nt]
            if addpath:
                # 0-column acts as a separator between paths
                sigarr = np.hstack((sigarr, np.array([[0], [0]])))
                for interaction in path:
                    # interaction labels are string-encoded Python
                    # literals ; eval decodes them
                    it = eval(interaction)
                    if type(it) == tuple:
                        sigarr = np.hstack((sigarr,
                                            np.array([[it[0]], [1]])))
                    elif it < 0:
                        sigarr = np.hstack((sigarr,
                                            np.array([[it], [-1]])))
                    else:
                        sigarr = np.hstack((sigarr, np.array([[it], [2]])))
    return sigarr
def plot(self, **kwargs):
    """ plot the layout with shapely polygons

    Parameters
    ----------
    show : boolean
        accepted for API compatibility
    fig : matplotlib figure or []
        figure to draw on (current figure when [])
    ax : matplotlib axis or []
        axis to draw on (current axis when [])
    labels : list or bool or str
        label families to display; 's' shows Gs node indices,
        True selects every family
    nodes : boolean
        scatter the Gs node positions

    Returns
    -------
    fig, ax

    Examples
    --------

    >>> L= Layout('Munich.lay',bbuild=False)
    >>> L.plot(show=True)
    """
    defaults = {'show': False,
                'fig': [],
                'ax': [],
                'labels': [],
                'nodes': False
                }
    for key, value in defaults.items():
        if key not in kwargs:
            kwargs[key] = value
    # Bug fix: fig/ax used to be assigned only when the caller did NOT
    # provide them, raising NameError when a figure or axis was passed in.
    if kwargs['fig'] == []:
        fig = plt.gcf()
    else:
        fig = kwargs['fig']
    if kwargs['ax'] == []:
        ax = plt.gca()
    else:
        ax = kwargs['ax']
    # Normalize the labels argument (list / True / str / anything else).
    if isinstance(kwargs['labels'], list):
        labels = kwargs['labels']
    elif kwargs['labels'] == True:
        labels = ['s', 't', 'v', 'i', 'w']
    elif isinstance(kwargs['labels'], str):
        labels = kwargs['labels']
    else:
        labels = []
    k = list(self.Gs.pos.keys())
    v = list(self.Gs.pos.values())
    kk = np.array(k)
    vv = np.array(v)
    w = [str(x) for x in kk]
    if 's' in labels:
        # annotate every Gs node with its index
        [ax.text(vv[i, 0], vv[i, 1], w[i]) for i in range(len(w))]
    if kwargs['nodes']:
        ax.scatter(vv[:, 0], vv[:, 1])
    # draw all shapely segments of the layout in black
    ML = sh.MultiLineString(list(self._shseg.values()))
    self.pltlines(ML, color='k', fig=fig, ax=ax)

    return fig, ax
def get_Sg_pos(self, sigarr):
    """Return the (x, y) positions of the nodes of a signature.

    Parameters
    ----------
    sigarr : signature array; row 0 holds the node sequence, with 0
        entries acting as path separators

    Returns
    -------
    np.ndarray (N, 2)
        one coordinate row per signature entry; separator (0) entries
        are left at the origin

    See Also
    --------
    showSig
    """
    node_seq = sigarr[0]
    positions = np.zeros((len(node_seq), 2))
    nonzero_idx = np.nonzero(node_seq != 0)[0]
    coords = np.array([self.Gs.pos[node] for node in node_seq if node != 0])
    positions[nonzero_idx, :] = coords
    return positions
def plot_segments(self, lns, **kwargs):
    """Plot a list of layout segments.

    Parameters
    ----------
    lns : list
        segment numbers (positive Gs nodes)
    **kwargs :
        show, fig, ax, color, linewidth

    Returns
    -------
    fig, ax
    """
    defaults = {'show': False,
                'fig': None,
                'ax': None,
                'color': 'b',
                'linewidth': 1}
    for key, value in defaults.items():
        if key not in kwargs:
            kwargs[key] = value
    if kwargs['fig'] is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    elif kwargs['ax'] is None:
        # Bug fix: fig was left unbound on this branch, raising NameError
        # at the displot call below when only `fig` was supplied.
        fig = kwargs['fig']
        ax = fig.add_subplot(111)
    else:
        fig = kwargs['fig']
        ax = kwargs['ax']
    # Bug fix: map() is lazy under Python 3, so np.array(map(...)) built a
    # useless 0-d object array; use list comprehensions instead.
    # Each segment node has exactly two neighbors in Gs: its end points.
    # v1.1 nth = np.array(map(lambda n: nx.neighbors(self.Gs, n), lns))
    nth = np.array([list(self.Gs[n]) for n in lns])
    nt = nth[:, 0]
    nh = nth[:, 1]
    # pt : 2 x Ns   (tail coordinates)
    pt = np.array([[self.Gs.pos[n][0], self.Gs.pos[n][1]] for n in nt]).T
    # ph : 2 x Ns   (head coordinates)
    ph = np.array([[self.Gs.pos[n][0], self.Gs.pos[n][1]] for n in nh]).T

    fig, ax = plu.displot(pt, ph, fig=fig, ax=ax, color=kwargs['color'])

    return fig, ax
def showSig(self, sigarr, Tx=None, Rx=None, fig=[], ax=None):
    """ Show signature

    Parameters
    ----------
    sigarr : np.ndarray
        signature array (row 0 : node sequence, 0 entries separate paths)
    Tx : np.array (2,1)
        Transmitter coordinates
    Rx : np.array (2,1)
        Receipter coordinates
    fig : figure instance (a new one is created when [])
    ax : axes instance (created from fig when None)

    Returns
    -------
    fig : figure instance
    ax : axes instance
    lines : lines instance

    Examples
    --------

    """
    sig = sigarr[0]
    if fig == []:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    elif ax is None:
        ax = fig.add_subplot(111)
    lines = []
    ps = self.get_Sg_pos(sigarr)
    # zero entries are path separators: mask them so each signature is
    # drawn as a distinct polyline
    nz = np.nonzero(sig == 0)[0]
    mask = np.zeros((2, len(sig)))
    mask[:, nz] = 1
    vertices = np.ma.masked_array(ps.T, mask)
    lines.extend(ax.plot(vertices[0, :], vertices[1, :], color='k'))

    # NOTE(review): Tx/Rx default to None but are tested against [] —
    # with an ndarray argument `Tx != []` is elementwise; confirm the
    # intended sentinel before relying on these branches.
    if Tx != []:
        # first node after each separator starts a path: connect Tx to it
        itx = np.unique(sig[nz[1:-1] + 1], return_index=True)[1]
        itx2 = np.kron(itx, [1, 1])
        tx = ps[itx2]
        tx[range(0, len(tx), 2)] = Tx
        lines.extend(ax.plot(tx[:, 0], tx[:, 1], color='r'))
    if Rx != []:
        # last node before each separator ends a path: connect it to Rx
        irx = np.unique(sig[nz[1:-1] - 1], return_index=True)[1]
        irx2 = np.kron(irx, [1, 1])
        rx = ps[irx2]
        rx[range(0, len(rx), 2)] = Rx
        lines.extend(ax.plot(rx[:, 0], rx[:, 1], color='b'))

    return (fig, ax, lines)
    # lines=[]
    # for s in sig:
    #     l=[self.Gs.pos[s[ii]] for ii in xrange(len(s))]
    #     if Tx!=None and Rx!=None:
    #         l.insert(0,Tx)
    #         l.insert(-1,Rx)
    #     ls=sh.LineString(l)
    #     x,y=ls.xy
    #     lines.extend(ax.plot(x,y,'k',lw=0.1,alpha=0.2))
    # return (fig,ax,lines)
# def distwall(self, p, nroom):
# """ calculate distance to wall
#
# Parameters
# ----------
#
# p : ndarray
# point coordinate
#
# nroom : int
# room number of p
#
# Returns
# -------
#
# dist
# list of distances to walls of room nroom
#
# Notes
# -----
#
# Return dist a list of all the distances to the walls of a room
#
#
# """
# pp = sh.Point(p[0], p[1])
#
# dist = []
# p0_xy = []
# p1_xy = []
#
# vnode = self.Gr.node[nroom]['cycle'].cycle
#
# # for j in range(len(Gr[nroom]['vnodes'])):
# for j in range(len(vnodes)):
# nn = self.b_Gr[5]['vnodes'][j]
# nta = G1.tahe[0, nn - 1]
# nhe = G1.tahe[1, nn - 1]
# p0 = np.array([G1.pt[0, nta], G1.pt[1, nta]])
# p1 = np.array([G1.pt[0, nhe], G1.pt[1, nhe]])
# p0_xy.insert(j, p0)
# p1_xy.insert(j, p1)
#
# pstartwll = np.array(p0_xy)
# pfinwll = np.array(p1_xy)
#
# for i in range(len(self.b_Gr[nroom]['vnodes'])):
# line_wall = sh.LineString([(pstartwll[i, 0],
# pstartwll[i, 1]), (pfinwll[i, 0], pfinwll[i, 1])])
# dist.insert(i, line_wall.distance(pp))
# return(dist)
def randTxRx(self):
    """Draw independent uniform random positions for Tx and Rx.

    Both points are drawn inside the current layout bounding box
    ``self.ax`` = (xmin, xmax, ymin, ymax).

    Returns
    -------
    p_Tx : numpy.ndarray
        A point of the placement of the Tx
    p_Rx : numpy.ndarray
        A point of the placement of the Rx

    Examples
    --------

    >>> from pylayers.gis.layout import *
    >>> L = Layout('defstr.lay')
    >>> p_Tx,p_Rx = L.randTxRx()

    Notes
    -----
    ex fn Tx_Rx_pos
    """
    xmin, xmax = self.ax[0], self.ax[1]
    ymin, ymax = self.ax[2], self.ax[3]
    # keep the original draw order: Tx x, Tx y, Rx x, Rx y
    p_Tx = np.array([rd.uniform(xmin, xmax), rd.uniform(ymin, ymax)])
    p_Rx = np.array([rd.uniform(xmin, xmax), rd.uniform(ymin, ymax)])
    return (p_Tx, p_Rx)
def boundary(self, percx=0.15, percy=0.15, xlim=(), force=False, minD=10):
    """ add a blank boundary around layout

    Parameters
    ----------
    percx : float
        percentage of Dx for x offset calculation (default 0.15)
    percy : float
        percentage of Dy for y offset calculation (default 0.15)
    xlim : tuple
        forced (xmin, xmax, ymin, ymax) inner box; () derives the box
        from the Gs node positions
    minD : minimum distance for boundary
    force : boolean
        force modification of boundaries

    self.lboundary is the list of the nodes of the added boundary
    self.axn is the zone without the boundary extension
    self.ax is updated

    Examples
    --------

    >>> from pylayers.gis.layout import *
    >>> L = Layout('defstr.lay')
    >>> L.boundary()
    """
    if not self.hasboundary or force:
        # 1) determine the inner bounding box: forced via xlim, derived
        #    from node positions, or a default box for an empty layout
        if xlim!=():
            xmin = xlim[0]
            xmax = xlim[1]
            ymin = xlim[2]
            ymax = xlim[3]
        elif len(self.Gs.pos.values()) != 0:
            xmax = max(p[0] for p in self.Gs.pos.values())
            xmin = min(p[0] for p in self.Gs.pos.values())
            ymax = max(p[1] for p in self.Gs.pos.values())
            ymin = min(p[1] for p in self.Gs.pos.values())
        else:
            xmin = -20.
            xmax = 20.
            ymin = -10.
            ymax = 10.
        # 2) margins proportional to the box extent, clamped below by minD
        Dx = np.maximum(xmax - xmin,minD)
        Dy = np.maximum(ymax - ymin,minD)
        dx = Dx * percx
        dy = Dy * percy
        # 3) add the four corner points of the extended box
        n1 = self.add_fnod((xmin - dx, ymin - dy))
        n2 = self.add_fnod((xmax + dx, ymin - dy))
        n3 = self.add_fnod((xmax + dx, ymax + dy))
        n4 = self.add_fnod((xmin - dx, ymax + dy))

        self.lboundary = [n1, n2, n3, n4]

        self.segboundary = []
        # 4) close the box with four transparent '_AIR' segments
        ns1 = self.add_segment(n1, n2, name='_AIR')
        ns2 = self.add_segment(n2, n3, name='_AIR')
        ns3 = self.add_segment(n3, n4, name='_AIR')
        ns4 = self.add_segment(n4, n1, name='_AIR')

        self.segboundary.append(ns1)
        self.segboundary.append(ns2)
        self.segboundary.append(ns3)
        self.segboundary.append(ns4)

        self.axn = (xmin, xmax, ymin, ymax)
        self.ax = (xmin - dx, xmax + dx, ymin - dy, ymax + dy)
        self.display['box'] = self.ax
        self.hasboundary = True
        # refresh the numpy mirrors of the graph
        self.g2npy()
    elif xlim!=():
        # boundary already exists: only relocate the 4 corner points
        # change points coordinates
        self.Gs.pos[self.lboundary[0]]=(xlim[0],xlim[2])
        self.Gs.pos[self.lboundary[1]]=(xlim[1],xlim[2])
        self.Gs.pos[self.lboundary[2]]=(xlim[1],xlim[3])
        self.Gs.pos[self.lboundary[3]]=(xlim[0],xlim[3])
        self.ax = xlim
        self.display['box'] = xlim
        self.g2npy()
def off_overlay(self, dx=0, dy=0):
    """Translate the overlay image axis by (dx, dy).

    Parameters
    ----------
    dx : float
        horizontal offset applied to both x bounds
    dy : float
        vertical offset applied to both y bounds
    """
    x0, x1, y0, y1 = self.ax[0], self.ax[1], self.ax[2], self.ax[3]
    self.display['overlay_axis'] = (x0 + dx, x1 + dx, y0 + dy, y1 + dy)
def scl_overlay(self, ax=1.0, ay=1.0):
    """Scale the overlay image axis by (ax, ay).

    Parameters
    ----------
    ax : float
        factor applied to both x bounds
    ay : float
        factor applied to both y bounds
    """
    x0, x1, y0, y1 = self.ax[0], self.ax[1], self.ax[2], self.ax[3]
    self.display['overlay_axis'] = (x0 * ax, x1 * ax, y0 * ay, y1 * ay)
def get_paths(self, nd_in, nd_fin):
    """Enumerate every Gs path joining two graph nodes.

    Parameters
    ----------
    nd_in: int
        initial graph node (segment or point)
    nd_fin: int
        final graph node (segment or point)

    Returns
    -------
    list
        all paths of Gs between nd_in and nd_fin
    """
    return gph.find_all_paths(self.Gs, nd_in, nd_fin)
def outputGi_func_test(args):
    """CPU-burner stub used to exercise the worker dispatch.

    ``args`` is ignored; the returned value is the result of the last
    loop iteration, i.e. 2 * 9999**2.
    """
    result = None
    for k in range(10000):
        result = 2 * k * k
    return result
def outputGi_func(args):
    """Compute the admissible output interactions of one Gi edge.

    ``args[0]`` is an edge ``(i0, i1)`` of the interaction graph; the
    function returns ``(i0, i1, {'output': dintprob})`` where
    ``dintprob`` maps each admissible next interaction to a probability.

    NOTE(review): this worker relies on names (``Gi_no``, ``Gi_A``,
    ``s2pc``, ``p2pc``, ``sgsg``, ``s2pu``, ``cone``, ``geu``) whose
    unpacking from ``args`` is commented out below, and one line still
    references ``self`` although the function takes no ``self`` —
    confirm how it is dispatched before use.
    """
    # def outputGi_func(e, Gi_no, Gi_A, Gspos, sgsg, s2pc, s2pu):
    # for k in range(10000):
    #     y = k*k
    #     # time.sleep(0.01)
    # return y
    def Gspos(n):
        # position of node n: segment midpoint when n > 0, point otherwise
        if n>0:
            #return np.mean(s2pc[n].reshape(2,2),axis=0)
            return np.mean(s2pc[n].toarray().reshape(2,2),axis=0)
        else:
            return p2pc[-n]

    e = args[0]
    #Gi_no = args[1]
    #Gi_A = args[2]
    #p2pc = args[3]
    #sgsg = args[4]
    #s2pc = args[5]
    #s2pu = args[6]
    print(e)
    i0 = e[0]
    i1 = e[1]
    nstr0 = i0[0]
    nstr1 = i1[0]
    # list of authorized outputs. Initialized void
    output = []
    # nstr1 : segment number of central interaction
    if nstr1 > 0:
        # central interaction is a segment
        # pseg1 = self.s2pc[nstr1,:].toarray().reshape(2, 2).T
        pseg1 = s2pc[nstr1,:].toarray().reshape(2, 2).T
        # pseg1 = self.s2pc[nstr1,:].data.reshape(2, 2).T
        # pseg1o = self.seg2pts(nstr1).reshape(2, 2).T
        # create a Cone object
        cn = cone.Cone()
        # if starting from segment
        if nstr0 > 0:
            # pseg0 = self.s2pc[nstr0,:].toarray().reshape(2, 2).T
            pseg0 = s2pc[nstr0,:].toarray().reshape(2, 2).T
            # pseg0 = self.s2pc[nstr0,:].data.reshape(2, 2).T
            # pseg0o = self.seg2pts(nstr0).reshape(2, 2).T
            # if nstr0 and nstr1 are connected segments
            if sgsg[nstr0,nstr1] == 0:
                # from 2 not connected segment
                cn.from2segs(pseg0, pseg1)
            else:
                # from 2 connected segments
                cn.from2csegs(pseg0, pseg1)
        # if starting from a point
        else:
            pt = Gspos(nstr0)
            cn.fromptseg(pt, pseg1)

        # list all potential successors of interaction i1
        ui2 = Gi_no.index(i1)
        ui = np.where(Gi_A[ui2,:]!=0)[0]
        i2 = [Gi_no[u] for u in ui]
        # i2 = nx.neighbors(self.Gi, i1)
        # how to find neighbors without network
        # ngi=L.Gi.nodes()
        # A=nx.adjacency_matrix(L.Gi)
        # inter = ngi[10]
        # u = ngi.index(inter)
        # ui = A[u,:].indices
        # neigh_inter = np.array([ngi[u] for u in ui])
        ipoints = [x for x in i2 if len(x)==1 ]
        #ipoints = filter(lambda x: len(x) == 1, i2)
        pipoints = np.array([Gspos(ip[0]) for ip in ipoints]).T
        # filter tuple (R | T)
        #istup = filter(lambda x : type(eval(x))==tuple,i2)
        # map first argument segment number
        #isegments = np.unique(map(lambda x : eval(x)[0],istup))
        # isegments = np.unique(
        #     filter(lambda y: y > 0, map(lambda x: x[0], i2)))
        isegments = np.unique([x[0] for x in i2 if x[0]>0])
        # if nstr0 and nstr1 are adjescent segment remove nstr0 from
        # potential next interaction
        # Fix 01/2017
        # This is not always True if the angle between
        # the two adjascent segments is < pi/2
        # nb_nstr0 = self.Gs.neighbors(nstr0)
        # nb_nstr1 = self.Gs.neighbors(nstr1)
        # nb_nstr0 = np.array([self.s2pu[nstr0,0],self.s2pu[nstr0,1]])
        # nb_nstr1 = np.array([self.s2pu[nstr1,0],self.s2pu[nstr1,1]])
        nb_nstr0 = s2pu[nstr0,:].toarray()[0]
        nb_nstr1 = s2pu[nstr1,:].toarray()[0]
        print('nb_nstr0',nb_nstr0)
        #nb_nstr0 = s2pu[nstr0,:]
        #nb_nstr1 = s2pu[nstr1,:]
        # common_point = np.intersect1d(nb_nstr0,nb_nstr1)
        common_point = np.array([x for x in nb_nstr0 if x in nb_nstr1])
        # if len(common_point) == 1:
        if common_point.any():
            # the two segments share an end point: drop nstr0 from the
            # candidates when the segments fold back on each other
            num0 = [x for x in nb_nstr0 if x != common_point]
            num1 = [x for x in nb_nstr1 if x != common_point]
            p0 = Gspos(num0[0])
            p1 = Gspos(num1[0])
            pc = Gspos(common_point[0])
            v0 = p0-pc
            v1 = p1-pc
            v0n = v0/np.sqrt(np.sum(v0*v0))
            v1n = v1/np.sqrt(np.sum(v1*v1))
            if np.dot(v0n,v1n)<=0:
                isegments = np.array([ x for x in isegments if x != nstr0 ])
                # filter(lambda x: x != nstr0, isegments))
        # there are one or more segments
        # if len(isegments) > 0:
        if isegments.any():
            li1 = len(i1)
            # NOTE(review): `self` is referenced here although the
            # function takes no self — likely leftover from the method
            # version; confirm before use
            points = self.s2pc[isegments,:].toarray().T
            #points = s2pc[isegments,:].T
            # points = self.s2pc[isegments,:].data.reshape(4,len(isegments))
            # pointso = self.seg2pts(isegments)
            pta = points[0:2, :]
            phe = points[2:, :]
            # add difraction points
            # WARNING Diffraction points are added only if a segment is seen
            # it should be the case in 99% of cases
            if len(ipoints) > 0:
                isegments = np.hstack(
                    (isegments, np.array(ipoints)[:, 0]))
                pta = np.hstack((pta, pipoints))
                phe = np.hstack((phe, pipoints))

            # cn.show()

            # if i0 == (38,79) and i1 == (135,79,23):
            #     printi0,i1
            #     import ipdb
            #     ipdb.set_trace()
            # i1 : interaction T
            if li1 == 3:
                typ, prob = cn.belong_seg(pta, phe)
                # if bs.any():
                #    plu.displot(pta[:,bs],phe[:,bs],color='g')
                # if ~bs.any():
                #    plu.displot(pta[:,~bs],phe[:,~bs],color='k')

            # i1 : interaction R --> mirror
            elif li1 == 2:
                Mpta = geu.mirror(pta, pseg1[:, 0], pseg1[:, 1])
                Mphe = geu.mirror(phe, pseg1[:, 0], pseg1[:, 1])
                typ, prob = cn.belong_seg(Mpta, Mphe)
                # printi0,i1
                # if ((i0 == (6, 0)) & (i1 == (7, 0))):
                #    pdb.set_trace()
                # if bs.any():
                #    plu.displot(pta[:,bs],phe[:,bs],color='g')
                # if ~bs.any():
                #    plu.displot(pta[:,~bs],phe[:,~bs],color='m')
                #    plt.show()
                #    pdb.set_trace())
            ########
            # SOMETIMES PROBA IS 0 WHEREAS SEG IS SEEN
            ###########
            # # keep segment with prob above a threshold
            # isegkeep = isegments[prob>0]
            # # dict {numint : proba}
            # dsegprob = {k:v for k,v in zip(isegkeep,prob[prob>0])}
            # 4 lines are replaced by
            # keep segment with prob above a threshold
            utypseg = typ != 0
            isegkeep = isegments[utypseg]
            # dict {numint : proba}
            dsegprob = {k: v for k, v in zip(isegkeep, prob[utypseg])}
            #########
            # output = filter(lambda x: x[0] in isegkeep, i2)
            output = [x for x in i2 if x[0] in isegkeep]
            # probint = map(lambda x: dsegprob[x[0]], output)
            probint = [dsegprob[x[0]] for x in output]
            # dict interaction : proba
            dintprob = {k: v for k, v in zip(output, probint)}

            # keep all segment above nstr1 and in Cone if T
            # keep all segment below nstr1 and in Cone if R

    else:
        # central interaction is a point

        # 1) Simple approach
        # output interaction are all visible interactions
        # 2) TO BE DONE
        #
        # output of the diffraction points
        # exploring
        # b
        #   + right of ISB
        #   + right of RSB
        #
        #  + using the wedge cone
        #  + using the incident cone
        #
        # output = nx.neighbors(self.Gi, (nstr1,))
        uout = Gi_no.index((nstr1,))
        ui = np.where(Gi_A[uout,:]!=0)[0]
        output = [Gi_no[u] for u in ui]
        nout = len(output)
        probint = np.ones(nout)  # temporarybns
        dintprob = {k: v for k, v in zip(output, probint)}

    # NOTE(review): when nstr1 > 0 and no segment was kept above,
    # dintprob is unbound here — possible NameError; confirm callers
    return (i0,i1, {'output':dintprob})
    # self.Gi.add_edge(i0, i1, output=dintprob)
if __name__ == "__main__":
    # enable interactive matplotlib mode, then run the module doctests
    plt.ion()
    doctest.testmod()
    # L = Layout('Servon Sur Vilaine',verbose=True,dist_m=60)
    # L.build()
| pylayers/pylayers | pylayers/gis/layout.py | Python | mit | 367,668 | [
"Mayavi"
] | 436b86fad2dd4dd7f89da729b63263a5d2a468f840dc80744b26437777b2f688 |
from __future__ import print_function
import json
import os
import os.path
import re
import sys
import warnings
from collections import defaultdict
from distutils.command.build_scripts import build_scripts as BuildScripts
from distutils.command.sdist import sdist as SDist
try:
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as BuildPy
from setuptools.command.install_lib import install_lib as InstallLib
from setuptools.command.install_scripts import install_scripts as InstallScripts
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).", file=sys.stderr)
sys.exit(1)
sys.path.insert(0, os.path.abspath('lib'))
from ansible.release import __version__, __author__
SYMLINK_CACHE = 'SYMLINK_CACHE.json'
def _find_symlinks(topdir, extension=''):
    """Find symlinks that should be maintained

    Maintained symlinks exist in the bin dir or are modules which have
    aliases. Our heuristic is that they are a link in a certain path which
    point to a file in the same directory.

    :arg topdir: directory tree to walk for symlinks
    :arg extension: only consider links whose name ends with this suffix
    :returns: dict mapping link target to the list of link paths
        (relative to *topdir*) that point at it

    .. warn::

        We want the symlinks in :file:`bin/` that link into :file:`lib/ansible/*` (currently,
        :command:`ansible`, :command:`ansible-test`, and :command:`ansible-connection`) to become
        real files on install. Updates to the heuristic here *must not* add them to the symlink
        cache.
    """
    symlinks = defaultdict(list)
    for base_path, dirs, files in os.walk(topdir):
        for filename in files:
            filepath = os.path.join(base_path, filename)
            if os.path.islink(filepath) and filename.endswith(extension):
                target = os.readlink(filepath)
                if target.startswith('/'):
                    # We do not support absolute symlinks at all
                    continue

                if os.path.dirname(target) == '':
                    # target lives in the same directory: record the link
                    # path relative to topdir
                    link = filepath[len(topdir):]
                    if link.startswith('/'):
                        link = link[1:]
                    symlinks[os.path.basename(target)].append(link)
                else:
                    # Count how many directory levels from the topdir we are
                    levels_deep = os.path.dirname(filepath).count('/')

                    # Count the number of directory levels higher we walk up the tree in target
                    target_depth = 0
                    for path_component in target.split('/'):
                        if path_component == '..':
                            target_depth += 1
                            # If we walk past the topdir, then don't store
                            if target_depth >= levels_deep:
                                break
                        else:
                            target_depth -= 1
                    else:
                        # If we managed to stay within the tree, store the symlink
                        link = filepath[len(topdir):]
                        if link.startswith('/'):
                            link = link[1:]
                        symlinks[target].append(link)

    return symlinks
def _cache_symlinks(symlink_data):
    """Persist the symlink map to SYMLINK_CACHE as JSON."""
    with open(SYMLINK_CACHE, 'w') as cache_file:
        cache_file.write(json.dumps(symlink_data))
def _maintain_symlinks(symlink_type, base_path):
    """Switch a real file into a symlink

    Recreates, under *base_path*, the symlinks recorded in SYMLINK_CACHE,
    building the cache on the fly when running from a git checkout or an
    earlier sdist.

    :arg symlink_type: 'script' or 'library' — which cache section to apply
    :arg base_path: directory under which the symlinks are recreated
    """
    try:
        # Try the cache first because going from git checkout to sdist is the
        # only time we know that we're going to cache correctly
        with open(SYMLINK_CACHE, 'r') as f:
            symlink_data = json.load(f)
    except (IOError, OSError) as e:
        # IOError on py2, OSError on py3. Both have errno
        if e.errno == 2:
            # SYMLINKS_CACHE doesn't exist. Fallback to trying to create the
            # cache now. Will work if we're running directly from a git
            # checkout or from an sdist created earlier.
            library_symlinks = _find_symlinks('lib', '.py')
            library_symlinks.update(_find_symlinks('test/lib'))

            symlink_data = {'script': _find_symlinks('bin'),
                            'library': library_symlinks,
                            }

            # Sanity check that something we know should be a symlink was
            # found. We'll take that to mean that the current directory
            # structure properly reflects symlinks in the git repo
            if 'ansible-playbook' in symlink_data['script']['ansible']:
                _cache_symlinks(symlink_data)
            else:
                raise RuntimeError(
                    "Pregenerated symlink list was not present and expected "
                    "symlinks in ./bin were missing or broken. "
                    "Perhaps this isn't a git checkout?"
                )
        else:
            raise
    symlinks = symlink_data[symlink_type]

    for source in symlinks:
        for dest in symlinks[source]:
            dest_path = os.path.join(base_path, dest)
            if not os.path.islink(dest_path):
                # dest is currently a real file: remove it (ignoring a
                # missing file) and replace it with the recorded symlink
                try:
                    os.unlink(dest_path)
                except OSError as e:
                    if e.errno == 2:
                        # File does not exist which is all we wanted
                        pass
                os.symlink(source, dest_path)
class BuildPyCommand(BuildPy):
    """build_py that restores the maintained library symlinks in the build tree."""
    def run(self):
        BuildPy.run(self)
        _maintain_symlinks('library', self.build_lib)
class BuildScriptsCommand(BuildScripts):
    """build_scripts that restores the maintained script symlinks."""
    def run(self):
        BuildScripts.run(self)
        _maintain_symlinks('script', self.build_dir)
class InstallLibCommand(InstallLib):
    """install_lib that restores the maintained library symlinks on install."""
    def run(self):
        InstallLib.run(self)
        _maintain_symlinks('library', self.install_dir)
class InstallScriptsCommand(InstallScripts):
    """install_scripts that restores the maintained script symlinks on install."""
    def run(self):
        InstallScripts.run(self)
        _maintain_symlinks('script', self.install_dir)
class SDistCommand(SDist):
    """sdist that records the symlink cache before packaging.

    sdist is the only command with access to the symlinks of the git
    checkout, so it generates SYMLINK_CACHE.json for later build/install
    steps to consume.
    """
    def run(self):
        # have to generate the cache of symlinks for release as sdist is the
        # only command that has access to symlinks from the git repo
        library_symlinks = _find_symlinks('lib', '.py')
        library_symlinks.update(_find_symlinks('test/lib'))

        symlinks = {'script': _find_symlinks('bin'),
                    'library': library_symlinks,
                    }

        _cache_symlinks(symlinks)

        SDist.run(self)

        # Print warnings at the end because no one will see warnings before all the normal status
        # output
        if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1':
            warnings.warn('When setup.py sdist is run from outside of the Makefile,'
                          ' the generated tarball may be incomplete. Use `make snapshot`'
                          ' to create a tarball from an arbitrary checkout or use'
                          ' `cd packaging/release && make release version=[..]` for official builds.',
                          RuntimeWarning)
def read_file(file_name):
    """Return the entire contents of *file_name* as a string."""
    with open(file_name, 'r') as handle:
        contents = handle.read()
    return contents
def read_requirements(file_name):
    """Read requirements file as a list.

    :arg file_name: path of the pip requirements file
    :returns: list of requirement lines
    :raises RuntimeError: when the file is empty, which indicates an
        incomplete copy of the source code
    """
    reqs = read_file(file_name).splitlines()
    if not reqs:
        # Bug fix: the two literals previously concatenated with no
        # separator, producing "...fileThat indicates..." in the message.
        raise RuntimeError(
            "Unable to read requirements from the %s file. "
            "That indicates this copy of the source code is incomplete."
            % file_name
        )
    return reqs
PYCRYPTO_DIST = 'pycrypto'
def get_crypto_req():
    """Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var.

    pycrypto or cryptography. We choose a default but allow the user to
    override it. This translates into pip install of the sdist deciding what
    package to install and also the runtime dependencies that pkg_resources
    knows about.
    """
    selected = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip()
    if selected == PYCRYPTO_DIST:
        # Attempt to set version requirements
        return '%s >= 2.6' % PYCRYPTO_DIST
    if selected:
        return selected
    return None
def substitute_crypto_to_req(req):
    """Replace crypto requirements if customized.

    Returns *req* unchanged when no backend override is configured;
    otherwise strips pycrypto/cryptography entries and appends the
    chosen backend requirement.
    """
    backend = get_crypto_req()
    if backend is None:
        return req

    crypto_libs = (PYCRYPTO_DIST, 'cryptography')
    kept = [entry for entry in req
            if not any(entry.lower().startswith(lib) for lib in crypto_libs)]
    return kept + [backend]
def read_extras():
    """Specify any extra requirements for installation.

    Scans packaging/requirements for requirements-<name>.txt files and
    maps each <name> to its requirement lines.
    """
    extras = {}
    req_dir = 'packaging/requirements'
    name_re = re.compile(r'^requirements-(\w*).txt$')
    for candidate in os.listdir(req_dir):
        matched = name_re.search(candidate)
        if not matched:
            continue
        req_path = os.path.join(req_dir, candidate)
        try:
            extras[matched.group(1)] = read_file(req_path).splitlines()
        except RuntimeError:
            # best effort: skip files that cannot be read
            pass
    return extras
def get_dynamic_setup_params():
    """Add dynamically calculated setup params to static ones."""
    return {
        # Retrieve the long description from the README
        'long_description': read_file('README.rst'),
        # requirements.txt, with the crypto library possibly swapped for
        # the backend selected via ANSIBLE_CRYPTO_BACKEND
        'install_requires': substitute_crypto_to_req(
            read_requirements('requirements.txt'),
        ),
        'extras_require': read_extras(),
    }
# Static (non-computed) arguments for setup(); the dynamic ones
# (long_description, install_requires, extras_require) are merged in by
# get_dynamic_setup_params() inside main().
static_setup_params = dict(
    # Use the distutils SDist so that symlinks are not expanded
    # Use a custom Build for the same reason
    cmdclass={
        'build_py': BuildPyCommand,
        'build_scripts': BuildScriptsCommand,
        'install_lib': InstallLibCommand,
        'install_scripts': InstallScriptsCommand,
        'sdist': SDistCommand,
    },
    name='ansible',
    version=__version__,
    description='Radically simple IT automation',
    author=__author__,
    author_email='info@ansible.com',
    url='https://ansible.com/',
    project_urls={
        'Bug Tracker': 'https://github.com/ansible/ansible/issues',
        'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible',
        'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html',
        'Documentation': 'https://docs.ansible.com/ansible/',
        'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information',
        'Source Code': 'https://github.com/ansible/ansible',
    },
    license='GPLv3+',
    # Ansible will also make use of a system copy of python-six and
    # python-selectors2 if installed but use a Bundled copy if it's not.
    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
    package_dir={'': 'lib',
                 'ansible_test': 'test/lib/ansible_test'},
    packages=find_packages('lib') + find_packages('test/lib'),
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    scripts=[
        'bin/ansible',
        'bin/ansible-playbook',
        'bin/ansible-pull',
        'bin/ansible-doc',
        'bin/ansible-galaxy',
        'bin/ansible-console',
        'bin/ansible-connection',
        'bin/ansible-vault',
        'bin/ansible-config',
        'bin/ansible-inventory',
        'bin/ansible-test',
    ],
    data_files=[
        ("contrib/inventory", [
            "contrib/inventory/ec2.py",
            "contrib/inventory/lxd.py"
        ]),
    ],
    # Installing as zip files would break due to references to __file__
    zip_safe=False
)
def main():
    """Invoke installation process using setuptools."""
    setup_params = dict(static_setup_params, **get_dynamic_setup_params())
    ignore_warning_regex = (
        r"Unknown distribution option: '(project_urls|python_requires)'"
    )
    # Older setuptools/distutils versions do not know these options;
    # silence the resulting UserWarning for the duration of setup() only.
    warnings.filterwarnings(
        'ignore',
        message=ignore_warning_regex,
        category=UserWarning,
        module='distutils.dist',
    )
    setup(**setup_params)
    warnings.resetwarnings()
| 2ndQuadrant/ansible | setup.py | Python | gpl-3.0 | 13,081 | [
"Galaxy"
] | 4500dc85b70e57d018ebd30512ae59dfeba5321b70d87a30d4db27319619ad1b |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to authenticate using OAuth2.
This example is meant to be run from the command line and requires
user input.
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
import httplib2
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
from adspygoogle import DfpClient
from oauth2client.client import FlowExchangeError
from oauth2client.client import OAuth2WebServerFlow
# Placeholders: replace with your application name and DFP network code
# before running this example.
APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'
NETWORK_CODE = 'INSERT_NETWORK_CODE_HERE'

# Visit https://code.google.com/apis/console to generate your client_id,
# client_secret and to register your redirect_uri.
# See the oauth2client wiki for more information on performing the OAuth2 flow:
# http://code.google.com/p/google-api-python-client/wiki/OAuth2
OAUTH2_CLIENT_ID = 'INSERT_OAUTH2_CLIENT_ID_HERE'
OAUTH2_CLIENT_SECRET = 'INSERT_OAUTH2_CLIENT_SECRET_HERE'
def main(application_name, network_code, oauth2_client_id,
         oauth2_client_secret):
    """Run the interactive OAuth2 flow and list accessible DFP networks.

    NOTE: Python 2 only (print statements, raw_input, `except X, e`).
    """
    # We're using the oauth2client library:
    # http://code.google.com/p/google-api-python-client/downloads/list
    flow = OAuth2WebServerFlow(
        client_id=oauth2_client_id,
        client_secret=oauth2_client_secret,
        scope='https://www.google.com/apis/ads/publisher',
        user_agent='oauth2 code example')

    # Get the authorization URL to direct the user to.
    authorize_url = flow.step1_get_authorize_url()

    print ('Log in to your DFP account and open the following URL: \n%s\n' %
           authorize_url)
    print 'After approving the token enter the verification code (if specified).'
    code = raw_input('Code: ').strip()

    credential = None
    try:
        # Exchange the one-time authorization code for OAuth2 credentials.
        credential = flow.step2_exchange(code)
    except FlowExchangeError, e:
        sys.exit('Authentication has failed: %s' % e)

    # Create the DfpClient and set the OAuth2 credentials.
    client = DfpClient(headers={
        'networkCode': network_code,
        'applicationName': application_name,
        'userAgent': 'OAuth2 Example',
        'oauth2credentials': credential
    })

    # OAuth2 credentials objects can be reused
    credentials = client.oauth2credentials
    print 'OAuth2 authorization successful!'

    # OAuth2 credential objects can be refreshed via credentials.refresh() - the
    # access token expires after 1 hour.
    credentials.refresh(httplib2.Http())

    # Note: you could simply set the credentials as below and skip the previous
    # steps once access has been granted.
    client.oauth2credentials = credentials

    network_service = client.GetService('NetworkService', version='v201208')

    # Get all networks that you have access to with the current login credentials.
    networks = network_service.GetAllNetworks()

    for network in networks:
        print ('Network with network code \'%s\' and display name \'%s\' was found.'
               % (network['networkCode'], network['displayName']))

    print
    print 'Number of results found: %s' % len(networks)
if __name__ == '__main__':
    # Run the example with the module-level placeholder credentials.
    main(APPLICATION_NAME, NETWORK_CODE, OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET)
| donspaulding/adspygoogle | examples/adspygoogle/dfp/v201208/misc/use_oauth2.py | Python | apache-2.0 | 3,643 | [
"VisIt"
] | a9f4034bb54e91949c8c846c1877437e19e4a9e8551b3e01be486160089edb9c |
"""
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.validation import _check_sample_weight, check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils.extmath import row_norms
from ._ball_tree import BallTree, DTYPE
from ._kd_tree import KDTree
# Kernel names accepted by KernelDensity
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
                 'cosine']
# Mapping from algorithm name to the tree implementation used by fit()
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}


# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation.
Read more in the :ref:`User Guide <kernel_density>`.
Parameters
----------
bandwidth : float, default=1.0
The bandwidth of the kernel.
algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
The tree algorithm to use.
kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
'cosine'}, default='gaussian'
The kernel to use.
metric : str, default='euclidean'
The distance metric to use. Note that not all metrics are
valid with all algorithms. Refer to the documentation of
:class:`BallTree` and :class:`KDTree` for a description of
available algorithms. Note that the normalization of the density
output is correct only for the Euclidean distance metric. Default
is 'euclidean'.
atol : float, default=0
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution.
rtol : float, default=0
The desired relative tolerance of the result. A larger tolerance will
generally lead to faster execution.
breadth_first : bool, default=True
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int, default=40
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details.
metric_params : dict, default=None
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
Attributes
----------
tree_ : ``BinaryTree`` instance
The tree algorithm for fast generalized N-point problems.
See Also
--------
sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
problems.
sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
problems.
Examples
--------
Compute a gaussian kernel density estimate with a fixed bandwidth.
>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> X = rng.random_sample((100, 3))
>>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
>>> log_density = kde.score_samples(X[:3])
>>> log_density
array([-1.52955942, -1.51462041, -1.60244657])
"""
@_deprecate_positional_args
def __init__(self, *, bandwidth=1.0, algorithm='auto',
             kernel='gaussian', metric="euclidean", atol=0, rtol=0,
             breadth_first=True, leaf_size=40, metric_params=None):
    # Store hyper-parameters verbatim (scikit-learn convention: the
    # constructor must not transform its arguments).
    self.algorithm = algorithm
    self.bandwidth = bandwidth
    self.kernel = kernel
    self.metric = metric
    self.atol = atol
    self.rtol = rtol
    self.breadth_first = breadth_first
    self.leaf_size = leaf_size
    self.metric_params = metric_params

    # run the choose algorithm code so that exceptions will happen here
    # we're using clone() in the GenerativeBayes classifier,
    # so we can't do this kind of logic in __init__
    self._choose_algorithm(self.algorithm, self.metric)

    # Eager validation of scalar hyper-parameters.
    if bandwidth <= 0:
        raise ValueError("bandwidth must be positive")
    if kernel not in VALID_KERNELS:
        raise ValueError("invalid kernel: '{0}'".format(kernel))
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == 'auto':
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return 'kd_tree'
elif metric in BallTree.valid_metrics:
return 'ball_tree'
else:
raise ValueError("invalid metric: '{0}'".format(metric))
elif algorithm in TREE_DICT:
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError("invalid metric for {0}: "
"'{1}'".format(TREE_DICT[algorithm],
metric))
return algorithm
else:
raise ValueError("invalid algorithm: '{0}'".format(algorithm))
def fit(self, X, y=None, sample_weight=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : array-like of shape (n_samples,), default=None
List of sample weights attached to the data X.
.. versionadded:: 0.20
Returns
-------
self : object
Returns instance of object.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
X = self._validate_data(X, order='C', dtype=DTYPE)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, DTYPE)
if sample_weight.min() <= 0:
raise ValueError("sample_weight must have positive values")
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
leaf_size=self.leaf_size,
sample_weight=sample_weight,
**kwargs)
return self
def score_samples(self, X):
"""Evaluate the log density model on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray of shape (n_samples,)
The array of log(density) evaluations. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
check_is_fitted(self)
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = self._validate_data(X, order='C', dtype=DTYPE, reset=False)
if self.tree_.sample_weight is None:
N = self.tree_.data.shape[0]
else:
N = self.tree_.sum_weight
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
log_density -= np.log(N)
return log_density
def score(self, X, y=None):
"""Compute the total log probability density under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
Returns
-------
logprob : float
Total log-likelihood of the data in X. This is normalized to be a
probability density, so the value will be low for high-dimensional
data.
"""
return np.sum(self.score_samples(X))
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Currently, this is implemented only for gaussian and tophat kernels.

        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to generate.
        random_state : int, RandomState instance or None, default=None
            Determines random number generation used to generate
            random samples. Pass an int for reproducible results
            across multiple function calls.
            See :term: `Glossary <random_state>`.

        Returns
        -------
        X : array-like of shape (n_samples, n_features)
            List of samples.
        """
        check_is_fitted(self)
        # TODO: implement sampling for other valid kernel shapes
        if self.kernel not in ['gaussian', 'tophat']:
            raise NotImplementedError()
        data = np.asarray(self.tree_.data)
        rng = check_random_state(random_state)
        # Choose the training point each sample is centred on: uniformly at
        # random when unweighted, otherwise proportionally to the sample
        # weights (inverse-CDF sampling on the cumulative weight vector).
        u = rng.uniform(0, 1, size=n_samples)
        if self.tree_.sample_weight is None:
            i = (u * data.shape[0]).astype(np.int64)
        else:
            cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
            sum_weight = cumsum_weight[-1]
            i = np.searchsorted(cumsum_weight, u * sum_weight)
        if self.kernel == 'gaussian':
            # Gaussian kernel: perturb each chosen point with N(0, bandwidth^2).
            return np.atleast_2d(rng.normal(data[i], self.bandwidth))
        elif self.kernel == 'tophat':
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            dim = data.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = row_norms(X, squared=True)
            correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
                          * self.bandwidth / np.sqrt(s_sq))
            return data[i] + X * correction[:, np.newaxis]
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'sample_weight must have positive values',
}
}
| glemaitre/scikit-learn | sklearn/neighbors/_kde.py | Python | bsd-3-clause | 10,839 | [
"Gaussian"
] | 5163d51e1e9391b5db021d89977d2e2d716bcc3cef373f4808695bf3d81175b0 |
# -*- coding: utf-8 -*-
#
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import annotations
from typing import Dict
from typing import List
from typing import Union
from typing import Tuple
import socket
import select
import threading
from kivy.clock import Clock
from kivy.uix.button import Button
from ORCA.scripts.BaseScriptSettings import cBaseScriptSettings
from ORCA.scripttemplates.Template_Discover import cDiscoverScriptTemplate
from ORCA.ui.ShowErrorPopUp import ShowMessagePopUp
from ORCA.utils.TypeConvert import ToFloat
from ORCA.utils.TypeConvert import ToBool
from ORCA.vars.QueryDict import TypedQueryDict
from ORCA.utils.FileName import cFileName
from ORCA.utils.TypeConvert import ToUnicode
import ORCA.Globals as Globals
'''
<root>
<repositorymanager>
<entry>
<name>ELV MAX Discover</name>
<description language='English'>Discover ELV MAX cubes</description>
<description language='German'>Erkennt bwz. sucht ELV MAX Cubes</description>
<author>Carsten Thielepape</author>
<version>5.0.4</version>
<minorcaversion>5.0.4</minorcaversion>
<sources>
<source>
<local>$var(APPLICATIONPATH)/scripts/discover/discover_elvmax</local>
<sourcefile>$var(REPOSITORYWWWPATH)/scripts/discover_elvmax.zip</sourcefile>
<targetpath>scripts/discover</targetpath>
</source>
</sources>
<skipfiles>
</skipfiles>
</entry>
</repositorymanager>
</root>
'''
class cScript(cDiscoverScriptTemplate):
    """
    WikiDoc:Doc
    WikiDoc:Context:Scripts
    WikiDoc:Page:Scripts-Discover-ELVMAX
    WikiDoc:TOCTitle:Discover ELVMAX
    = Script Discover ELVMAX =
    The ELV MAX discover script discovers ELV MAX cubes for heating control.
    You can filter the discover result by passing the following parameters::
    <div style="overflow:auto; ">
    {| class="wikitable"
    ! align="left" | Attribute
    ! align="left" | Description
    |-
    |serial
    |Discover models only with a specific serial number (leave it blank to discover all)
    |-
    |timeout
    |Specifies the timeout for discover
    |}</div>
    WikiDoc:End
    """

    class cScriptSettings(cBaseScriptSettings):
        """Settings object for the ELV MAX discover script."""
        def __init__(self, oScript: cScript):
            super().__init__(oScript)
            # Default socket timeout (seconds) while waiting for replies.
            self.aIniSettings.fTimeOut = 5.0

    def __init__(self):
        super().__init__()
        self.uSubType: str = u'ELVMAX'
        self.aResults: List[TypedQueryDict] = []      # all devices found in the current run
        self.dDevices: Dict[str, TypedQueryDict] = {} # de-duplication map, keyed by ip+serial+hostname
        self.dReq = TypedQueryDict()                  # filter criteria (serial) for the current run
        self.uScriptTitle = u'ELV:MAX Discovery'

    def Init(self, uObjectName: str, oFnScript: Union[cFileName, None] = None) -> None:
        """Initializes the script and enables the timeout setting in the config UI."""
        super().Init(uObjectName=uObjectName, oFnObject=oFnScript)
        self.oObjectConfig.dDefaultSettings['TimeOut']['active'] = "enabled"

    def GetHeaderLabels(self) -> List[str]:
        """Returns the column headers (IP, serial number, hostname) for the discover list."""
        return ['$lvar(5029)', '$lvar(SCRIPT_DISC_ELVMAX_1)', '$lvar(5035)']

    def ListDiscover(self) -> None:
        """Starts a discover run for the UI list; the work is scheduled asynchronously."""
        self.SendStartNotification()
        Clock.schedule_once(self.ListDiscover_Step2, 0)
        return

    def ListDiscover_Step2(self, *largs):
        """Scheduled second step of ListDiscover: runs a non-blocking discover of all devices."""
        dDevice: TypedQueryDict
        dArgs: Dict = {"onlyonce": 0,
                       "ipversion": "All",
                       "donotwait": 1}
        self.dDevices.clear()
        self.Discover(**dArgs)

    def CreateDiscoverList_ShowDetails(self, oButton: Button) -> None:
        """Shows a popup with the details of the device attached to the pressed list button."""
        dDevice: TypedQueryDict = oButton.dDevice
        uText = u"$lvar(5029): %s \n" \
                u"$lvar(5035): %s \n" \
                u"$lvar(1063): %s \n" \
                u"$lvar(SCRIPT_DISC_ELVMAX_1): %s " % (dDevice.uFoundIP, dDevice.uFoundHostName, dDevice.uFoundName, dDevice.uFoundSerial)
        ShowMessagePopUp(uMessage=uText)

    @classmethod
    def GetConfigJSONforParameters(cls, dDefaults: Dict) -> Dict[str, Dict]:
        """Returns the JSON description of the script parameters for the settings dialog."""
        return {"Serial Number": {"type": "string", "order": 0, "title": "$lvar(SCRIPT_DISC_ELVMAX_1)", "desc": "$lvar(SCRIPT_DISC_ELVMAX_1)", "key": "serialnumber", "default": ""}
                }

    def Discover(self, **kwargs):
        """Discovers ELV MAX cubes on the network.

        Keyword arguments: serialnumber (filter, wildcards allowed), configname,
        timeout (seconds), onlyonce, donotwait.  When waiting, returns a
        TypedQueryDict with keys Host/Hostname/Serial/Name of the first device
        found (empty values if none was found); in "do not wait" mode the
        results are collected asynchronously and None is returned.
        """
        self.dReq.uSerial = kwargs.get('serialnumber', "")
        uConfigName: str = kwargs.get('configname', self.uConfigName)
        oSetting: cBaseScriptSettings = self.GetSettingObjectForConfigName(uConfigName=uConfigName)
        fTimeOut: float = ToFloat(kwargs.get('timeout', oSetting.aIniSettings.fTimeOut))
        bOnlyOnce: bool = ToBool(kwargs.get('onlyonce', "1"))
        self.bDoNotWait = ToBool(kwargs.get('donotwait', "0"))
        del self.aResults[:]
        del self.aThreads[:]
        self.ShowDebug(uMsg=u'Try to discover ELV MAX device: %s ' % self.dReq.uSerial)
        try:
            oThread = cThread_Discover_ELVMAX(bOnlyOnce=bOnlyOnce, dReq=self.dReq, uIPVersion="IPv4Only", fTimeOut=fTimeOut, oCaller=self)
            self.aThreads.append(oThread)
            self.aThreads[-1].start()
            if not self.bDoNotWait:
                for oT in self.aThreads:
                    oT.join()
                self.SendEndNotification()
                if len(self.aResults) > 0:
                    return TypedQueryDict([("Host", self.aResults[0].uFoundIP), ("Hostname", self.aResults[0].uFoundHostName), ("Serial", self.aResults[0].uFoundSerial), ("Name", self.aResults[0].uFoundName)])
                else:
                    self.ShowWarning(uMsg=u'No ELV MAX Cube found %s' % self.dReq.uSerial)
                    # BUGFIX: the empty result dict was created here but never
                    # returned, so callers received None instead.
                    return TypedQueryDict([("Host", ""), ("Hostname", ""), ("Serial", ""), ("Name", "")])
            else:
                self.ClockCheck = Clock.schedule_interval(self.CheckFinished, 0.1)
        except Exception as e:
            self.ShowError(uMsg="Error on Discover", oException=e)
            return TypedQueryDict([("Host", ""), ("Hostname", ""), ("Serial", ""), ("Name", "")])
class cThread_Discover_ELVMAX(threading.Thread):
    """Worker thread performing a UDP broadcast discovery of ELV MAX cubes."""

    # Shared lock serialising access to the caller's result structures.
    oWaitLock = threading.Lock()

    def __init__(self, bOnlyOnce: bool, dReq: TypedQueryDict, uIPVersion: str, fTimeOut: float, oCaller: cScript):
        """
        :param bOnlyOnce: stop as soon as any thread has found a device
        :param dReq: request filter (uSerial may contain wildcards)
        :param uIPVersion: IP version to use (only IPv4 is implemented here)
        :param fTimeOut: receive timeout in seconds
        :param oCaller: the script object collecting the results
        """
        threading.Thread.__init__(self)
        self.bOnlyOnce: bool = bOnlyOnce
        self.uIPVersion: str = uIPVersion
        self.oCaller: cScript = oCaller
        self.fTimeOut: float = fTimeOut
        self.dReq: TypedQueryDict = dReq
        self.iPort: int = 23272  # UDP port of the eQ-3/ELV MAX discovery protocol

    def run(self) -> None:
        bReturnNow: bool = False
        if self.bOnlyOnce:
            # Under the lock, check whether another thread already succeeded.
            cThread_Discover_ELVMAX.oWaitLock.acquire()
            if len(self.oCaller.aResults) > 0:
                bReturnNow = True
            cThread_Discover_ELVMAX.oWaitLock.release()
        if bReturnNow:
            return
        self.Discover()

    def Discover(self) -> None:
        """Broadcasts the identify datagram and collects responses until timeout."""
        oSendSocket: Union[socket.socket, None] = None
        oReceiveSocket: Union[socket.socket, None] = None
        try:
            oSendSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            oSendSocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            oSendSocket.settimeout(10)
            # "eQ3Max*\0**********I": identify request of the MAX cube protocol.
            byData: bytearray = bytearray("eQ3Max", "utf-8") + \
                                bytearray("*\0", "utf-8") + \
                                bytearray('*' * 10, "utf-8") + \
                                bytearray('I', "utf-8")
            oSendSocket.sendto(byData, ("<broadcast>", self.iPort))
            oReceiveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            oReceiveSocket.settimeout(self.fTimeOut)
            oReceiveSocket.bind(("0.0.0.0", self.iPort))
            while True:
                # we do not wait too long
                aReady: Tuple = select.select([oReceiveSocket], [], [], self.fTimeOut)
                if aReady[0]:
                    # Get a response
                    byData, tSenderAddr = oReceiveSocket.recvfrom(50)
                    dRet = self.GetDeviceDetails(byData, tSenderAddr)
                    self.CheckDeviceDetails(dRet=dRet)
                    if dRet.bFound:
                        uTageLine: str = dRet.uFoundIP + dRet.uFoundSerial + dRet.uFoundHostName
                        if self.oCaller.dDevices.get(uTageLine) is None:
                            cThread_Discover_ELVMAX.oWaitLock.acquire()
                            self.oCaller.dDevices[uTageLine] = dRet
                            self.oCaller.aResults.append(dRet)
                            Globals.oNotifications.SendNotification(uNotification="DISCOVER_SCRIPTFOUND", **{"script": self, "scriptname": self.oCaller.uObjectName, "line": [dRet.uFoundIP, dRet.uFoundSerial, dRet.uFoundHostName], "device": dRet})
                            cThread_Discover_ELVMAX.oWaitLock.release()
                else:
                    break
        except Exception as e:
            self.oCaller.ShowError(uMsg="Error on Discover", oException=e)
        finally:
            # IMPROVEMENT: close sockets in a finally block so they are also
            # released when an exception occurs after only one was opened.
            if oSendSocket:
                oSendSocket.close()
            if oReceiveSocket:
                oReceiveSocket.close()

    def GetDeviceDetails(self, byData: bytes, tSenderAddr: Tuple) -> TypedQueryDict:
        """Parses a discovery response into a TypedQueryDict (name, serial, ip, hostname)."""
        dRet: TypedQueryDict = TypedQueryDict()
        dRet.bFound = True
        dRet.uFoundIP = tSenderAddr[0]
        dRet.uData = ToUnicode(byData[:18])
        dRet.uFoundName = byData[0:8].decode('utf-8')     # bytes 0..7: device name
        dRet.uFoundSerial = byData[8:18].decode('utf-8')  # bytes 8..17: serial number
        dRet.uFoundHostName = socket.gethostbyaddr(dRet.uFoundIP)[0]
        dRet.uIPVersion = u"IPv4"
        self.oCaller.ShowInfo(uMsg=u'Discovered device %s:%s:%s at %s' % (dRet.uFoundName, dRet.uFoundHostName, dRet.uFoundSerial, dRet.uFoundIP))
        return dRet

    def CheckDeviceDetails(self, dRet: TypedQueryDict) -> None:
        """Applies the serial-number filter: clears dRet.bFound on a mismatch."""
        if dRet.bFound:
            if self.dReq.uSerial != u'':
                # BUGFIX: the original called MatchWildCard(), which is not
                # imported anywhere in this module and raised a NameError as
                # soon as a serial filter was set.  fnmatch provides the same
                # "*"/"?" wildcard semantics from the standard library.
                # NOTE(review): fnmatchcase is case sensitive - confirm this
                # matches the behaviour of ORCA's MatchWildCard helper.
                import fnmatch
                dRet.bFound = fnmatch.fnmatchcase(dRet.uFoundSerial, self.dReq.uSerial)
| thica/ORCA-Remote | src/scripts/discover/discover_elvmax/script.py | Python | gpl-3.0 | 11,740 | [
"ORCA"
] | bd379a9a46b5d006161b24a580dd5762df46cc17a5878d44c4679e50043ac7b4 |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Extract "1.6" from the CVS revision keyword string.
__version__='$Revision: 1.6 $'[11:-2]
from SelectCompiler import ast
# Concrete sequence types the walker descends into (pre-"isinstance" style).
ListType = type([])
TupleType = type(())
SequenceTypes = (ListType, TupleType)
class MutatingWalker:
    """Walks a compiler-ast tree, letting a visitor replace nodes in place.

    For every node the walker looks for a ``visit<ClassName>`` method on the
    visitor, falling back to ``defaultVisitNode``.  Whatever the visitor
    returns takes the place of the original object in its parent.
    """

    def __init__(self, visitor):
        self.visitor = visitor
        # Maps node classes to the visitor method chosen for them.
        self._cache = {}

    def defaultVisitNode(self, node, walker=None, exclude=None):
        """Dispatch over all attributes of ``node``, replacing changed children."""
        for attrName, child in node.__dict__.items():
            if exclude is not None and attrName in exclude:
                continue
            replacement = self.dispatchObject(child)
            if replacement is not child:
                # Replace the node.
                node.__dict__[attrName] = replacement
        return node

    def visitSequence(self, seq):
        """Dispatch over the items of a list/tuple, replacing changed items."""
        result = seq
        for position, child in enumerate(seq):
            replacement = self.dispatchObject(child)
            if replacement is not child:
                if type(result) is ListType:
                    # Lists are mutated in place (same object as seq).
                    result[position : position + 1] = [replacement]
                else:
                    # Tuples are immutable; rebuild around the new item.
                    result = result[:position] + (replacement,) + result[position + 1:]
        return result

    def dispatchObject(self, ob):
        '''
        Expected to return either ob or something that will take
        its place.
        '''
        if isinstance(ob, ast.Node):
            return self.dispatchNode(ob)
        if type(ob) in SequenceTypes:
            return self.visitSequence(ob)
        return ob

    def dispatchNode(self, node):
        """Look up (and memoize) the visitor method for ``node``, then apply it."""
        nodeClass = node.__class__
        handler = self._cache.get(nodeClass, None)
        if handler is None:
            handler = getattr(self.visitor, 'visit' + nodeClass.__name__,
                              self.defaultVisitNode)
            self._cache[nodeClass] = handler
        return handler(node, self)
def walk(tree, visitor):
    """Convenience wrapper: walk ``tree`` with ``visitor``, returning the result."""
    walker = MutatingWalker(visitor)
    return walker.dispatchNode(tree)
| tempson-py/tempson | tempson/RestrictedPython/MutatingWalker.py | Python | mit | 2,436 | [
"VisIt"
] | f5b2b703368b191c426d9c38324201e0223d93a5d2aa08ed6a0ea45ab05bbbf9 |
# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Type, Union, cast
import tensorflow as tf
from .. import kernels
from .. import mean_functions as mfn
from ..inducing_variables import InducingPoints, InducingVariables
from ..probability_distributions import DiagonalGaussian, Gaussian, MarkovGaussian
from . import dispatch
from .expectations import ExpectationObject, PackedExpectationObject, expectation
NoneType: Type[None] = type(None)
# ================ exKxz transpose and mean function handling =================
@dispatch.expectation.register(
    (Gaussian, MarkovGaussian), mfn.Identity, NoneType, kernels.Linear, InducingPoints
)
def _expectation_gaussian__linear_inducingpoints(
    p: Union[Gaussian, MarkovGaussian],
    mean: mfn.Identity,
    _: None,
    kernel: kernels.Linear,
    inducing_variable: InducingPoints,
    nghp: None = None,
) -> tf.Tensor:
    """
    Compute the expectation:
    expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
    - K_{., .} :: Linear kernel
    or the equivalent for MarkovGaussian.

    Implemented by transposing the mirrored (kernel, mean) expectation,
    which has shape NxMxD, into the required NxDxM.
    :return: NxDxM
    """
    # Consistency fix: forward nghp like the sibling registration below does;
    # nghp is always None here, so behaviour is unchanged.
    return tf.linalg.adjoint(expectation(p, (kernel, inducing_variable), mean, nghp=nghp))
@dispatch.expectation.register(
    (Gaussian, MarkovGaussian), kernels.Kernel, InducingVariables, mfn.MeanFunction, NoneType
)
def _expectation_gaussian_kernel_inducingvariables__meanfunction(
    p: Union[Gaussian, MarkovGaussian],
    kernel: kernels.Kernel,
    inducing_variable: InducingVariables,
    mean: mfn.MeanFunction,
    _: None,
    nghp: None = None,
) -> tf.Tensor:
    """
    Compute the expectation:
    expectation[n] = <K_{Z, x_n} m(x_n)>_p(x_n)
    or the equivalent for MarkovGaussian.
    :return: NxMxQ
    """
    # The mirrored (mean, kernel) expectation has shape NxQxM; adjoint swaps
    # the two trailing dimensions to yield the required NxMxQ.
    return tf.linalg.adjoint(expectation(p, mean, (kernel, inducing_variable), nghp=nghp))
@dispatch.expectation.register(Gaussian, mfn.Constant, NoneType, kernels.Kernel, InducingPoints)
def _expectation_gaussian_constant__kernel_inducingpoints(
    p: Gaussian,
    constant_mean: mfn.Constant,
    _: None,
    kernel: kernels.Kernel,
    inducing_variable: InducingPoints,
    nghp: None = None,
) -> tf.Tensor:
    """
    Compute the expectation:
    expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n)
    - m(x_i) = c :: Constant function
    - K_{.,.} :: Kernel function
    :return: NxQxM
    """
    # The constant mean does not depend on x, so the expectation factorises
    # into an outer product of the constant and <K_{x_n, Z}>.
    constant_value = constant_mean(p.mu)  # NxQ
    eKxz = expectation(p, (kernel, inducing_variable), nghp=nghp)  # NxM
    return tf.expand_dims(constant_value, axis=-1) * tf.expand_dims(eKxz, axis=1)
@dispatch.expectation.register(Gaussian, mfn.Linear, NoneType, kernels.Kernel, InducingPoints)
def _expectation_gaussian_linear__kernel_inducingpoints(
    p: Gaussian,
    linear_mean: mfn.Linear,
    _: None,
    kernel: kernels.Kernel,
    inducing_variable: InducingPoints,
    nghp: None = None,
) -> tf.Tensor:
    """
    Compute the expectation:
    expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n)
    - m(x_i) = A x_i + b :: Linear mean function
    - K_{.,.} :: Kernel function
    :return: NxQxM
    """
    num_points = tf.shape(p.mu)[0]  # N
    input_dim = tf.shape(p.mu)[1]  # D
    # <x_n K_{x_n, Z}> and <K_{x_n, Z}> under p(x_n).
    exKxz = expectation(p, mfn.Identity(input_dim), (kernel, inducing_variable), nghp=nghp)
    eKxz = expectation(p, (kernel, inducing_variable), nghp=nghp)
    # A^T <x K>: batched matmul with A tiled over the batch dimension.
    tiled_A = tf.tile(linear_mean.A[None, :, :], (num_points, 1, 1))
    eAxKxz = tf.linalg.matmul(tiled_A, exKxz, transpose_a=True)
    # b <K>: outer-product contribution of the bias term.
    ebKxz = linear_mean.b[None, :, None] * eKxz[:, None, :]
    return eAxKxz + ebKxz
@dispatch.expectation.register(Gaussian, mfn.Identity, NoneType, kernels.Kernel, InducingPoints)
def _expectation_gaussian__kernel_inducingpoints(
    p: Gaussian,
    identity_mean: mfn.Identity,
    _: None,
    kernel: kernels.Kernel,
    inducing_variable: InducingPoints,
    nghp: None = None,
) -> tf.Tensor:
    """
    Guard registration that only raises.

    This prevents infinite recursion for kernels that don't have specific
    implementations of _expectation(p, identity_mean, None, kernel, inducing_variable).
    Recursion can arise because Identity is a subclass of Linear mean function
    so _expectation(p, linear_mean, none, kernel, inducing_variable) would call itself.
    More specific signatures (e.g. (p, identity_mean, None, RBF, inducing_variable)) will
    be found and used whenever available.

    :raises NotImplementedError: always.
    """
    raise NotImplementedError
# ============== Conversion to Gaussian from Diagonal or Markov ===============
# Catching missing DiagonalGaussian implementations by converting to full Gaussian:
@dispatch.expectation.register(
    DiagonalGaussian, object, (InducingVariables, NoneType), object, (InducingVariables, NoneType)
)
def _expectation_diagonal_generic(
    p: DiagonalGaussian,
    obj1: ExpectationObject,
    feat1: Optional[InducingVariables],
    obj2: ExpectationObject,
    feat2: Optional[InducingVariables],
    nghp: None = None,
) -> tf.Tensor:
    """
    Fallback for missing DiagonalGaussian implementations: promote the
    diagonal covariance to a dense (diagonal) matrix and dispatch to the
    full-covariance Gaussian implementation.
    """
    full_cov_gaussian = Gaussian(p.mu, tf.linalg.diag(p.cov))
    packed_first = cast(PackedExpectationObject, (obj1, feat1))
    packed_second = cast(PackedExpectationObject, (obj2, feat2))
    return expectation(full_cov_gaussian, packed_first, packed_second, nghp=nghp)
# Catching missing MarkovGaussian implementations by converting to Gaussian (when indifferent):
@dispatch.expectation.register(
    MarkovGaussian, object, (InducingVariables, NoneType), object, (InducingVariables, NoneType)
)
def _expectation_markov_generic(
    p: MarkovGaussian,
    obj1: ExpectationObject,
    feat1: Optional[InducingVariables],
    obj2: ExpectationObject,
    feat2: Optional[InducingVariables],
    nghp: None = None,
) -> tf.Tensor:
    """
    Catch missing MarkovGaussian implementations by converting to a plain
    Gaussian when the expectation does not couple consecutive time steps.

    Nota Bene: if only one object is passed, obj1 is
    associated with x_n, whereas obj2 with x_{n+1}
    """
    if obj2 is None:
        # Expectation over x_n only: drop the final time step.
        # NOTE(review): assumes p.cov[0] holds the marginal covariances
        # (and index 1 the cross-covariances) - confirm against the
        # MarkovGaussian definition.
        gaussian = Gaussian(p.mu[:-1], p.cov[0, :-1])
        return expectation(gaussian, cast(PackedExpectationObject, (obj1, feat1)), nghp=nghp)
    elif obj1 is None:
        # Expectation over x_{n+1} only: drop the first time step.
        gaussian = Gaussian(p.mu[1:], p.cov[0, 1:])
        return expectation(gaussian, cast(PackedExpectationObject, (obj2, feat2)), nghp=nghp)
    else:
        # Both objects present: re-dispatch; a pairwise MarkovGaussian
        # implementation is expected to exist for this combination.
        return expectation(
            p,
            cast(PackedExpectationObject, (obj1, feat1)),
            cast(PackedExpectationObject, (obj2, feat2)),
            nghp=nghp,
        )
| GPflow/GPflow | gpflow/expectations/misc.py | Python | apache-2.0 | 6,697 | [
"Gaussian"
] | 7b6b2b40e22b54fbb28be8d6a198796c6679d4d61cadf2a0a6595072be4f10c7 |
''' Tratihubis converts Trac tickets to Github issues by using the
following steps:
1. The user manually exports the Trac tickets to convert to a CSV file.
2. Tratihubis reads the CSV file and uses the data to create Github issues and milestones.
Installation
============
To install tratihubis, use ``pip`` or ``easy_install``::
$ pip install tratihubis
If necessary, this also installs the `PyGithub <http://pypi.python.org/pypi/PyGithub/>`_ package.
* If it does not, do `sudo pip install PyGithub`
Usage
=====
Information about Trac tickets to convert has to be provided in several CSV files. To obtain these CSV files, create
new Trac queries using the SQL statements stored in
`query_tickets.sql <https://github.com/roskakori/tratihubis/blob/master/query_tickets.sql>`_ (or better, the current version from this repo) and
`query_comments.sql <https://github.com/roskakori/tratihubis/blob/master/query_comments.sql>`_. Then
execute the queries and save the results by clicking "Download in other formats: Comma-delimited Text" and
choosing for example ``/Users/me/mytool/tickets.csv`` and ``/Users/me/mytool/comments.csv`` as output files. Do the same for `query_attachments.sql`.
Next create a config file to describe how to login to Github and what to convert. For an example, see `sample-ticket-export.cfg`. For example, you could
store the following in ``~/mytool/tratihubis.cfg``::
[tratihubis]
token = my_github_token
repo = mytool
tickets = /Users/me/mytool/tickets.csv
comments = /Users/me/mytool/comments.csv
Then run::
$ tratihubis ~/mytool/tratihubis.cfg
This tests that the input data and Github information is valid and writes a log to the console describing
which operations would be performed.
To actually create the Github issues, you need to enable the command line option ``--really``::
$ tratihubis --really ~/mytool/tratihubis.cfg
Be aware that Github issues and milestones cannot be deleted in case you mess up. Your only remedy is to
remove the whole repository and start anew. So make sure that tratihubis does what you want before you
enable ``--really``. A good practice would be to do a practice import into a junk repository, check that you like the results, then delete that
repository and redo it using your real repository.
For large imports, you may run into Github abuse prevention limits. Contact Github support to have your account temporarily whitelisted.
Or tune the sleeps sprinkled in this code that are intended to avoid those limits.
A large import may fail partway through. Use the --skipExisting option to pick up where you left off, and go back and manually edit the last
issue which may have been created but not completed.
Mapping users
-------------
By default all tickets and comments are created by the user specified with the option `token`. For
a private Trac project with a single user this already gives the desired result in the Github project.
In case there are multiple Trac users, you can map them to different Github tokens using the option
`users`. For example::
users = johndoe: johndoe_token, *: another_token, sally: *
This maps the Trac user `johndoe` using John Doe's Github token and everyone else to
`another_token`. Sally is mapped to default token as specified with the `token` option above,
which in this example is `my_github_token`.
The default value is::
users = *:*
This maps every Trac user to the default token.
You may also use the config `userLogins` to map trac login names to github login names. This is used to capture more information in
the new comments. Be sure to use the correct GitHub login in order for tickets to be properly assigned.
Mapping labels
--------------
Github labels somewhat mimic the functionality Trac stores in the ``type`` and ``resolution`` field of
tickets. By default, Github supports the following labels:
* bug
* duplicate
* enhancement
* invalid
* question
* wontfix
Trac on the other hand has a ``type`` field which by default can be:
* bug
* enhancement
* task
Furthermore closed Trac tickets have a ``resolution`` which, among others, can be:
* duplicate
* invalid
* wontfix
The ``labels`` config option allows you to map Trac fields to Github labels. For example::
labels = type=defect: bug, type=enhancement: enhancement, resolution=wontfix: wontfix
Here, ``labels`` is a comma separated list of mappings taking the form
``<trac-field>=<trac-value>:<github-label>``.
In case types or labels contain other characters than ASCII letters, digits and underscore (_), put them
between quotes::
labels = type="software defect": bug
This script will also support labels of type `priority` and `keyword` matching the corresponding Trac fields.
``IMPORTANT``: You must pre-create all the above labels in Github for the import to complete.
Note that components will also map to labels if you supply the config option `addComponentLabels=true`. In this case, the script will create the needed label if not present.
Attachments
-----------
You can find some notes on this in `issue #19 <https://github.com/roskakori/tratihubis/issues/19>`.
In short, Github doesn't directly support attachments. Instead, this script can create a comment that includes a link to a document elsewhere.
For example, you can create a Gist for the attachment, or create a repository. Run `query_attachments.sql` to get the paths / descriptions of attachments.
Then set `attachmentsPrefix` in the config. The script will create a comment referencing the URL <prefix>/<issue#>/<attachmentName>.
Converting Trac Wiki Markup to Github Markdown
----------------------------------------------
Tratihubis makes an attempt to convert Trac Wiki markup into
Github markdown with the use of a number of regular expression
substitutions. This is far from a perfect process so care should be
taken and the the regular expressions may need amending on a case by
case basis. This conversion process also tries to preserve links
between tickets with expressions such as `ticket:XX` converted to
`issue #YY`. Also links in Trac such as `rXXXX` when referring to
subversion changeset will link back to the original Trac repository if
required. Use::
convert_text = true
in the `.cfg` file to attempt the markup conversion. Stipulate the
Trac repository with::
trac_url = https://trac/url
Limitations
===========
The author of Github issues and comments always is the user specified in the config, even if a different
user opened the original Trac ticket or wrote the original Trac comment - except where the config file supplies a `user` and `userLogin` for
this other user.
Github issues and comments have the current time as time stamp instead of the time from Trac.
The due date of Trac milestones is not migrated to Github milestones, so when the conversion is done, you
have to set it manually. Similarly, closed milestones will not be closed.
Trac milestones without any tickets are not converted to Github milestones.
Note that Github is working on an issue import API. When available, that will likely be a better option. It will
support importing original creation dates, and a bulk import that is both faster and avoids the abuse prevention limits.
Support
=======
In case of questions and problems, open an issue at <https://github.com/roskakori/tratihubis/issues>.
To obtain the source code or create your own fork to implement fixes or improvements, visit
<https://github.com/roskakori/tratihubis>.
License
=======
Copyright (c) 2012-2013, Thomas Aglassinger. All rights reserved. Distributed under the
`BSD License <http://www.opensource.org/licenses/bsd-license.php>`_.
Changes
=======
2015-05
(Contributed by Aaron Helsinger)
* Fixes to translation of wiki markup
* Revised SQL to match my environment (Trac 0.11 backed by PostgreSQL)
* And export CC list, keywords, priority
* Add support for labels from keywords, priorities
* Add option `--skipExisting` to skip tickets whose # conflicts with a pre-existing issue / pull request.
* Watch the Github rate limit and sleep until the reset time if needed. Also sleep N seconds after every M
creates by a given token, plus another couple seconds per issue. All to avoid the Github abuse prevention mechanisms.
Better to get your usernames whitelisted if you are doing a large import.
* Include the CC list (minus email domain) in comments
* Work on including the proper Github user login and having the proper Github user be the reporter / assignee.
* Cache all Github API objects, optionally calling update() if you specify --updateObjects
* New config option `ticketToStartAt` to support resuming an import
* Combine applying labels with closing issues into a single API call.
Version 1.0, 2014-06-14
(Contributed by Daniel Wheeler)
* Changed user authentication from password to token.
* Added basic translation of Wiki markup.
* Added conversion of ticket:xx links.
* Added backlink to from Github issue to original Trac link.
Version 0.5, 2013-02-13
(Contributed by Steven Di Rocco)
* Added support for file attachments.
* Added work around for information lost due GitHub API limitations:
* Added trac commenter and date at the top of each comment.
* Added automatic comment to each issue showing original author, date authored, and last modification date.
* Fixed API calls to work with PyGithub 1.8.
Version 0.4, 2012-05-04
* Added config option ``labels`` to map Trac status and resolution to Github labels.
Version 0.3, 2012-05-03
* Added config option ``comments`` to convert Trac ticket comments.
* Added closing of issue for which the corresponding Trac ticket has been closed already.
* Added validation of users issues are assigned to. They must have an active Github user.
Version 0.2, 2012-05-02
* Added config option ``users`` to map Trac users to Github users.
* Added binary in order to run ``tratihubis`` instead of ``python -m tratihubis``.
* Changed supposed issue number in log to take existing issues in account.
Version 0.1, 2012-05-01
* Initial release.
'''
# Copyright (c) 2012-2013, Thomas Aglassinger
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Thomas Aglassinger nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import codecs
import collections
import ConfigParser
import csv
import github
import logging
import optparse
import os.path
import StringIO
import sys
import time
import token
import tokenize
import datetime
import dateutil.parser
from translator import Translator, NullTranslator
# Module-wide logging: timestamped console output at INFO by default
# (-v raises it to DEBUG in _parsedOptions).
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s: %(message)s',datefmt='%H:%M:%S')
_log = logging.getLogger('tratihubis')
__version__ = "1.0"
# Name of the config file section all options are read from.
_SECTION = 'tratihubis'
_OPTION_LABELS = 'labels'
_OPTION_USERS = 'users'
# Tokens that have already been checked against the Github API (see _validateGithubUser).
_validatedGithubTokens = set()
# Cache of token -> Github hub instance, so each token authenticates only once.
_tokenToHubMap = {}
# Stand-ins for real Github objects when running in --pretend mode.
_FakeMilestone = collections.namedtuple('_FakeMilestone', ['number', 'title'])
_FakeIssue = collections.namedtuple('_FakeIssue', ['number', 'title', 'body', 'state'])
# Ticket ids whose text was changed by the translator, and ids actually converted.
_editedIssues = []
_createdIssues = []
# Track how many creates each token has done. To avoid abuse rate limits.
# The limit is surely for over some period of time, but I don't know what time frame. So instead,
# just plan to sleep for M seconds every N creates by a given token
_createsByToken = {}
# For storing if we should call update() on github objects
_doUpdateVar = {}
def _setUpdate(doit=False):
    """Remember whether cached Github objects should be refreshed via update()."""
    # Normalize any truthy/falsy argument to an exact boolean, exactly as the
    # previous if/else assignment did.
    _doUpdateVar['val'] = bool(doit)
def _doUpdate():
    """Return True if cached Github objects should be refreshed.

    Defaults to False when _setUpdate() has not been called yet, instead of
    raising KeyError on the missing 'val' entry.
    """
    return _doUpdateVar.get('val', False)
# Trac exports can contain very large description/comment fields; lift the
# default per-field size limit so csv.reader does not raise on them.
csv.field_size_limit(sys.maxsize)
class _ConfigError(Exception):
    """Raised when an option in the [tratihubis] config section cannot be processed."""
    def __init__(self, option, message):
        assert option is not None
        assert message is not None
        detail = u'cannot process config option "%s" in section [%s]: %s' \
                % (option, _SECTION, message)
        Exception.__init__(self, detail)
class _CsvDataError(Exception):
def __init__(self, csvPath, rowIndex, message):
assert csvPath is not None
assert rowIndex is not None
assert rowIndex >= 0
assert message is not None
Exception.__init__(self, u'%s:%d: %s' % (os.path.basename(csvPath), rowIndex + 1, message))
class _UTF8Recoder:
    """
    Iterator wrapping a byte stream in some source encoding, yielding its
    lines re-encoded as UTF-8 byte strings (what the csv module expects).
    """
    def __init__(self, f, encoding):
        self.reader = codecs.getreader(encoding)(f)
    def __iter__(self):
        return self
    def next(self):  # @ReservedAssignment
        # Decode one line from the source encoding, then emit UTF-8 bytes.
        line = self.reader.next()
        return line.encode("utf-8")
class _UnicodeCsvReader:
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding, yielding each row as a list
    of unicode strings.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        recoded = _UTF8Recoder(f, encoding)
        self.reader = csv.reader(recoded, dialect=dialect, **kwds)
    def __iter__(self):
        return self
    def next(self):  # @ReservedAssignment
        # csv.reader hands back UTF-8 byte strings; decode each cell to unicode.
        return [unicode(cell, "utf-8") for cell in self.reader.next()]
class _LabelTransformations(object):
    """Maps (Trac field, Trac value) pairs to existing Github labels.

    The mapping is parsed from the config option ``labels``, whose syntax is a
    comma separated list of ``"tracField"="tracValue": "labelName"`` entries
    (all three parts are quoted strings). Every referenced label must already
    exist in the repository.
    """
    def __init__(self, repo, definition):
        # repo: Github repository to read existing labels from.
        # definition: raw value of the ``labels`` config option; falsy means
        # no transformations at all.
        assert repo is not None
        self._transformations = []
        self._labelMap = {}
        if definition:
            self._buildLabelMap(repo)
            self._buildTransformations(repo, definition)
    def _buildLabelMap(self, repo):
        """Fill self._labelMap with {label name: label object} from the repo."""
        assert repo is not None
        _log.info(u'analyze existing labels (read from repo)')
        self._labelMap = {}
        # Use the module level cache _repoLabels unless it is empty or a
        # refresh was requested via _setUpdate().
        # NOTE(review): on refresh this appends to the cache without clearing
        # it first, so _repoLabels can accumulate duplicates — confirm this is
        # acceptable (the dict assignment below deduplicates by name anyway).
        if len(_repoLabels) == 0 or _doUpdate():
            _log.debug("About to do repo.get_labels")
            labels = repo.get_labels()
            for l in labels:
                _repoLabels.append(l)
        for label in _repoLabels:
            _log.debug(u' found label "%s"', label.name)
            self._labelMap[label.name] = label
        _log.info(u' found %d labels', len(self._labelMap))
    def _buildTransformations(self, repo, definition):
        """Parse ``definition`` into (tracField, tracValue, labelName) triples.

        Implemented as a small state machine over Python tokens; only STRING
        tokens advance through the field/value/label slots, while '=', ':' and
        ',' act as separators checked in their respective states.
        """
        assert repo is not None
        assert definition is not None
        # Parser states: the kind of token expected next.
        STATE_AT_TRAC_FIELD = 'f'
        STATE_AT_COMPARISON_OPERATOR = '='
        STATE_AT_TRAC_VALUE = 'v'
        STATE_AT_COLON = ':'
        STATE_AT_LABEL = 'l'
        STATE_AT_COMMA = ','
        self._transformations = []
        state = STATE_AT_TRAC_FIELD
        for tokenType, tokenText, _, _, _ in tokenize.generate_tokens(StringIO.StringIO(definition).readline):
            if tokenType == token.STRING:
                # Strip the surrounding quotes from the string literal.
                tokenText = tokenText[1:len(tokenText) - 1]
            if state == STATE_AT_TRAC_FIELD:
                tracField = tokenText
                tracValue = None
                labelValue = None
                state = STATE_AT_COMPARISON_OPERATOR
            elif state == STATE_AT_COMPARISON_OPERATOR:
                if tokenText != '=':
                    raise _ConfigError(_OPTION_LABELS,
                            u'Trac field "%s" must be followed by \'=\' instead of %r'
                            % (tracField, tokenText))
                state = STATE_AT_TRAC_VALUE
            elif state == STATE_AT_TRAC_VALUE:
                tracValue = tokenText
                state = STATE_AT_COLON
            elif state == STATE_AT_COLON:
                if tokenText != ':':
                    raise _ConfigError(_OPTION_LABELS,
                            u'value for comparison "%s" with Trac field "%s" must be followed by \':\' instead of %r'
                            % (tracValue, tracField, tokenText))
                state = STATE_AT_LABEL
            elif state == STATE_AT_LABEL:
                labelValue = tokenText
                # The target label must already exist in the repo.
                if not labelValue in self._labelMap:
                    raise _ConfigError(_OPTION_LABELS,
                            u'unknown label "%s" must be replaced by one of: %s'
                            % (labelValue, sorted(self._labelMap.keys())))
                self._transformations.append((tracField, tracValue, labelValue))
                state = STATE_AT_COMMA
            elif state == STATE_AT_COMMA:
                # ENDMARKER is also accepted here so the definition may simply end.
                if (tokenType != token.ENDMARKER) and (tokenText != ','):
                    raise _ConfigError(_OPTION_LABELS,
                            u'label transformation for Trac field "%s" must end with \',\' instead of %r'
                            % (tracField, tokenText))
                state = STATE_AT_TRAC_FIELD
            else:
                assert False, u'state=%r' % state
    def labelFor(self, tracField, tracValue):
        """Return the Github label object for (tracField, tracValue), or None.

        The first matching transformation wins.
        """
        assert tracField
        assert tracValue is not None
        result = None
        transformationIndex = 0
        while (result is None) and (transformationIndex < len(self._transformations)):
            transformedField, transformedValueToCompareWith, transformedLabel = \
                self._transformations[transformationIndex]
            if (transformedField == tracField) and (transformedValueToCompareWith == tracValue):
                assert transformedLabel in self._labelMap
                result = self._labelMap[transformedLabel]
            else:
                transformationIndex += 1
        return result
def _getConfigOption(config, name, required=True, defaultValue=None, boolean=False):
    """Read option ``name`` from the [tratihubis] section of ``config``.

    When ``boolean`` is set the value is parsed with ``getboolean``. A missing
    option yields ``defaultValue`` unless ``required`` is set, in which case a
    _ConfigError is raised. A missing section always raises _ConfigError.
    """
    getter = config.getboolean if boolean else config.get
    try:
        return getter(_SECTION, name)
    except ConfigParser.NoOptionError:
        if required:
            raise _ConfigError(name, 'config must contain a value for this option')
        return defaultValue
    except ConfigParser.NoSectionError:
        raise _ConfigError(name, u'config must contain this section')
def _shortened(text):
assert text is not None
THRESHOLD = 30
if len(text) > THRESHOLD:
result = text[:THRESHOLD] + '...'
else:
result = text
return result
# Process-wide cache of label objects already fetched from the repo, shared by
# _addNewLabel() and _LabelTransformations._buildLabelMap() to avoid repeated
# get_labels() API calls.
_repoLabels = []
def _addNewLabel(label, repo):
    """Ensure ``label`` exists in the repo, creating it (color 5319e7) if missing.

    Returns the number of labels actually created (0 or 1) so the caller can
    count the API write against the acting token's create budget.
    """
    addCnt = 0
    # (Re)fill the shared label cache when it is empty or a refresh was
    # requested via _setUpdate().
    # NOTE(review): a refresh appends without clearing the cache first, so
    # _repoLabels may hold duplicates — confirm that is intended.
    if label and (len(_repoLabels) == 0 or _doUpdate()):
        _log.debug("About to do repo.get_labels")
        labels = repo.get_labels()
        for l in labels:
            _repoLabels.append(l)
    if label not in [l.name for l in _repoLabels]:
        lObject = repo.create_label(label, '5319e7')
        # Cache the newly created label so later calls see it without a refetch.
        _repoLabels.append(lObject)
        addCnt += 1
    return addCnt
def _tracTicketMaps(ticketsCsvPath):
    """
    Sequence of maps where each item describes the relevant fields of each row
    from the tickets CSV exported from Trac.

    Yields one map per data row (the header row is skipped) with keys such as
    'id' (a number), 'createdtime'/'modifiedtime' (datetimes) and 'keywords'
    (always a list of stripped keyword strings, possibly empty).

    Raises _CsvDataError when a row does not have exactly 15 columns.
    """
    EXPECTED_COLUMN_COUNT = 15
    _log.info(u'read ticket details from "%s"', ticketsCsvPath)
    with open(ticketsCsvPath, "rb") as ticketCsvFile:
        csvReader = _UnicodeCsvReader(ticketCsvFile)
        hasReadHeader = False
        for rowIndex, row in enumerate(csvReader):
            columnCount = len(row)
            if columnCount != EXPECTED_COLUMN_COUNT:
                raise _CsvDataError(ticketsCsvPath, rowIndex,
                        u'ticket row must have %d columns but has %d: %r' %
                        (EXPECTED_COLUMN_COUNT, columnCount, row))
            if hasReadHeader:
                ticketMap = {
                    'id': long(row[0]),
                    'type': row[1],
                    'owner': row[2],
                    'reporter': row[3],
                    'milestone': row[4],
                    'status': row[5],
                    'resolution': row[6],
                    'summary': row[7],
                    'description': row[8],
                    # Timestamps are exported as date strings; parse them leniently.
                    'createdtime': dateutil.parser.parse(str(row[9])),
                    'modifiedtime': dateutil.parser.parse(str(row[10])),
                    'component': row[11],
                    'priority': row[12],
                    'keywords': row[13],
                    'cc': row[14]
                }
                # Always normalize keywords to a list of stripped words.
                # An empty or all-whitespace field becomes []; previously an
                # all-whitespace field was left as a string, which consumers
                # would then iterate character by character.
                ticketMap['keywords'] = [kw.strip() for kw in ticketMap['keywords'].split()]
                yield ticketMap
            else:
                hasReadHeader = True
def _createMilestoneMap(repo):
    """Return {milestone title: milestone object} for all open and closed milestones in ``repo``."""
    result = {}
    _log.info(u'analyze existing milestones (read milestones, both open and closed)')
    # Open milestones first, then closed, matching the previous two-pass order.
    for state in ('open', 'closed'):
        for milestone in repo.get_milestones(state=state):
            _log.debug(u' %d: %s', milestone.number, milestone.title)
            result[milestone.title] = milestone
    _log.info(u' found %d milestones', len(result))
    return result
def _createIssueMap(repo):
    """Return {issue number: issue object} for all open and closed issues in ``repo``."""
    result = {}
    _log.info(u'analyze existing issues')
    # Open issues first, then closed, matching the previous two-pass order.
    for state in ('open', 'closed'):
        _log.debug("Looking up all issues that are %s", state)
        for issue in repo.get_issues(state=state):
            _log.debug(u' %s: (%s) %s', issue.number, issue.state, issue.title)
            result[issue.number] = issue
    _log.info(u' found %d issues', len(result))
    return result
def _createTicketToCommentsMap(commentsCsvPath):
    """Read the Trac comments CSV and return {ticket id: [comment map, ...]}.

    Each comment map has the keys 'id' (the ticket id), 'date', 'author' and
    'body'. Returns an empty map when ``commentsCsvPath`` is None.

    Raises _CsvDataError when a row does not have exactly 4 columns.
    """
    EXPECTED_COLUMN_COUNT = 4
    result = {}
    if commentsCsvPath is not None:
        _log.info(u'read ticket comments from "%s"', commentsCsvPath)
        with open(commentsCsvPath, "rb") as commentsCsvFile:
            csvReader = _UnicodeCsvReader(commentsCsvFile)
            hasReadHeader = False
            for rowIndex, row in enumerate(csvReader):
                columnCount = len(row)
                if columnCount != EXPECTED_COLUMN_COUNT:
                    raise _CsvDataError(commentsCsvPath, rowIndex,
                            u'comment row must have %d columns but has %d: %r' %
                            (EXPECTED_COLUMN_COUNT, columnCount, row))
                if hasReadHeader:
                    commentMap = {
                        'id': long(row[0]),
                        # Timestamps are exported as date strings; parse them leniently.
                        'date': dateutil.parser.parse(str(row[1])),
                        'author': row[2],
                        'body': row[3],
                    }
                    # Group comments by ticket id, creating the list lazily.
                    result.setdefault(commentMap['id'], []).append(commentMap)
                else:
                    hasReadHeader = True
    return result
def is_int(s):
    """Return True if ``s`` can be parsed as a (long) integer, False otherwise."""
    try:
        long(s)
        return True
    except (ValueError, TypeError):
        # ValueError: non-numeric string; TypeError: non-string input such as
        # None, which the previous version let escape as an exception.
        return False
def _createTicketsToAttachmentsMap(attachmentsCsvPath, attachmentsPrefix):
    """Read the Trac attachments CSV and return {ticket id: [attachment map, ...]}.

    Each attachment map has the keys 'id', 'author', 'filename', 'date' and
    'fullpath' (the attachment URL built from ``attachmentsPrefix``). Returns
    an empty map when no CSV path is given, or (with an error logged) when the
    prefix is missing. Rows with a non-numeric id column are skipped.

    Raises _CsvDataError when a row does not have exactly 4 columns.
    """
    EXPECTED_COLUMN_COUNT = 4
    result = {}
    # Guard clauses: no CSV means nothing to do; a CSV without a prefix is a
    # configuration error (we could not build usable attachment URLs).
    if attachmentsCsvPath is None:
        return result
    if attachmentsPrefix is None:
        _log.error(u'attachments csv path specified but attachmentsprefix is not\n')
        return result
    _log.info(u'read attachments from "%s"', attachmentsCsvPath)
    with open(attachmentsCsvPath, "rb") as attachmentsCsvFile:
        attachmentsReader = _UnicodeCsvReader(attachmentsCsvFile)
        hasReadHeader = False
        for rowIndex, row in enumerate(attachmentsReader):
            columnCount = len(row)
            if columnCount != EXPECTED_COLUMN_COUNT:
                raise _CsvDataError(attachmentsCsvPath, rowIndex,
                        u'attachment row must have %d columns but has %d: %r' %
                        (EXPECTED_COLUMN_COUNT, columnCount, row))
            if hasReadHeader:
                id_string = row[0]
                # Skip rows whose id is not a ticket number.
                if is_int(id_string):
                    attachmentMap = {
                        'id': long(id_string),
                        'author': row[3],
                        'filename': row[1],
                        # Timestamps are exported as date strings; parse them leniently.
                        'date': dateutil.parser.parse(str(row[2])),
                        'fullpath': u'%s/%s/%s' % (attachmentsPrefix, row[0], row[1]),
                    }
                    # Group attachments by ticket id, creating the list lazily.
                    result.setdefault(attachmentMap['id'], []).append(attachmentMap)
            else:
                hasReadHeader = True
    return result
def createTicketsToIssuesMap(ticketsCsvPath, existingIssues, firstTicketIdToConvert, lastTicketIdToConvert, skipExisting):
    """Predict the Github issue number each Trac ticket in the conversion range will get.

    Issue numbers are assigned sequentially starting after any existing issues
    (or at 1 when ``skipExisting`` pretends there are none). Returns
    {ticket id (int): predicted issue number}.
    """
    ticketsToIssuesMap = dict()
    # FIXME: This probably doesn't do the right thing if the issues to convert doesn't start with 1
    if skipExisting:
        _log.debug("Skipping existing tickets. The tickets to issues map will pretend there are no existing issues.")
        nextIssueId = 1
    else:
        nextIssueId = 1 + len(existingIssues)
        if len(existingIssues) > 0:
            _log.debug("Due to existing %d issues, 1st ticket %d will become issue %d", len(existingIssues), firstTicketIdToConvert, nextIssueId)
        else:
            _log.debug("No existing issues. 1st ticket %d will be issue %d", firstTicketIdToConvert, nextIssueId)
    for ticketMap in _tracTicketMaps(ticketsCsvPath):
        ticketId = ticketMap['id']
        # A lastTicketIdToConvert of 0 means "no upper bound".
        inRange = (ticketId >= firstTicketIdToConvert) \
                and ((lastTicketIdToConvert == 0) or (ticketId <= lastTicketIdToConvert))
        if inRange:
            ticketsToIssuesMap[int(ticketId)] = nextIssueId
            nextIssueId += 1
    return ticketsToIssuesMap
# Maps token -> value of _createsByToken[token] at the moment we last slept on
# that token's behalf, so we only sleep again after enough further creates.
sleepsByToken = {}  # create count when last slept for this token
def migrateTickets(hub, repo, defaultToken, ticketsCsvPath,
        commentsCsvPath=None, attachmentsCsvPath=None,
        firstTicketIdToConvert=1, lastTicketIdToConvert=0,
        labelMapping=None, userMapping="*:*",
        attachmentsPrefix=None, pretend=True,
        trac_url=None, convert_text=False, ticketsToRender=False, addComponentLabels=False, userLoginMapping="*:*", skipExisting=False):
    """Convert the Trac tickets exported to ``ticketsCsvPath`` into Github issues in ``repo``.

    For each ticket in the requested id range this creates an issue (using the
    reporter's token where mapped), any needed milestone, attachment and
    comment notes, labels, and finally closes the issue if the ticket was
    closed. With ``pretend`` (the default) no Github write calls are made;
    fake issue/milestone objects are used instead.

    Throughout, _createsByToken is used to throttle per-token create calls to
    stay under Github's abuse rate limits.
    """
    assert hub is not None
    assert repo is not None
    assert ticketsCsvPath is not None
    assert userMapping is not None
    # How many issues are created before sleeping,
    # or how many creates of anything by a given token before sleeping
    createsBeforeSleep = 20
    # If all your users are whitelisted by github support, no need to sleep
    # createsBeforeSleep = 2000
    # If the above # is hit, how long in seconds to sleep
    secondsToSleep = 50
    # If all your users are whitelisted, no need to sleep
    # secondsToSleep = 1
    _log.debug("Doing getuser")
    baseUserO = _getUserFromHub(hub)
    baseUser = baseUserO.login
    #baseUser = hub.get_user().login
    # Gather all the inputs: per-ticket comments/attachments, what already
    # exists on Github, user/login mappings, label transformations, and the
    # predicted ticket->issue numbering used for cross-reference translation.
    tracTicketToCommentsMap = _createTicketToCommentsMap(commentsCsvPath)
    tracTicketToAttachmentsMap = _createTicketsToAttachmentsMap(attachmentsCsvPath, attachmentsPrefix)
    existingIssues = _createIssueMap(repo)
    existingMilestones = _createMilestoneMap(repo)
    tracToGithubUserMap = _createTracToGithubUserMap(hub, userMapping, defaultToken)
    tracToGithubLoginMap = _createTracToGithubLoginMap(hub, userLoginMapping, baseUser)
    labelTransformations = _LabelTransformations(repo, labelMapping)
    ticketsToIssuesMap = createTicketsToIssuesMap(ticketsCsvPath, existingIssues, firstTicketIdToConvert, lastTicketIdToConvert, skipExisting)
    if convert_text:
        Translator_ = Translator
    else:
        Translator_ = NullTranslator
    translator = Translator_(repo, ticketsToIssuesMap, trac_url=trac_url, attachmentsPrefix=attachmentsPrefix)
    def possiblyAddLabel(labels, tracField, tracValue):
        # Append the mapped label name for (tracField, tracValue), if any.
        label = labelTransformations.labelFor(tracField, tracValue)
        if label is not None:
            _log.info(' add label "%s"', label.name)
            if not pretend:
                labels.append(label.name)
    fakeIssueId = 1 + len(existingIssues)
    createdCount = 0
    createdCountLastSleep = 0  # num issues created when last did long sleep
    for ticketMap in _tracTicketMaps(ticketsCsvPath):
        _log.debug("")
        _log.debug("Rate limit status: %r resets at %r", hub.rate_limiting, datetime.datetime.fromtimestamp(hub.rate_limiting_resettime))
        # _log.debug("%d issues created so far (sleep every %d)...", createdCount, createsBeforeSleep)
        _log.debug("%d issues created so far...", createdCount)
        # rate limit is 5000 per hour, after which you get an error: "403 Forbidden" with message "API rate limit exceeded...."
        if hub.rate_limiting[0] < 10:
            # This solution to the rate limit is fairly crude: when we're about to hit the limit, sleep until the reset time.
            # That could be nearly an hour (maybe). An alternative would be to (a) try to catch the error when you hit the limit and sleep then, and/or (b)
            # sleep 10 minutes and retry when we need to, or (c) sleep periodically if we are burning up our API calls quickly
            _log.warning("Rate limit nearly exceeded. Sleeping until reset time of %s", datetime.datetime.fromtimestamp(hub.rate_limiting_resettime))
            # FIXME: TZ handling
            now = datetime.datetime.now()
            sleeptime = datetime.datetime.fromtimestamp(hub.rate_limiting_resettime) - now
            _log.info("Will sleep for %d seconds. See you at %s! \nZzz.....", int(sleeptime.total_seconds()), datetime.datetime.fromtimestamp(hub.rate_limiting_resettime))
            time.sleep(int(sleeptime.total_seconds()) + 1)
            _log.info(" ... And, we're back!")
            createdCountLastSleep = createdCount
        # elif createdCount > 0 and createdCount % createsBeforeSleep == 0 and createdCount != createdCountLastSleep:
        #     # Argh. Github applies other abuse rate limits. See EG https://github.com/octokit/octokit.net/issues/638
        #     # and https://developer.github.com/v3/#abuse-rate-limits
        #     # Some people sleep 1sec per issues, other 3sec per issue. Others 70sec per 20 issues.
        #     # Some comments suggest the limit is 20 create calls in a minute.
        #     _log.warning("Have created another %d issues. Sleeping %d seconds...\n...", createsBeforeSleep, secondsToSleep)
        #     if not pretend:
        #         time.sleep(secondsToSleep)
        #         _log.info(" ... and, we're back!")
        #     createdCountLastSleep = createdCount
        elif createdCountLastSleep != createdCount:
            # Per-token abuse-limit throttling: sleep once any single token has
            # done another createsBeforeSleep creates since its last sleep.
            didSleep = False
            for t in _createsByToken:
                _h = _getHub(t)
                _u = _getUserFromHub(_h).login
                _log.debug("User %s has %d creates", _u, _createsByToken[t])
            for t in _createsByToken:
                if (t not in sleepsByToken and _createsByToken[t] >= createsBeforeSleep) or \
                        (t in sleepsByToken and (_createsByToken[t] - sleepsByToken[t] >= createsBeforeSleep)):
                    # if _createsByToken[t] % createsBeforeSleep == 0 and (t not in sleepsByToken or _createsByToken[t] != sleepsByToken[t]):
                    _h = _getHub(t)
                    _u = _getUserFromHub(_h).login
                    _log.info("User %s has %d creates. Sleep %d seconds...\n...", _u, _createsByToken[t], secondsToSleep)
                    if not pretend:
                        time.sleep(secondsToSleep)
                        _log.info("... and, we're back!")
                    didSleep = True
                    createdCountLastSleep = createdCount
                    sleepsByToken[t] = _createsByToken[t]
                    break
            if not didSleep and createdCount > 0 and not pretend:
                # If all your users are whitelisted by github support, no need to sleep
                #pass
                time.sleep(2)
        ticketId = ticketMap['id']
        # FIXME: This probably doesn't do the right thing if the issues to convert doesn't start with 1
        if skipExisting and ticketId in existingIssues:
            iss = existingIssues.get(ticketId)
            _log.debug("Skipping Trac ticket %s because its ID overlaps an existing issue %s:%s", ticketId, iss.number, iss.title)
            continue
        _log.debug("Looking at ticket %s", ticketId)
        title = ticketMap['summary']
        # ticketsToRender, when given, restricts conversion to those ids only.
        renderTicket = True
        if ticketsToRender:
            if not ticketId in ticketsToRender:
                renderTicket = False
        if renderTicket and (ticketId >= firstTicketIdToConvert) \
                and ((ticketId <= lastTicketIdToConvert) or (lastTicketIdToConvert == 0)):
            body = ticketMap['description']
            # Resolve the reporter and owner to their tokens/hubs so creates
            # can be attributed to the right Github accounts.
            tracReporter = ticketMap['reporter'].strip()
            tokenReporter = _tokenFor(hub, tracToGithubUserMap, tracReporter)
            _hub = _getHub(tokenReporter)
            tracOwner = ticketMap['owner'].strip()
            tokenOwner = _tokenFor(hub, tracToGithubUserMap, tracOwner)
            _hubOwner = _getHub(tokenOwner)
            _log.debug("Repo will be %s", '{0}/{1}'.format(repo.owner.login, repo.name))
            _repo = _getRepoNoUser(_hub, '{0}/{1}'.format(repo.owner.login, repo.name))
            #_repo = _hub.get_repo('{0}/{1}'.format(repo.owner.login, repo.name))
            #_repo = _getRepo(hub, '{0}/{1}'.format(repo.owner.login, repo.name))
            githubAssignee = _getUserFromHub(_hubOwner)
            #githubAssignee = _hubOwner.get_user()
            ghAssigneeLogin = _loginFor(tracToGithubLoginMap, tracOwner)
            # Track whether the owner's login is only the fallback default.
            ghlRaw = tracToGithubLoginMap.get(tracOwner)
            ghlIsDefault = False
            if ghlRaw is None or ghlRaw == '*':
                ghlIsDefault = True
            _log.debug("For ticket %d got tracOwner %s, token %s, hub user's login: %s, ghAssigneeLogin from lookup on tracOwner: %s, ghlRaw: %s, isDefault: %s", ticketId, tracOwner, tokenOwner, githubAssignee.login, ghAssigneeLogin, ghlRaw, ghlIsDefault)
            # Ensure the ticket's milestone exists (creating it when needed).
            milestoneTitle = ticketMap['milestone'].strip()
            if len(milestoneTitle) != 0:
                if milestoneTitle not in existingMilestones:
                    _log.info(u'add milestone: %s', milestoneTitle)
                    _log.info(u'Existing milestones: %s', existingMilestones)
                    if not pretend:
                        newMilestone = repo.create_milestone(milestoneTitle)
                        if defaultToken in _createsByToken:
                            _createsByToken[defaultToken] += 1
                        else:
                            _createsByToken[defaultToken] = 1
                    else:
                        newMilestone = _FakeMilestone(len(existingMilestones) + 1, milestoneTitle)
                        if defaultToken in _createsByToken:
                            _createsByToken[defaultToken] += 1
                        else:
                            _createsByToken[defaultToken] = 1
                    existingMilestones[milestoneTitle] = newMilestone
                milestone = existingMilestones[milestoneTitle]
                milestoneNumber = milestone.number
            else:
                milestone = None
                milestoneNumber = 0
            _log.info(u'convert ticket #%d: %s', ticketId, _shortened(title))
            # Translate Trac markup / cross references in title and body;
            # remember which tickets were edited by the translation.
            origtitle = title
            title = translator.translate(title)
            if title != origtitle:
                if ticketId not in _editedIssues:
                    _editedIssues.append(ticketId)
            origbody = body
            body = translator.translate(body, ticketId=ticketId)
            if body != origbody:
                if ticketId not in _editedIssues:
                    _editedIssues.append(ticketId)
                if pretend:
                    _log.debug("Translated body from '%s' to '%s'", origbody, body)
            # Append provenance info (original ticket link, reporter, dates, CC).
            dateformat = "%m-%d-%Y at %H:%M"
            ticketString = '#{0}'.format(ticketId)
            if trac_url:
                ticket_url = '/'.join([trac_url, 'ticket', str(ticketId)])
                ticketString = '[{0}]({1})'.format(ticketString, ticket_url)
            legacyInfo = u"\n\n _Imported from trac ticket %s, created by %s on %s, last modified: %s_\n" \
                    % (ticketString, ticketMap['reporter'], ticketMap['createdtime'].strftime(dateformat),
                       ticketMap['modifiedtime'].strftime(dateformat))
            if ticketMap['cc'] and str(ticketMap['cc']).strip() != "":
                # strip out email domains (privacy)
                import re
                ccList = ticketMap['cc']
                sub = re.compile(r"([^\@\s\,]+)(@[^\,\s]+)?", re.DOTALL)
                ccListNew = sub.sub(r"\1@...", ccList)
                if ccListNew != ccList:
                    _log.debug("Edited ccList from '%s' to '%s'", ccList, ccListNew)
                legacyInfo += u" CCing: %s" % ccListNew
            body += legacyInfo
            if ticketsToRender:
                _log.info(u'body of ticket:\n%s', body)
            githubAssigneeLogin = None
            if ghAssigneeLogin:
                githubAssigneeLogin = ghAssigneeLogin
            elif githubAssignee:
                githubAssigneeLogin = githubAssignee.login
            _log.debug("Found ghAssignee login: %s", githubAssigneeLogin)
            # Argh and FIXME
            # After carefully setting things up to use just a login name for assigning tickets, that seems to fail
            # for a login that I think should have worked, I got:
            #GithubException: 422 {u'documentation_url': u'https://developer.github.com/v3/issues/#create-an-issue', u'message': u'Validation Failed', u'errors': [{u'field': u'assignee', u'code': u'invalid', u'resource': u'Issue', u'value': u'tcmitchell'}]}
            # So for now, assign things to me or leave them unassigned.
            # Hmm. Nope, the assignee should be a login. That much is true. However, the _repo instance needs to have been created
            # with a token that matches the login.
            useLogin = None
            if githubAssignee and ((not ghlIsDefault) or githubAssignee.login != baseUser) and ghAssigneeLogin == githubAssignee.login:
                useLogin = githubAssignee.login
                _log.debug("Will use the token of the owner with login %s", useLogin)
            else:
                _log.debug("Either had no ghAssignee or it is assigned to me by default, so leave it unassigned")
            # Create the issue (or a _FakeIssue in pretend mode), counting the
            # create against the reporter's token.
            issue = None
            if not pretend:
                try:
                    if milestone is None:
                        if useLogin:
                            issue = _repo.create_issue(title, body, assignee=useLogin)
                        else:
                            issue = _repo.create_issue(title, body)
                    else:
                        if useLogin:
                            # if githubAssigneeLogin:
                            issue = _repo.create_issue(title, body, assignee=useLogin, milestone=milestone)
                        else:
                            issue = _repo.create_issue(title, body, milestone=milestone)
                    if tokenReporter in _createsByToken:
                        _createsByToken[tokenReporter] += 1
                    else:
                        _createsByToken[tokenReporter] = 1
                except github.GithubException, ghe:
                    _log.error("Failed to create issue for ticket %d: %s", ticketId, ghe)
                    #_log.info("Title: '%s', assignee: %s, milestone: %s, body: '%s'", title, useLogin, milestone, body)
                    # if ghe.status == 403 and "abuse detection mechanism" in ghe.data:
                    #     # Could we sleep and retry?
                    #     _log.warning("Hit the abuse limits! Sleep for a minute and see if we can continue?")
                    raise
            else:
                issue = _FakeIssue(fakeIssueId, title, body, 'open')
                fakeIssueId += 1
                if tokenReporter in _createsByToken:
                    _createsByToken[tokenReporter] += 1
                else:
                    _createsByToken[tokenReporter] = 1
            createdCount += 1
            # if githubAssigneeLogin:
            if useLogin:
                _log.info(u' issue #%s: owner=%s-->%s; milestone=%s (%d)',
                        issue.number, tracOwner, useLogin, milestoneTitle, milestoneNumber)
            else:
                _log.info(u' issue #%s: owner=%s--><unassigned>; milestone=%s (%d)',
                        issue.number, tracOwner, milestoneTitle, milestoneNumber)
            # Collect the labels implied by the ticket's fields, creating any
            # missing label objects in the repo.
            labels = []
            possiblyAddLabel(labels, 'type', ticketMap['type'])
            possiblyAddLabel(labels, 'resolution', ticketMap['resolution'])
            possiblyAddLabel(labels, 'priority', ticketMap['priority'])
            for kw in ticketMap['keywords']:
                possiblyAddLabel(labels, 'keyword', kw)
            if addComponentLabels and ticketMap['component'] != 'None':
                if not pretend:
                    labels.append(ticketMap['component'])
            if not pretend:
                for l in labels:
                    addCnt = _addNewLabel(l, repo)
                    if defaultToken in _createsByToken:
                        _createsByToken[defaultToken] += addCnt
                    else:
                        _createsByToken[defaultToken] = addCnt
            # Moving actual addition of labels down later to be done in a single edit call
            # FIXME: Why is this whole block not: issue.edit(labels=labels)?
            # That is, why get a new hub, repo, and issue instance?
            # Done this way, the default person applies all labels.
            # Done my suggested way, the issue reporter applies all labels, which seems better.
            # issue.edit(labels=labels)
            # _hub = _getHub(defaultToken)
            # _repo = _getRepoNoUser(_hub, '{0}/{1}'.format(repo.owner.login, repo.name))
            # #_repo = _hub.get_repo('{0}/{1}'.format(repo.owner.login, repo.name))
            # _issue = _getIssueFromRepo(_repo,issue.number)
            # #_issue = _repo.get_issue(issue.number)
            # _log.debug("Setting labels on issue %d: %s", issue.number, labels)
            # _issue.edit(labels=labels)
            # Add one comment per attachment, authored by the mapped user.
            attachmentsToAdd = tracTicketToAttachmentsMap.get(ticketId)
            if attachmentsToAdd is not None:
                for attachment in attachmentsToAdd:
                    token = _tokenFor(repo, tracToGithubUserMap, attachment['author'], False)
                    attachmentAuthor = _userFor(token)
                    _hub = _getHub(token)
                    _repo = _getRepoNoUser(_hub, '{0}/{1}'.format(repo.owner.login, repo.name))
                    #_repo = _hub.get_repo('{0}/{1}'.format(repo.owner.login, repo.name))
                    attachmentAuthorLogin = _loginFor(tracToGithubLoginMap, attachment['author'])
                    if attachmentAuthorLogin and attachmentAuthorLogin != baseUser:
                        legacyInfo = u"_%s (%s) attached [%s](%s) on %s_\n" \
                                % (attachment['author'], attachmentAuthorLogin, attachment['filename'], attachment['fullpath'], attachment['date'].strftime(dateformat))
                        _log.info(u' added attachment from %s', attachmentAuthorLogin)
                    else:
                        legacyInfo = u"_%s attached [%s](%s) on %s_\n" \
                                % (attachment['author'], attachment['filename'], attachment['fullpath'], attachment['date'].strftime(dateformat))
                        _log.info(u' added attachment from %s', attachmentAuthor.login)
                    if ticketsToRender:
                        _log.info(u'attachment legacy info:\n%s',legacyInfo)
                    if not pretend:
                        _issue = _getIssueFromRepo(_repo,issue.number)
                        #_issue = _repo.get_issue(issue.number)
                        assert _issue is not None
                        try:
                            _issue.create_comment(legacyInfo)
                            if token in _createsByToken:
                                _createsByToken[token] += 1
                            else:
                                _createsByToken[token] = 1
                        except github.GithubException, ghe:
                            _log.error("Failed to create comment about attachment for ticket %d: %s", ticketId, ghe)
                            _log.info("Attachment comment: '%s'", _shortened(legacyInfo))
                            raise
                    else:
                        if token in _createsByToken:
                            _createsByToken[token] += 1
                        else:
                            _createsByToken[token] = 1
            # Re-create the ticket's comments, each attributed to its author
            # where a token mapping exists.
            commentsToAdd = tracTicketToCommentsMap.get(ticketId)
            if commentsToAdd is not None:
                for comment in commentsToAdd:
                    token = _tokenFor(repo, tracToGithubUserMap, comment['author'], False)
                    commentAuthor = _userFor(token)
                    commentAuthorLogin = _loginFor(tracToGithubLoginMap, comment['author'])
                    _hub = _getHub(token)
                    _repo = _getRepoNoUser(_hub, '{0}/{1}'.format(repo.owner.login, repo.name))
                    #_repo = _hub.get_repo('{0}/{1}'.format(repo.owner.login, repo.name))
                    if commentAuthorLogin and commentAuthorLogin != baseUser:
                        commentBody = u"%s\n\n_Trac comment by %s (github user: %s) on %s_\n" % (comment['body'], comment['author'], commentAuthorLogin, comment['date'].strftime(dateformat))
                        _log.info(u' add comment by %s: %r', commentAuthorLogin, _shortened(commentBody))
                    else:
                        commentBody = u"%s\n\n_Trac comment by %s on %s_\n" % (comment['body'], comment['author'], comment['date'].strftime(dateformat))
                        _log.info(u' add comment by %s: %r', commentAuthor.login, _shortened(commentBody))
                    origComment = commentBody
                    commentBody = translator.translate(commentBody, ticketId=ticketId)
                    if origComment != commentBody:
                        if ticketId not in _editedIssues:
                            _editedIssues.append(ticketId)
                    if ticketsToRender:
                        _log.info(u'commentBody:\n%s',commentBody)
                    if not pretend:
                        # Here we use the token from the users map
                        # so the real github user creates the comment if possible
                        _issue = _getIssueFromRepo(_repo,issue.number)
                        #_issue = _repo.get_issue(issue.number)
                        assert _issue is not None
                        try:
                            _issue.create_comment(commentBody)
                            if token in _createsByToken:
                                _createsByToken[token] += 1
                            else:
                                _createsByToken[token] = 1
                        except github.GithubException, ghe:
                            _log.error("Failed to create comment for ticket %d: %s", ticketId, ghe)
                            _log.info("Comment should be: '%s'", _shortened(commentBody))
                            raise
                    else:
                        if token in _createsByToken:
                            _createsByToken[token] += 1
                        else:
                            _createsByToken[token] = 1
            # Done adding any comments
            # Now edit the issue: apply labels and close it if necessary
            if len(labels) > 0 or ticketMap['status'] == 'closed':
                # 'issue' is by the reporter
                # Make the issue owner make these changes:
                # (note that _hubOwner itself might not be quite right but with
                # useLogin it should be)
                if useLogin and githubAssignee:
                    _repo = _getRepoNoUser(_hubOwner, '{0}/{1}'.format(repo.owner.login, repo.name))
                if not pretend:
                    _issue = _getIssueFromRepo(_repo,issue.number)
                else:
                    _issue = issue
                if len(labels) > 0:
                    _log.debug("Setting labels on issue %d: %s", issue.number, labels)
                    if ticketMap['status'] == 'closed':
                        _log.info(u' close issue')
                        if not pretend:
                            _issue.edit(labels=labels, state='closed')
                    elif not pretend:
                        _issue.edit(labels=labels)
                elif ticketMap['status'] == 'closed':
                    _log.info(u' close issue')
                    if not pretend:
                        _issue.edit(state='closed')
            # if ticketMap['status'] == 'closed':
            #     _log.info(u' close issue')
            #     if not pretend:
            #         # FIXME: perhaps it would be better if the issue instance were one from the assignee if any
            #         issue.edit(state='closed')
            _createdIssues.append(ticketId)
        else:
            _log.info(u'skip ticket #%d: %s', ticketId, title)
    if pretend:
        _log.info(u'Finished pretend creating %d issues from %d tickets', createdCount, len(ticketsToIssuesMap))
    else:
        _log.info(u'Finished really creating %d issues from %d tickets', createdCount, len(ticketsToIssuesMap))
def _parsedOptions(arguments):
    """Parse command line ``arguments`` and return (options, configPath).

    Exactly one positional argument (the config file path) is required;
    -v switches the module logger to DEBUG.
    """
    assert arguments is not None
    usage = 'usage: %prog [options] CONFIGFILE\n\n Convert Trac tickets to Github issues.'
    parser = optparse.OptionParser(usage=usage, version="%prog " + __version__)
    parser.add_option("-R", "--really", action="store_true", dest="really",
                      help="really perform the conversion")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      help="log all actions performed in console")
    parser.add_option("-s", "--skipExisting", action="store_true", default=False, dest="skipExisting",
                      help="Skip tickets whose # overlaps an existing GitHub Issue (default %default)")
    parser.add_option("--updateObjects", action="store_true", default=False,
                      help="Update cached Github objects (each is a 5sec call that only counts against rate limit if the object changed; usually not needed)")
    options, others = parser.parse_args(arguments)
    # parser.error() exits, so the checks below never fall through on failure.
    if len(others) == 0:
        parser.error(u"CONFIGFILE must be specified")
    elif len(others) > 1:
        parser.error(u"unknown options must be removed: %s" % others[1:])
    if options.verbose:
        _log.setLevel(logging.DEBUG)
    return options, others[0]
def _validateGithubUser(hub, tracUser, token):
assert hub is not None
assert tracUser is not None
assert token is not None
if token not in _validatedGithubTokens:
try:
_log.debug(u' check for token "%s"', token)
_hub = _getHub(token)
githubUser = _getUserFromHub(_hub)
_log.debug(u' user is "%s"', githubUser.login)
except Exception, e:
import traceback
_log.debug("Error from Github API: %s", traceback.format_exc())
# FIXME: After PyGithub API raises a predictable error, use "except WahteverException".
raise _ConfigError(_OPTION_USERS,
u'Trac user "%s" must be mapped to an existing Github users token instead of "%s" = "%s"'
% (tracUser, githubUser, token))
_validatedGithubTokens.add(token)
def _createTracToGithubUserMap(hub, definition, defaultToken):
    """Build a map from Trac user name to Github API token.

    ``definition`` is a comma separated list of "trac-user: token" pairs;
    a token of "*" stands for ``defaultToken``. Each non-wildcard token is
    validated against the Github API. Raises ``_ConfigError`` on malformed,
    empty or duplicate entries.
    """
    result = {}
    for mapping in definition.split(','):
        words = [word.strip() for word in mapping.split(':')]
        if not words:
            continue
        if len(words) != 2:
            raise _ConfigError(_OPTION_USERS,
                    u'mapping must use syntax "trac-user: token" but is: "%s"' % mapping)
        tracUser, token = words
        if token == '*':
            token = defaultToken
        if not token:
            raise _ConfigError(_OPTION_USERS, u'Token must not be empty: "%s"' % mapping)
        previousToken = result.get(tracUser)
        if previousToken is not None:
            raise _ConfigError(_OPTION_USERS,
                    u'Trac user "%s" must be mapped to only one token instead of "%s" and "%s"'
                    % (tracUser, previousToken, token))
        result[tracUser] = token
        if token != '*':
            _validateGithubUser(hub, tracUser, token)
    for user in result:
        _log.debug("User token mapping found for: %s", user)
    return result
def _createTracToGithubLoginMap(hub, definition, defaultLogin):
    """Build a map from Trac user name to Github login.

    ``definition`` is a comma separated list of "trac-user: github-login"
    pairs; a login of "*" stands for ``defaultLogin``. Raises
    ``_ConfigError`` on malformed, empty or duplicate entries.
    """
    result = {}
    for mapping in definition.split(','):
        words = [word.strip() for word in mapping.split(':')]
        if not words:
            continue
        if len(words) != 2:
            raise _ConfigError(_OPTION_USERS,
                    u'mapping must use syntax "trac-user: github-login" but is: "%s"' % mapping)
        tracUser, login = words
        if login == '*':
            login = defaultLogin
        if not login:
            raise _ConfigError(_OPTION_USERS, u'Login must not be empty: "%s"' % mapping)
        previousLogin = result.get(tracUser)
        if previousLogin is not None:
            raise _ConfigError(_OPTION_USERS,
                    u'Trac user "%s" must be mapped to only one login instead of "%s" and "%s"'
                    % (tracUser, previousLogin, login))
        result[tracUser] = login
    for user in result:
        _log.debug("User login mapping found for: %s=%s", user, result.get(user))
    return result
def _loginFor(tracToGithubLoginMap, tracUser):
assert tracToGithubLoginMap is not None
assert tracUser is not None
result = tracToGithubLoginMap.get(tracUser)
if result is None:
result = tracToGithubLoginMap.get('*')
if result is None:
raise _ConfigError(_OPTION_USERS, u'Trac user "%s" must be mapped to a Github user' % (tracUser,))
return result
def _tokenFor(hub, tracToGithubUserMap, tracUser, validate=True):
assert tracToGithubUserMap is not None
assert tracUser is not None
result = tracToGithubUserMap.get(tracUser)
if result is None:
result = tracToGithubUserMap.get('*')
if result is None:
raise _ConfigError(_OPTION_USERS, u'Trac user "%s" must be mapped to a Github user' % (tracUser,))
if validate:
_validateGithubUser(hub, tracUser, result)
return result
def _getHub(token):
    """Return a Github API entry point for ``token``, cached per token."""
    try:
        return _tokenToHubMap[token]
    except KeyError:
        pass
    _log.debug("Getting hub object from token")
    hub = github.Github(token)
    if hub:
        _tokenToHubMap[token] = hub
    return hub
def _userFor(token):
    """Return the Github user authenticated by ``token``."""
    return _getUserFromHub(_getHub(token))
# Module level caches to avoid repeated (slow) Github API round trips.
_orgsByHub = {} # key is hub object, value is hash by orgname of org objects
_reposByOrg = {} # key is org object, value is hash by reponame of repo objects
#_reposByHub = {} # key is hub object, value is hash by reponame of repo objects
def _getRepo(hub, repoName):
    """Return the Github repo object for ``repoName``.

    ``repoName`` may be "org/repo" (looked up through the organization) or a
    bare name (looked up on the authenticated user). Org and repo objects
    are cached per hub/org; when ``_doUpdate()`` is enabled, cached objects
    are refreshed via ``update()``.
    """
    if '/' not in repoName:
        _log.debug("Doing get_repo %s on a user", repoName)
        return _getUserFromHub(hub).get_repo(repoName)
    orgname, repoName = repoName.split('/')
    _log.info("Repo %s belongs to org %s", repoName, orgname)
    orgsForHub = _orgsByHub.setdefault(hub, {})
    org = orgsForHub.get(orgname)
    if org is not None:
        if _doUpdate():
            _log.debug("Doing org.update for %s", orgname)
            org.update()
    else:
        _log.debug("getting org %s object", orgname)
        org = hub.get_organization(orgname)
    orgsForHub[orgname] = org
    _log.debug("Org ID: %d, login: %s, name: %s, url: %s", org.id, org.login, org.name, org.url)
    reposForOrg = _reposByOrg.setdefault(org, {})
    repo = reposForOrg.get(repoName)
    if repo is not None:
        if _doUpdate():
            _log.debug("Doing repo.update for repo %s under org %s", repoName, orgname)
            repo.update()
    else:
        _log.debug("looking up repo %s on org", repoName)
        repo = org.get_repo(repoName)
    reposForOrg[repoName] = repo
    _log.debug("Repo full_name %s, id: %d, name: %s, organization name: %s, owner %s, url: %s", repo.full_name, repo.id, repo.name, repo.organization.name, repo.owner.login, repo.url)
    _log.debug("So later get_repo will get %s/%s", repo.owner.login, repo.name)
    return repo
_reposNoUserByHub = {} # key is hub, value is array by repo name of repo objects
def _getRepoNoUser(hub, repoName):
    """Return the repo ``repoName`` looked up directly on ``hub``.

    Results are cached per hub; when ``_doUpdate()`` is enabled a cached
    repo is refreshed via ``update()``, which returns truthy when the
    object actually changed on the server.
    """
    if hub not in _reposNoUserByHub:
        _reposByName = {}
    else:
        _reposByName = _reposNoUserByHub[hub]
    if repoName not in _reposByName:
        # For some reason we fall in here relatively often. I suspect it is because
        # The hub instances are for different ticket reporters
        _log.debug("Looking up repo %s as %s", repoName, _getUserFromHub(hub).login)
        repo = hub.get_repo(repoName)
        _reposByName[repoName] = repo
        _reposNoUserByHub[hub] = _reposByName
        return repo
    else:
        repo = _reposByName[repoName]
        # This takes 5 seconds each time and we do it a lot.
        if _doUpdate():
            _log.debug("Doing repo.update for %s", repoName)
            if repo.update():
                _reposByName[repoName] = repo
                _reposNoUserByHub[hub] = _reposByName
        return repo
_hubToUser = {} # key is hub object, value is user object
def _getUserFromHub(hub):
    """Return the authenticated user for ``hub``, cached per hub object.

    When ``_doUpdate()`` is enabled the cached user is refreshed via
    ``update()`` (which takes 5-11 seconds and happens often).
    """
    user = _hubToUser.get(hub)
    if user is None:
        _log.debug("Doing get user from hub")
        user = hub.get_user()
    elif _doUpdate():
        _log.debug("Doing user.update from hub")
        user.update()
    _hubToUser[hub] = user
    return user
_repoToIssue = {} # key is repo object, value is array by issue # of issue objects
def _getIssueFromRepo(repo, issueNumber):
    """Return issue ``issueNumber`` of ``repo``, cached per repo object.

    When ``_doUpdate()`` is enabled a cached issue is refreshed via
    ``update()`` (about 5 seconds per call).
    """
    issuesForRepo = _repoToIssue.get(repo)
    if issuesForRepo is None:
        issuesForRepo = {}
    issue = issuesForRepo.get(issueNumber)
    if issue is not None:
        # Update calls are 5 seconds each. Since we're creating the object, it shouldn't have changed on us
        if _doUpdate():
            _log.debug("Updating issue %d", issueNumber)
            if issue.update():
                issuesForRepo[issueNumber] = issue
                _repoToIssue[repo] = issuesForRepo
        return issue
    # Unfortunately we fall through here often, because each
    # commenter on a ticket has their own instance here.
    _log.debug("looking up issue %d", issueNumber)
    issue = repo.get_issue(issueNumber)
    issuesForRepo[issueNumber] = issue
    _repoToIssue[repo] = issuesForRepo
    return issue
def main(argv=None):
    """Command line entry point: read the config file, connect to Github and
    migrate Trac tickets to issues.

    Returns 0 on success, or a string describing the error otherwise (the
    value is handed to sys.exit() by _mainEntryPoint). NOTE: this module
    uses Python 2 syntax (``except X, e``, ``long``, ``ConfigParser``).
    """
    if argv is None:
        argv = sys.argv
    exitCode = 1
    try:
        options, configPath = _parsedOptions(argv[1:])
        config = ConfigParser.SafeConfigParser()
        config.read(configPath)
        # Conversion settings from CONFIGFILE; only 'repo' and 'token' are
        # required, everything else has defaults.
        commentsCsvPath = _getConfigOption(config, 'comments', False)
        attachmentsCsvPath = _getConfigOption(config, 'attachments', False)
        attachmentsPrefix = _getConfigOption(config, 'attachmentsprefix', False)
        labelMapping = _getConfigOption(config, 'labels', False)
        repoName = _getConfigOption(config, 'repo')
        ticketsCsvPath = _getConfigOption(config, 'tickets', False, 'tickets.csv')
        token = _getConfigOption(config, 'token')
        # By default every Trac user maps to the single configured token.
        userMapping = _getConfigOption(config, 'users', False, '*:{0}'.format(token))
        userLoginMapping = _getConfigOption(config, 'userLogins', False, '*:*')
        trac_url = _getConfigOption(config, 'trac_url', False)
        convert_text = _getConfigOption(config, 'convert_text',
                                        required=False,
                                        defaultValue=False,
                                        boolean=True)
        ticketsToRender = _getConfigOption(config,
                                           'ticketsToRender',
                                           required=False,
                                           defaultValue=False,
                                           boolean=False)
        ticketToStartAt = _getConfigOption(config,
                                           'ticketToStartAt',
                                           required=False,
                                           defaultValue=1,
                                           boolean=False)
        addComponentLabels = _getConfigOption(config, 'addComponentLabels',
                                              required=False,
                                              defaultValue=False,
                                              boolean=True)
        # Normalize ticketToStartAt to an integer >= 1.
        if ticketToStartAt:
            ticketToStartAt = long(ticketToStartAt)
            if ticketToStartAt < 1:
                ticketToStartAt = 1
            if ticketToStartAt > 1:
                _log.info("Starting import with ticket# %d", ticketToStartAt)
        else:
            ticketToStartAt = 1
        # When an explicit ticket list is configured, drop entries below the
        # starting ticket number.
        if ticketsToRender:
            ticketsToRender = [long(x) for x in ticketsToRender.split(',')]
            if ticketToStartAt and ticketToStartAt > 1:
                tkt2 = []
                for tkt in ticketsToRender:
                    if tkt >= ticketToStartAt:
                        tkt2.append(tkt)
                ticketsToRender = tkt2
            _log.info("Only rendering tickets %s", ticketsToRender)
        if not options.really:
            _log.warning(u'no actions are performed unless command line option --really is specified')
        else:
            _log.warning(u'Really doing the ticket import!')
        if options.skipExisting:
            _log.warning(u'Tickets whose #s overlap with existing issues will not be copied over.')
        if options.updateObjects:
            _log.info("Will update Github objects as needed - adds time")
            _setUpdate(True)
        else:
            _setUpdate(False)
        hub = _getHub(token)
        _log.info(u'log on to github as user "%s"', _getUserFromHub(hub).login)
        repo = _getRepo(hub, repoName)
        _log.info(u'connect to github repo "%s"', repoName)
        migrateTickets(hub, repo, token, ticketsCsvPath,
                       commentsCsvPath, attachmentsCsvPath, firstTicketIdToConvert=ticketToStartAt,
                       userMapping=userMapping,
                       labelMapping=labelMapping,
                       attachmentsPrefix=attachmentsPrefix,
                       pretend=not options.really,
                       trac_url=trac_url, convert_text=convert_text, ticketsToRender=ticketsToRender, addComponentLabels=addComponentLabels, userLoginMapping=userLoginMapping,
                       skipExisting=options.skipExisting)
        exitCode = 0
    except (EnvironmentError, OSError, _ConfigError, _CsvDataError), error:
        exitCode = str(error)
        _log.error(error)
    except KeyboardInterrupt, error:
        exitCode = str(error)
        _log.warning(u"interrupted by user")
    except Exception, error:
        exitCode = str(error)
        _log.exception(error)
    # Summary of what happened, printed even after a failure.
    _log.info("Tickets with wiki to markdown edits: %s", _editedIssues)
    if len(_createdIssues) > 0:
        _log.info("Issues created: %d. Last issue created: #%d", len(_createdIssues), _createdIssues[-1])
    else:
        _log.info("No issues created")
    # NOTE(review): ``hub`` is unbound here when an exception occurred before
    # _getHub() was reached; this line would then raise NameError - confirm.
    _log.debug("Rate limit status: %r resets at %s", hub.rate_limiting, datetime.datetime.fromtimestamp(hub.rate_limiting_resettime))
    for t in _createsByToken:
        _h = _getHub(t)
        _u = _getUserFromHub(_h).login
        if t in sleepsByToken:
            _log.info("User %s had %d creates. Last sleep at %d", _u, _createsByToken[t], sleepsByToken[t])
        else:
            _log.info("User %s had %d creates. No Last sleep for this user", _u, _createsByToken[t])
    return exitCode
def _mainEntryPoint():
    # Exit with main()'s result: 0 on success, an error string otherwise.
    # logging.basicConfig(level=logging.INFO)
    sys.exit(main())
if __name__ == "__main__":
    _mainEntryPoint()
| ahelsing/tratihubis | tratihubis.py | Python | bsd-3-clause | 67,669 | [
"VisIt"
] | def5ac6e0ee0c8e3204a424c28ad356940790f4c7f13f807c4f67b30fea44842 |
import itertools
from typing import Container, ItemsView, List, Optional, Tuple
from data import iter_util
from data.graph import _op_mixin, bloom_node
from data.graph.ops import anagram_op, anagram_transform_op
def merge(host: 'bloom_node.BloomNode') -> None:
  """Fold the masks of host's operand nodes into host, in place.

  Operands that are BloomNodes are merged via the operator's merge
  function; all other operands are passed through as "extra" values.
  """
  op = host.op
  _, merge_fn, _ = _operator_functions[op.operator()]
  operands = op.operands()
  if not operands:
    return
  nodes = [operand for operand in operands
           if isinstance(operand, bloom_node.BloomNode)]
  extra = [operand for operand in operands
           if not isinstance(operand, bloom_node.BloomNode)]
  merge_fn(
      host, nodes, extra, whitelist=None, blacklist=host.edges(readonly=True))
def reduce(
    host: 'bloom_node.BloomNode',
    whitelist: Container[str] = None,
    blacklist: Container[str] = None) -> ItemsView[str, 'bloom_node.BloomNode']:
  """Yield (edge key, reduced node) pairs for host's operation.

  Generator: the ``return {}.items()`` below simply ends iteration with no
  results (the value is discarded by generator semantics). NOTE: this
  shadows the builtin ``reduce`` name within this module.
  """
  op = host.op
  operands = op.operands()
  if not operands:
    return {}.items()
  # Partition operands into child nodes (whose edges are iterated) and
  # plain "extra" values handed to the visitor.
  nodes = []
  edges = []
  extra = []
  for operator in operands:
    if isinstance(operator, bloom_node.BloomNode):
      nodes.append(operator)
      edges.append(operator.edges())
    else:
      extra.append(operator)
  iterator_fn, merge_fn, visitor_fn = _operator_functions[op.operator()]
  for key, sources in iterator_fn(
      edges,
      whitelist=whitelist,
      blacklist=blacklist):
    result = visitor_fn(sources, extra)
    if result is not None:
      yield key, result
  # Merge must run after visit: visit will add host's outgoing edges and set
  # mask properties which merge expects to be present.
  merge_fn(host, nodes, extra, whitelist=whitelist, blacklist=blacklist)
def _merge_add(
    host: 'bloom_node.BloomNode',
    sources: List['bloom_node.BloomNode'],
    extra: list,
    **kwargs) -> None:
  """Fold the OP_ADD (union) combination of sources into host, in place."""
  del kwargs
  (provide_mask, require_mask, lengths_mask,
   max_weight, match_weight) = _reduce_add(sources, extra)
  host.provide_mask |= provide_mask
  host.require_mask &= require_mask
  host.lengths_mask |= lengths_mask
  if max_weight > host.max_weight:
    host.max_weight = max_weight
  if match_weight > host.match_weight:
    host.match_weight = match_weight
  for source in sources:
    host.annotate(source.annotations())
def _merge_multiply(
    host: 'bloom_node.BloomNode',
    sources: List['bloom_node.BloomNode'],
    extra: list,
    **kwargs) -> None:
  """Fold the OP_MULTIPLY (intersection) combination of sources into host."""
  del kwargs
  (provide_mask, require_mask, lengths_mask,
   max_weight, match_weight) = _reduce_multiply(sources, extra)
  host.provide_mask &= provide_mask
  host.require_mask &= require_mask
  host.lengths_mask |= lengths_mask
  if max_weight > host.max_weight:
    host.max_weight = max_weight
  if match_weight > host.match_weight:
    host.match_weight = match_weight
  for source in sources:
    host.annotate(source.annotations())
def _merge_call(
host: 'bloom_node.BloomNode',
sources: List['bloom_node.BloomNode'],
extra: list,
**kwargs) -> None:
assert len(extra) == 2
call_args, call_kwargs = extra
call_kwargs = call_kwargs.copy()
call_fn = call_kwargs.pop('merge', None)
if not call_fn:
return
call_kwargs.pop('visit', None)
call_kwargs.update(kwargs)
call_fn(host, sources, *call_args, **call_kwargs)
def _visit_identity(
sources: List['bloom_node.BloomNode'],
extra: list) -> 'bloom_node.BloomNode':
if not extra and len(sources) == 1:
return sources[0]
raise NotImplementedError(
'OP_IDENTITY failed to reduce %s, %s' % (sources, extra))
def _reduce_add(
sources: List['bloom_node.BloomNode'],
extra: list) -> Tuple[int, int, int, float, float]:
if extra:
raise NotImplementedError('OP_ADD failed to reduce %s' % extra)
# Round up all of the values from all available sources.
provide_mask = sources[0].provide_mask
require_mask = sources[0].require_mask
lengths_mask = sources[0].lengths_mask
max_weight = sources[0].max_weight
match_weight = sources[0].match_weight
pos = 1
l = len(sources)
while pos < l:
source = sources[pos]
provide_mask |= source.provide_mask # Letters from either are provided.
require_mask &= source.require_mask # Requirements are reduced.
lengths_mask |= source.lengths_mask # Lengths from either are provided.
if source.max_weight > max_weight:
max_weight = source.max_weight
if source.match_weight > match_weight:
match_weight = source.match_weight
pos += 1
return provide_mask, require_mask, lengths_mask, max_weight, match_weight
def _visit_add(
    sources: List['bloom_node.BloomNode'],
    extra: list) -> Optional['bloom_node.BloomNode']:
  """Return a node representing the OP_ADD union of sources."""
  if len(sources) == 1:
    assert not extra
    return sources[0]
  (provide_mask, require_mask, lengths_mask,
   max_weight, match_weight) = _reduce_add(sources, extra)
  combined = bloom_node.BloomNode(_op_mixin.Op(_op_mixin.OP_ADD, sources))
  combined.provide_mask = provide_mask
  combined.require_mask = require_mask
  combined.lengths_mask = lengths_mask
  combined.max_weight = max_weight
  combined.match_weight = match_weight
  return combined
def _reduce_multiply(
sources: List['bloom_node.BloomNode'],
extra: list) -> Tuple[int, int, int, float, float]:
scale = 1
for n in extra:
scale *= n
# First, verify there are matching masks. It's required for a valid
# solution and is trivial to verify satisfaction.
lengths_mask = sources[0].lengths_mask
provide_mask = sources[0].provide_mask
require_mask = sources[0].require_mask
max_weight = sources[0].max_weight * scale
match_weight = sources[0].match_weight * scale
pos = 1
l = len(sources)
while pos < l and lengths_mask and (
not require_mask or (provide_mask & require_mask) == require_mask):
source = sources[pos]
lengths_mask &= source.lengths_mask # Overlapping solution lengths exist.
provide_mask &= source.provide_mask # Overlapping letters are provided.
require_mask |= source.require_mask # Requirements are combined.
max_weight *= source.max_weight # Trends to 0.
match_weight *= source.match_weight
pos += 1
return provide_mask, require_mask, lengths_mask, max_weight, match_weight
def _visit_multiply(
    sources: List['bloom_node.BloomNode'],
    extra: list) -> Optional['bloom_node.BloomNode']:
  """Return a node for the OP_MULTIPLY intersection of sources, or None
  when the combination is provably unsatisfiable."""
  if not extra and len(sources) == 1:
    return sources[0]
  provide_mask, require_mask, lengths_mask, max_weight, match_weight = (
      _reduce_multiply(sources, extra))
  if not any(source.op for source in sources) and (
      not lengths_mask or
      not max_weight or
      (require_mask and (provide_mask & require_mask) != require_mask)):
    # Unsatisfiable: no common requirements or child operations which could
    # potentially expand into more edges.
    return None
  # Verify all combinations are mutually satisfiable.
  for a, b in itertools.combinations(sources, 2):
    if not a.satisfies(b) or not b.satisfies(a):
      return None
  # ``extra`` (numeric scale factors) is kept in the new Op's operands so
  # the scaling survives later merges/reduces.
  if extra:
    reduced = bloom_node.BloomNode(
        _op_mixin.Op(_op_mixin.OP_MULTIPLY, sources + extra))
  else:
    reduced = bloom_node.BloomNode(_op_mixin.Op(_op_mixin.OP_MULTIPLY, sources))
  reduced.provide_mask = provide_mask
  reduced.require_mask = require_mask
  reduced.lengths_mask = lengths_mask
  reduced.max_weight = max_weight
  reduced.match_weight = match_weight
  return reduced
def _visit_call(
sources: List['bloom_node.BloomNode'],
extra: list) -> Optional['bloom_node.BloomNode']:
assert len(extra) == 2
call_args, call_kwargs = extra
call_kwargs = call_kwargs.copy()
call_kwargs.pop('merge', None)
call_fn = call_kwargs.pop('visit', None)
if not call_fn:
return
call_fn(sources, *call_args, **call_kwargs)
def _visit_fail(
sources: List['bloom_node.BloomNode'],
extra: list) -> None:
del sources, extra
raise NotImplementedError('Reduce visitor unsupported for this operator')
# Note: Order of operators must match _op_mixin.
# Each entry is (edge iterator, merge function, visit function), indexed by
# op.operator(). Judging by the functions used, the order appears to be
# IDENTITY, ADD, MULTIPLY, ANAGRAM, ANAGRAM_TRANSFORM, CALL - confirm
# against _op_mixin before reordering.
_operator_functions = [
    (iter_util.map_common, _merge_add, _visit_identity),
    (iter_util.map_both, _merge_add, _visit_add),
    (iter_util.map_common, _merge_multiply, _visit_multiply),
    (iter_util.map_none, anagram_op.merge_fn, _visit_fail),
    (iter_util.map_none, anagram_transform_op.merge_fn, _visit_fail),
    (iter_util.map_both, _merge_call, _visit_call),
]
| PhilHarnish/forge | src/data/graph/bloom_node_reducer.py | Python | mit | 8,155 | [
"VisIt"
] | 665c3e3c39ba3f50c6f9bca40e67b74beefdd397dd7be3c36af7d7a62cc8da25 |
# Copyright 2012 Tom Hayward <tom@tomh.us>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Map from state/province name to FIPS state code. US states and
# territories map to integer FIPS codes; Canadian provinces use "CAxx"
# string codes instead.
FIPS_STATES = {
    "Alaska" : 2,
    "Alabama" : 1,
    "Arkansas" : 5,
    "Arizona" : 4,
    "California" : 6,
    "Colorado" : 8,
    "Connecticut" : 9,
    "District of Columbia" : 11,
    "Delaware" : 10,
    "Florida" : 12,
    "Georgia" : 13,
    "Guam" : 66,
    "Hawaii" : 15,
    "Iowa" : 19,
    "Idaho" : 16,
    "Illinois" : 17,
    "Indiana" : 18,
    "Kansas" : 20,
    "Kentucky" : 21,
    "Louisiana" : 22,
    "Massachusetts" : 25,
    "Maryland" : 24,
    "Maine" : 23,
    "Michigan" : 26,
    "Minnesota" : 27,
    "Missouri" : 29,
    "Mississippi" : 28,
    "Montana" : 30,
    "North Carolina" : 37,
    "North Dakota" : 38,
    "Nebraska" : 31,
    "New Hampshire" : 33,
    "New Jersey" : 34,
    "New Mexico" : 35,
    "Nevada" : 32,
    "New York" : 36,
    "Ohio" : 39,
    "Oklahoma" : 40,
    "Oregon" : 41,
    "Pennsylvania" : 42,
    "Puerto Rico" : 72,
    "Rhode Island" : 44,
    "South Carolina" : 45,
    "South Dakota" : 46,
    "Tennessee" : 47,
    "Texas" : 48,
    "Utah" : 49,
    "Virginia" : 51,
    "Virgin Islands" : 78,
    "Vermont" : 50,
    "Washington" : 53,
    "Wisconsin" : 55,
    "West Virginia" : 54,
    "Wyoming" : 56,
    "Alberta" : "CA01",
    "British Columbia" : "CA02",
    "Manitoba" : "CA03",
    "New Brunswick" : "CA04",
    "Newfoundland and Labrador": "CA05",
    "Northwest Territories": "CA13",
    "Nova Scotia" : "CA07",
    "Nunavut" : "CA14",
    "Ontario" : "CA08",
    "Prince Edward Island" : "CA09",
    "Quebec" : "CA10",
    "Saskatchewan" : "CA11",
    "Yukon" : "CA12",
    }
FIPS_COUNTIES = {
1: { '--All--': '%',
'Autauga County, AL': '001',
'Baldwin County, AL': '003',
'Barbour County, AL': '005',
'Bibb County, AL': '007',
'Blount County, AL': '009',
'Bullock County, AL': '011',
'Butler County, AL': '013',
'Calhoun County, AL': '015',
'Chambers County, AL': '017',
'Cherokee County, AL': '019',
'Chilton County, AL': '021',
'Choctaw County, AL': '023',
'Clarke County, AL': '025',
'Clay County, AL': '027',
'Cleburne County, AL': '029',
'Coffee County, AL': '031',
'Colbert County, AL': '033',
'Conecuh County, AL': '035',
'Coosa County, AL': '037',
'Covington County, AL': '039',
'Crenshaw County, AL': '041',
'Cullman County, AL': '043',
'Dale County, AL': '045',
'Dallas County, AL': '047',
'DeKalb County, AL': '049',
'Elmore County, AL': '051',
'Escambia County, AL': '053',
'Etowah County, AL': '055',
'Fayette County, AL': '057',
'Franklin County, AL': '059',
'Geneva County, AL': '061',
'Greene County, AL': '063',
'Hale County, AL': '065',
'Henry County, AL': '067',
'Houston County, AL': '069',
'Jackson County, AL': '071',
'Jefferson County, AL': '073',
'Lamar County, AL': '075',
'Lauderdale County, AL': '077',
'Lawrence County, AL': '079',
'Lee County, AL': '081',
'Limestone County, AL': '083',
'Lowndes County, AL': '085',
'Macon County, AL': '087',
'Madison County, AL': '089',
'Marengo County, AL': '091',
'Marion County, AL': '093',
'Marshall County, AL': '095',
'Mobile County, AL': '097',
'Monroe County, AL': '099',
'Montgomery County, AL': '101',
'Morgan County, AL': '103',
'Perry County, AL': '105',
'Pickens County, AL': '107',
'Pike County, AL': '109',
'Randolph County, AL': '111',
'Russell County, AL': '113',
'Shelby County, AL': '117',
'St. Clair County, AL': '115',
'Sumter County, AL': '119',
'Talladega County, AL': '121',
'Tallapoosa County, AL': '123',
'Tuscaloosa County, AL': '125',
'Walker County, AL': '127',
'Washington County, AL': '129',
'Wilcox County, AL': '131',
'Winston County, AL': '133'},
2: { '--All--': '%',
'Aleutians East Borough, AK': '013',
'Aleutians West Census Area, AK': '016',
'Anchorage Borough/municipality, AK': '020',
'Bethel Census Area, AK': '050',
'Bristol Bay Borough, AK': '060',
'Denali Borough, AK': '068',
'Dillingham Census Area, AK': '070',
'Fairbanks North Star Borough, AK': '090',
'Haines Borough, AK': '100',
'Juneau Borough/city, AK': '110',
'Kenai Peninsula Borough, AK': '122',
'Ketchikan Gateway Borough, AK': '130',
'Kodiak Island Borough, AK': '150',
'Lake and Peninsula Borough, AK': '164',
'Matanuska-Susitna Borough, AK': '170',
'Nome Census Area, AK': '180',
'North Slope Borough, AK': '185',
'Northwest Arctic Borough, AK': '188',
'Prince of Wales-Outer Ketchikan Census Area, AK': '201',
'Sitka Borough/city, AK': '220',
'Skagway-Hoonah-Angoon Census Area, AK': '232',
'Southeast Fairbanks Census Area, AK': '240',
'Valdez-Cordova Census Area, AK': '261',
'Wade Hampton Census Area, AK': '270',
'Wrangell-Petersburg Census Area, AK': '280',
'Yakutat Borough, AK': '282',
'Yukon-Koyukuk Census Area, AK': '290'},
4: { '--All--': '%',
'Apache County, AZ': '001',
'Cochise County, AZ': '003',
'Coconino County, AZ': '005',
'Gila County, AZ': '007',
'Graham County, AZ': '009',
'Greenlee County, AZ': '011',
'La Paz County, AZ': '012',
'Maricopa County, AZ': '013',
'Mohave County, AZ': '015',
'Navajo County, AZ': '017',
'Pima County, AZ': '019',
'Pinal County, AZ': '021',
'Santa Cruz County, AZ': '023',
'Yavapai County, AZ': '025',
'Yuma County, AZ': '027'},
5: { '--All--': '%',
'Arkansas County, AR': '001',
'Ashley County, AR': '003',
'Baxter County, AR': '005',
'Benton County, AR': '007',
'Boone County, AR': '009',
'Bradley County, AR': '011',
'Calhoun County, AR': '013',
'Carroll County, AR': '015',
'Chicot County, AR': '017',
'Clark County, AR': '019',
'Clay County, AR': '021',
'Cleburne County, AR': '023',
'Cleveland County, AR': '025',
'Columbia County, AR': '027',
'Conway County, AR': '029',
'Craighead County, AR': '031',
'Crawford County, AR': '033',
'Crittenden County, AR': '035',
'Cross County, AR': '037',
'Dallas County, AR': '039',
'Desha County, AR': '041',
'Drew County, AR': '043',
'Faulkner County, AR': '045',
'Franklin County, AR': '047',
'Fulton County, AR': '049',
'Garland County, AR': '051',
'Grant County, AR': '053',
'Greene County, AR': '055',
'Hempstead County, AR': '057',
'Hot Spring County, AR': '059',
'Howard County, AR': '061',
'Independence County, AR': '063',
'Izard County, AR': '065',
'Jackson County, AR': '067',
'Jefferson County, AR': '069',
'Johnson County, AR': '071',
'Lafayette County, AR': '073',
'Lawrence County, AR': '075',
'Lee County, AR': '077',
'Lincoln County, AR': '079',
'Little River County, AR': '081',
'Logan County, AR': '083',
'Lonoke County, AR': '085',
'Madison County, AR': '087',
'Marion County, AR': '089',
'Miller County, AR': '091',
'Mississippi County, AR': '093',
'Monroe County, AR': '095',
'Montgomery County, AR': '097',
'Nevada County, AR': '099',
'Newton County, AR': '101',
'Ouachita County, AR': '103',
'Perry County, AR': '105',
'Phillips County, AR': '107',
'Pike County, AR': '109',
'Poinsett County, AR': '111',
'Polk County, AR': '113',
'Pope County, AR': '115',
'Prairie County, AR': '117',
'Pulaski County, AR': '119',
'Randolph County, AR': '121',
'Saline County, AR': '125',
'Scott County, AR': '127',
'Searcy County, AR': '129',
'Sebastian County, AR': '131',
'Sevier County, AR': '133',
'Sharp County, AR': '135',
'St. Francis County, AR': '123',
'Stone County, AR': '137',
'Union County, AR': '139',
'Van Buren County, AR': '141',
'Washington County, AR': '143',
'White County, AR': '145',
'Woodruff County, AR': '147',
'Yell County, AR': '149'},
6: { '--All--': '%',
'Alameda County, CA': '001',
'Alpine County, CA': '003',
'Amador County, CA': '005',
'Butte County, CA': '007',
'Calaveras County, CA': '009',
'Colusa County, CA': '011',
'Contra Costa County, CA': '013',
'Del Norte County, CA': '015',
'El Dorado County, CA': '017',
'Fresno County, CA': '019',
'Glenn County, CA': '021',
'Humboldt County, CA': '023',
'Imperial County, CA': '025',
'Inyo County, CA': '027',
'Kern County, CA': '029',
'Kings County, CA': '031',
'Lake County, CA': '033',
'Lassen County, CA': '035',
'Los Angeles County, CA': '037',
'Madera County, CA': '039',
'Marin County, CA': '041',
'Mariposa County, CA': '043',
'Mendocino County, CA': '045',
'Merced County, CA': '047',
'Modoc County, CA': '049',
'Mono County, CA': '051',
'Monterey County, CA': '053',
'Napa County, CA': '055',
'Nevada County, CA': '057',
'Orange County, CA': '059',
'Placer County, CA': '061',
'Plumas County, CA': '063',
'Riverside County, CA': '065',
'Sacramento County, CA': '067',
'San Benito County, CA': '069',
'San Bernardino County, CA': '071',
'San Diego County, CA': '073',
'San Francisco County/city, CA': '075',
'San Joaquin County, CA': '077',
'San Luis Obispo County, CA': '079',
'San Mateo County, CA': '081',
'Santa Barbara County, CA': '083',
'Santa Clara County, CA': '085',
'Santa Cruz County, CA': '087',
'Shasta County, CA': '089',
'Sierra County, CA': '091',
'Siskiyou County, CA': '093',
'Solano County, CA': '095',
'Sonoma County, CA': '097',
'Stanislaus County, CA': '099',
'Sutter County, CA': '101',
'Tehama County, CA': '103',
'Trinity County, CA': '105',
'Tulare County, CA': '107',
'Tuolumne County, CA': '109',
'Ventura County, CA': '111',
'Yolo County, CA': '113',
'Yuba County, CA': '115'},
8: { '--All--': '%',
'Adams County, CO': '001',
'Alamosa County, CO': '003',
'Arapahoe County, CO': '005',
'Archuleta County, CO': '007',
'Baca County, CO': '009',
'Bent County, CO': '011',
'Boulder County, CO': '013',
'Broomfield County/city, CO': '014',
'Chaffee County, CO': '015',
'Cheyenne County, CO': '017',
'Clear Creek County, CO': '019',
'Conejos County, CO': '021',
'Costilla County, CO': '023',
'Crowley County, CO': '025',
'Custer County, CO': '027',
'Delta County, CO': '029',
'Denver County/city, CO': '031',
'Dolores County, CO': '033',
'Douglas County, CO': '035',
'Eagle County, CO': '037',
'El Paso County, CO': '041',
'Elbert County, CO': '039',
'Fremont County, CO': '043',
'Garfield County, CO': '045',
'Gilpin County, CO': '047',
'Grand County, CO': '049',
'Gunnison County, CO': '051',
'Hinsdale County, CO': '053',
'Huerfano County, CO': '055',
'Jackson County, CO': '057',
'Jefferson County, CO': '059',
'Kiowa County, CO': '061',
'Kit Carson County, CO': '063',
'La Plata County, CO': '067',
'Lake County, CO': '065',
'Larimer County, CO': '069',
'Las Animas County, CO': '071',
'Lincoln County, CO': '073',
'Logan County, CO': '075',
'Mesa County, CO': '077',
'Mineral County, CO': '079',
'Moffat County, CO': '081',
'Montezuma County, CO': '083',
'Montrose County, CO': '085',
'Morgan County, CO': '087',
'Otero County, CO': '089',
'Ouray County, CO': '091',
'Park County, CO': '093',
'Phillips County, CO': '095',
'Pitkin County, CO': '097',
'Prowers County, CO': '099',
'Pueblo County, CO': '101',
'Rio Blanco County, CO': '103',
'Rio Grande County, CO': '105',
'Routt County, CO': '107',
'Saguache County, CO': '109',
'San Juan County, CO': '111',
'San Miguel County, CO': '113',
'Sedgwick County, CO': '115',
'Summit County, CO': '117',
'Teller County, CO': '119',
'Washington County, CO': '121',
'Weld County, CO': '123',
'Yuma County, CO': '125'},
9: { '--All--': '%',
'Fairfield County, CT': '001',
'Hartford County, CT': '003',
'Litchfield County, CT': '005',
'Middlesex County, CT': '007',
'New Haven County, CT': '009',
'New London County, CT': '011',
'Tolland County, CT': '013',
'Windham County, CT': '015'},
10: { '--All--': '%',
'Kent County, DE': '001',
'New Castle County, DE': '003',
'Sussex County, DE': '005'},
11: { '--All--': '%', 'District of Columbia': '001'},
12: { '--All--': '%',
'Alachua County, FL': '001',
'Baker County, FL': '003',
'Bay County, FL': '005',
'Bradford County, FL': '007',
'Brevard County, FL': '009',
'Broward County, FL': '011',
'Calhoun County, FL': '013',
'Charlotte County, FL': '015',
'Citrus County, FL': '017',
'Clay County, FL': '019',
'Collier County, FL': '021',
'Columbia County, FL': '023',
'DeSoto County, FL': '027',
'Dixie County, FL': '029',
'Duval County, FL': '031',
'Escambia County, FL': '033',
'Flagler County, FL': '035',
'Franklin County, FL': '037',
'Gadsden County, FL': '039',
'Gilchrist County, FL': '041',
'Glades County, FL': '043',
'Gulf County, FL': '045',
'Hamilton County, FL': '047',
'Hardee County, FL': '049',
'Hendry County, FL': '051',
'Hernando County, FL': '053',
'Highlands County, FL': '055',
'Hillsborough County, FL': '057',
'Holmes County, FL': '059',
'Indian River County, FL': '061',
'Jackson County, FL': '063',
'Jefferson County, FL': '065',
'Lafayette County, FL': '067',
'Lake County, FL': '069',
'Lee County, FL': '071',
'Leon County, FL': '073',
'Levy County, FL': '075',
'Liberty County, FL': '077',
'Madison County, FL': '079',
'Manatee County, FL': '081',
'Marion County, FL': '083',
'Martin County, FL': '085',
'Miami-Dade County, FL': '086',
'Monroe County, FL': '087',
'Nassau County, FL': '089',
'Okaloosa County, FL': '091',
'Okeechobee County, FL': '093',
'Orange County, FL': '095',
'Osceola County, FL': '097',
'Palm Beach County, FL': '099',
'Pasco County, FL': '101',
'Pinellas County, FL': '103',
'Polk County, FL': '105',
'Putnam County, FL': '107',
'Santa Rosa County, FL': '113',
'Sarasota County, FL': '115',
'Seminole County, FL': '117',
'St. Johns County, FL': '109',
'St. Lucie County, FL': '111',
'Sumter County, FL': '119',
'Suwannee County, FL': '121',
'Taylor County, FL': '123',
'Union County, FL': '125',
'Volusia County, FL': '127',
'Wakulla County, FL': '129',
'Walton County, FL': '131',
'Washington County, FL': '133'},
13: { '--All--': '%',
'Appling County, GA': '001',
'Atkinson County, GA': '003',
'Bacon County, GA': '005',
'Baker County, GA': '007',
'Baldwin County, GA': '009',
'Banks County, GA': '011',
'Barrow County, GA': '013',
'Bartow County, GA': '015',
'Ben Hill County, GA': '017',
'Berrien County, GA': '019',
'Bibb County, GA': '021',
'Bleckley County, GA': '023',
'Brantley County, GA': '025',
'Brooks County, GA': '027',
'Bryan County, GA': '029',
'Bulloch County, GA': '031',
'Burke County, GA': '033',
'Butts County, GA': '035',
'Calhoun County, GA': '037',
'Camden County, GA': '039',
'Candler County, GA': '043',
'Carroll County, GA': '045',
'Catoosa County, GA': '047',
'Charlton County, GA': '049',
'Chatham County, GA': '051',
'Chattahoochee County, GA': '053',
'Chattooga County, GA': '055',
'Cherokee County, GA': '057',
'Clarke County, GA': '059',
'Clay County, GA': '061',
'Clayton County, GA': '063',
'Clinch County, GA': '065',
'Cobb County, GA': '067',
'Coffee County, GA': '069',
'Colquitt County, GA': '071',
'Columbia County, GA': '073',
'Cook County, GA': '075',
'Coweta County, GA': '077',
'Crawford County, GA': '079',
'Crisp County, GA': '081',
'Dade County, GA': '083',
'Dawson County, GA': '085',
'DeKalb County, GA': '089',
'Decatur County, GA': '087',
'Dodge County, GA': '091',
'Dooly County, GA': '093',
'Dougherty County, GA': '095',
'Douglas County, GA': '097',
'Early County, GA': '099',
'Echols County, GA': '101',
'Effingham County, GA': '103',
'Elbert County, GA': '105',
'Emanuel County, GA': '107',
'Evans County, GA': '109',
'Fannin County, GA': '111',
'Fayette County, GA': '113',
'Floyd County, GA': '115',
'Forsyth County, GA': '117',
'Franklin County, GA': '119',
'Fulton County, GA': '121',
'Gilmer County, GA': '123',
'Glascock County, GA': '125',
'Glynn County, GA': '127',
'Gordon County, GA': '129',
'Grady County, GA': '131',
'Greene County, GA': '133',
'Gwinnett County, GA': '135',
'Habersham County, GA': '137',
'Hall County, GA': '139',
'Hancock County, GA': '141',
'Haralson County, GA': '143',
'Harris County, GA': '145',
'Hart County, GA': '147',
'Heard County, GA': '149',
'Henry County, GA': '151',
'Houston County, GA': '153',
'Irwin County, GA': '155',
'Jackson County, GA': '157',
'Jasper County, GA': '159',
'Jeff Davis County, GA': '161',
'Jefferson County, GA': '163',
'Jenkins County, GA': '165',
'Johnson County, GA': '167',
'Jones County, GA': '169',
'Lamar County, GA': '171',
'Lanier County, GA': '173',
'Laurens County, GA': '175',
'Lee County, GA': '177',
'Liberty County, GA': '179',
'Lincoln County, GA': '181',
'Long County, GA': '183',
'Lowndes County, GA': '185',
'Lumpkin County, GA': '187',
'Macon County, GA': '193',
'Madison County, GA': '195',
'Marion County, GA': '197',
'McDuffie County, GA': '189',
'McIntosh County, GA': '191',
'Meriwether County, GA': '199',
'Miller County, GA': '201',
'Mitchell County, GA': '205',
'Monroe County, GA': '207',
'Montgomery County, GA': '209',
'Morgan County, GA': '211',
'Murray County, GA': '213',
'Muscogee County, GA': '215',
'Newton County, GA': '217',
'Oconee County, GA': '219',
'Oglethorpe County, GA': '221',
'Paulding County, GA': '223',
'Peach County, GA': '225',
'Pickens County, GA': '227',
'Pierce County, GA': '229',
'Pike County, GA': '231',
'Polk County, GA': '233',
'Pulaski County, GA': '235',
'Putnam County, GA': '237',
'Quitman County, GA': '239',
'Rabun County, GA': '241',
'Randolph County, GA': '243',
'Richmond County, GA': '245',
'Rockdale County, GA': '247',
'Schley County, GA': '249',
'Screven County, GA': '251',
'Seminole County, GA': '253',
'Spalding County, GA': '255',
'Stephens County, GA': '257',
'Stewart County, GA': '259',
'Sumter County, GA': '261',
'Talbot County, GA': '263',
'Taliaferro County, GA': '265',
'Tattnall County, GA': '267',
'Taylor County, GA': '269',
'Telfair County, GA': '271',
'Terrell County, GA': '273',
'Thomas County, GA': '275',
'Tift County, GA': '277',
'Toombs County, GA': '279',
'Towns County, GA': '281',
'Treutlen County, GA': '283',
'Troup County, GA': '285',
'Turner County, GA': '287',
'Twiggs County, GA': '289',
'Union County, GA': '291',
'Upson County, GA': '293',
'Walker County, GA': '295',
'Walton County, GA': '297',
'Ware County, GA': '299',
'Warren County, GA': '301',
'Washington County, GA': '303',
'Wayne County, GA': '305',
'Webster County, GA': '307',
'Wheeler County, GA': '309',
'White County, GA': '311',
'Whitfield County, GA': '313',
'Wilcox County, GA': '315',
'Wilkes County, GA': '317',
'Wilkinson County, GA': '319',
'Worth County, GA': '321'},
15: { '--All--': '%',
'Hawaii County, HI': '001',
'Honolulu County/city, HI': '003',
'Kauai County, HI': '007',
'Maui County, HI': '009'},
16: { '--All--': '%',
'Ada County, ID': '001',
'Adams County, ID': '003',
'Bannock County, ID': '005',
'Bear Lake County, ID': '007',
'Benewah County, ID': '009',
'Bingham County, ID': '011',
'Blaine County, ID': '013',
'Boise County, ID': '015',
'Bonner County, ID': '017',
'Bonneville County, ID': '019',
'Boundary County, ID': '021',
'Butte County, ID': '023',
'Camas County, ID': '025',
'Canyon County, ID': '027',
'Caribou County, ID': '029',
'Cassia County, ID': '031',
'Clark County, ID': '033',
'Clearwater County, ID': '035',
'Custer County, ID': '037',
'Elmore County, ID': '039',
'Franklin County, ID': '041',
'Fremont County, ID': '043',
'Gem County, ID': '045',
'Gooding County, ID': '047',
'Idaho County, ID': '049',
'Jefferson County, ID': '051',
'Jerome County, ID': '053',
'Kootenai County, ID': '055',
'Latah County, ID': '057',
'Lemhi County, ID': '059',
'Lewis County, ID': '061',
'Lincoln County, ID': '063',
'Madison County, ID': '065',
'Minidoka County, ID': '067',
'Nez Perce County, ID': '069',
'Oneida County, ID': '071',
'Owyhee County, ID': '073',
'Payette County, ID': '075',
'Power County, ID': '077',
'Shoshone County, ID': '079',
'Teton County, ID': '081',
'Twin Falls County, ID': '083',
'Valley County, ID': '085',
'Washington County, ID': '087'},
17: { '--All--': '%',
'Adams County, IL': '001',
'Alexander County, IL': '003',
'Bond County, IL': '005',
'Boone County, IL': '007',
'Brown County, IL': '009',
'Bureau County, IL': '011',
'Calhoun County, IL': '013',
'Carroll County, IL': '015',
'Cass County, IL': '017',
'Champaign County, IL': '019',
'Christian County, IL': '021',
'Clark County, IL': '023',
'Clay County, IL': '025',
'Clinton County, IL': '027',
'Coles County, IL': '029',
'Cook County, IL': '031',
'Crawford County, IL': '033',
'Cumberland County, IL': '035',
'De Witt County, IL': '039',
'DeKalb County, IL': '037',
'Douglas County, IL': '041',
'DuPage County, IL': '043',
'Edgar County, IL': '045',
'Edwards County, IL': '047',
'Effingham County, IL': '049',
'Fayette County, IL': '051',
'Ford County, IL': '053',
'Franklin County, IL': '055',
'Fulton County, IL': '057',
'Gallatin County, IL': '059',
'Greene County, IL': '061',
'Grundy County, IL': '063',
'Hamilton County, IL': '065',
'Hancock County, IL': '067',
'Hardin County, IL': '069',
'Henderson County, IL': '071',
'Henry County, IL': '073',
'Iroquois County, IL': '075',
'Jackson County, IL': '077',
'Jasper County, IL': '079',
'Jefferson County, IL': '081',
'Jersey County, IL': '083',
'Jo Daviess County, IL': '085',
'Johnson County, IL': '087',
'Kane County, IL': '089',
'Kankakee County, IL': '091',
'Kendall County, IL': '093',
'Knox County, IL': '095',
'La Salle County, IL': '099',
'Lake County, IL': '097',
'Lawrence County, IL': '101',
'Lee County, IL': '103',
'Livingston County, IL': '105',
'Logan County, IL': '107',
'Macon County, IL': '115',
'Macoupin County, IL': '117',
'Madison County, IL': '119',
'Marion County, IL': '121',
'Marshall County, IL': '123',
'Mason County, IL': '125',
'Massac County, IL': '127',
'McDonough County, IL': '109',
'McHenry County, IL': '111',
'McLean County, IL': '113',
'Menard County, IL': '129',
'Mercer County, IL': '131',
'Monroe County, IL': '133',
'Montgomery County, IL': '135',
'Morgan County, IL': '137',
'Moultrie County, IL': '139',
'Ogle County, IL': '141',
'Peoria County, IL': '143',
'Perry County, IL': '145',
'Piatt County, IL': '147',
'Pike County, IL': '149',
'Pope County, IL': '151',
'Pulaski County, IL': '153',
'Putnam County, IL': '155',
'Randolph County, IL': '157',
'Richland County, IL': '159',
'Rock Island County, IL': '161',
'Saline County, IL': '165',
'Sangamon County, IL': '167',
'Schuyler County, IL': '169',
'Scott County, IL': '171',
'Shelby County, IL': '173',
'St. Clair County, IL': '163',
'Stark County, IL': '175',
'Stephenson County, IL': '177',
'Tazewell County, IL': '179',
'Union County, IL': '181',
'Vermilion County, IL': '183',
'Wabash County, IL': '185',
'Warren County, IL': '187',
'Washington County, IL': '189',
'Wayne County, IL': '191',
'White County, IL': '193',
'Whiteside County, IL': '195',
'Will County, IL': '197',
'Williamson County, IL': '199',
'Winnebago County, IL': '201',
'Woodford County, IL': '203'},
18: { '--All--': '%',
'Adams County, IN': '001',
'Allen County, IN': '003',
'Bartholomew County, IN': '005',
'Benton County, IN': '007',
'Blackford County, IN': '009',
'Boone County, IN': '011',
'Brown County, IN': '013',
'Carroll County, IN': '015',
'Cass County, IN': '017',
'Clark County, IN': '019',
'Clay County, IN': '021',
'Clinton County, IN': '023',
'Crawford County, IN': '025',
'Daviess County, IN': '027',
'DeKalb County, IN': '033',
'Dearborn County, IN': '029',
'Decatur County, IN': '031',
'Delaware County, IN': '035',
'Dubois County, IN': '037',
'Elkhart County, IN': '039',
'Fayette County, IN': '041',
'Floyd County, IN': '043',
'Fountain County, IN': '045',
'Franklin County, IN': '047',
'Fulton County, IN': '049',
'Gibson County, IN': '051',
'Grant County, IN': '053',
'Greene County, IN': '055',
'Hamilton County, IN': '057',
'Hancock County, IN': '059',
'Harrison County, IN': '061',
'Hendricks County, IN': '063',
'Henry County, IN': '065',
'Howard County, IN': '067',
'Huntington County, IN': '069',
'Jackson County, IN': '071',
'Jasper County, IN': '073',
'Jay County, IN': '075',
'Jefferson County, IN': '077',
'Jennings County, IN': '079',
'Johnson County, IN': '081',
'Knox County, IN': '083',
'Kosciusko County, IN': '085',
'LaGrange County, IN': '087',
'LaPorte County, IN': '091',
'Lake County, IN': '089',
'Lawrence County, IN': '093',
'Madison County, IN': '095',
'Marion County, IN': '097',
'Marshall County, IN': '099',
'Martin County, IN': '101',
'Miami County, IN': '103',
'Monroe County, IN': '105',
'Montgomery County, IN': '107',
'Morgan County, IN': '109',
'Newton County, IN': '111',
'Noble County, IN': '113',
'Ohio County, IN': '115',
'Orange County, IN': '117',
'Owen County, IN': '119',
'Parke County, IN': '121',
'Perry County, IN': '123',
'Pike County, IN': '125',
'Porter County, IN': '127',
'Posey County, IN': '129',
'Pulaski County, IN': '131',
'Putnam County, IN': '133',
'Randolph County, IN': '135',
'Ripley County, IN': '137',
'Rush County, IN': '139',
'Scott County, IN': '143',
'Shelby County, IN': '145',
'Spencer County, IN': '147',
'St. Joseph County, IN': '141',
'Starke County, IN': '149',
'Steuben County, IN': '151',
'Sullivan County, IN': '153',
'Switzerland County, IN': '155',
'Tippecanoe County, IN': '157',
'Tipton County, IN': '159',
'Union County, IN': '161',
'Vanderburgh County, IN': '163',
'Vermillion County, IN': '165',
'Vigo County, IN': '167',
'Wabash County, IN': '169',
'Warren County, IN': '171',
'Warrick County, IN': '173',
'Washington County, IN': '175',
'Wayne County, IN': '177',
'Wells County, IN': '179',
'White County, IN': '181',
'Whitley County, IN': '183'},
19: { '--All--': '%',
'Adair County, IA': '001',
'Adams County, IA': '003',
'Allamakee County, IA': '005',
'Appanoose County, IA': '007',
'Audubon County, IA': '009',
'Benton County, IA': '011',
'Black Hawk County, IA': '013',
'Boone County, IA': '015',
'Bremer County, IA': '017',
'Buchanan County, IA': '019',
'Buena Vista County, IA': '021',
'Butler County, IA': '023',
'Calhoun County, IA': '025',
'Carroll County, IA': '027',
'Cass County, IA': '029',
'Cedar County, IA': '031',
'Cerro Gordo County, IA': '033',
'Cherokee County, IA': '035',
'Chickasaw County, IA': '037',
'Clarke County, IA': '039',
'Clay County, IA': '041',
'Clayton County, IA': '043',
'Clinton County, IA': '045',
'Crawford County, IA': '047',
'Dallas County, IA': '049',
'Davis County, IA': '051',
'Decatur County, IA': '053',
'Delaware County, IA': '055',
'Des Moines County, IA': '057',
'Dickinson County, IA': '059',
'Dubuque County, IA': '061',
'Emmet County, IA': '063',
'Fayette County, IA': '065',
'Floyd County, IA': '067',
'Franklin County, IA': '069',
'Fremont County, IA': '071',
'Greene County, IA': '073',
'Grundy County, IA': '075',
'Guthrie County, IA': '077',
'Hamilton County, IA': '079',
'Hancock County, IA': '081',
'Hardin County, IA': '083',
'Harrison County, IA': '085',
'Henry County, IA': '087',
'Howard County, IA': '089',
'Humboldt County, IA': '091',
'Ida County, IA': '093',
'Iowa County, IA': '095',
'Jackson County, IA': '097',
'Jasper County, IA': '099',
'Jefferson County, IA': '101',
'Johnson County, IA': '103',
'Jones County, IA': '105',
'Keokuk County, IA': '107',
'Kossuth County, IA': '109',
'Lee County, IA': '111',
'Linn County, IA': '113',
'Louisa County, IA': '115',
'Lucas County, IA': '117',
'Lyon County, IA': '119',
'Madison County, IA': '121',
'Mahaska County, IA': '123',
'Marion County, IA': '125',
'Marshall County, IA': '127',
'Mills County, IA': '129',
'Mitchell County, IA': '131',
'Monona County, IA': '133',
'Monroe County, IA': '135',
'Montgomery County, IA': '137',
'Muscatine County, IA': '139',
"O'Brien County, IA": '141',
'Osceola County, IA': '143',
'Page County, IA': '145',
'Palo Alto County, IA': '147',
'Plymouth County, IA': '149',
'Pocahontas County, IA': '151',
'Polk County, IA': '153',
'Pottawattamie County, IA': '155',
'Poweshiek County, IA': '157',
'Ringgold County, IA': '159',
'Sac County, IA': '161',
'Scott County, IA': '163',
'Shelby County, IA': '165',
'Sioux County, IA': '167',
'Story County, IA': '169',
'Tama County, IA': '171',
'Taylor County, IA': '173',
'Union County, IA': '175',
'Van Buren County, IA': '177',
'Wapello County, IA': '179',
'Warren County, IA': '181',
'Washington County, IA': '183',
'Wayne County, IA': '185',
'Webster County, IA': '187',
'Winnebago County, IA': '189',
'Winneshiek County, IA': '191',
'Woodbury County, IA': '193',
'Worth County, IA': '195',
'Wright County, IA': '197'},
20: { '--All--': '%',
'Allen County, KS': '001',
'Anderson County, KS': '003',
'Atchison County, KS': '005',
'Barber County, KS': '007',
'Barton County, KS': '009',
'Bourbon County, KS': '011',
'Brown County, KS': '013',
'Butler County, KS': '015',
'Chase County, KS': '017',
'Chautauqua County, KS': '019',
'Cherokee County, KS': '021',
'Cheyenne County, KS': '023',
'Clark County, KS': '025',
'Clay County, KS': '027',
'Cloud County, KS': '029',
'Coffey County, KS': '031',
'Comanche County, KS': '033',
'Cowley County, KS': '035',
'Crawford County, KS': '037',
'Decatur County, KS': '039',
'Dickinson County, KS': '041',
'Doniphan County, KS': '043',
'Douglas County, KS': '045',
'Edwards County, KS': '047',
'Elk County, KS': '049',
'Ellis County, KS': '051',
'Ellsworth County, KS': '053',
'Finney County, KS': '055',
'Ford County, KS': '057',
'Franklin County, KS': '059',
'Geary County, KS': '061',
'Gove County, KS': '063',
'Graham County, KS': '065',
'Grant County, KS': '067',
'Gray County, KS': '069',
'Greeley County, KS': '071',
'Greenwood County, KS': '073',
'Hamilton County, KS': '075',
'Harper County, KS': '077',
'Harvey County, KS': '079',
'Haskell County, KS': '081',
'Hodgeman County, KS': '083',
'Jackson County, KS': '085',
'Jefferson County, KS': '087',
'Jewell County, KS': '089',
'Johnson County, KS': '091',
'Kearny County, KS': '093',
'Kingman County, KS': '095',
'Kiowa County, KS': '097',
'Labette County, KS': '099',
'Lane County, KS': '101',
'Leavenworth County, KS': '103',
'Lincoln County, KS': '105',
'Linn County, KS': '107',
'Logan County, KS': '109',
'Lyon County, KS': '111',
'Marion County, KS': '115',
'Marshall County, KS': '117',
'McPherson County, KS': '113',
'Meade County, KS': '119',
'Miami County, KS': '121',
'Mitchell County, KS': '123',
'Montgomery County, KS': '125',
'Morris County, KS': '127',
'Morton County, KS': '129',
'Nemaha County, KS': '131',
'Neosho County, KS': '133',
'Ness County, KS': '135',
'Norton County, KS': '137',
'Osage County, KS': '139',
'Osborne County, KS': '141',
'Ottawa County, KS': '143',
'Pawnee County, KS': '145',
'Phillips County, KS': '147',
'Pottawatomie County, KS': '149',
'Pratt County, KS': '151',
'Rawlins County, KS': '153',
'Reno County, KS': '155',
'Republic County, KS': '157',
'Rice County, KS': '159',
'Riley County, KS': '161',
'Rooks County, KS': '163',
'Rush County, KS': '165',
'Russell County, KS': '167',
'Saline County, KS': '169',
'Scott County, KS': '171',
'Sedgwick County, KS': '173',
'Seward County, KS': '175',
'Shawnee County, KS': '177',
'Sheridan County, KS': '179',
'Sherman County, KS': '181',
'Smith County, KS': '183',
'Stafford County, KS': '185',
'Stanton County, KS': '187',
'Stevens County, KS': '189',
'Sumner County, KS': '191',
'Thomas County, KS': '193',
'Trego County, KS': '195',
'Wabaunsee County, KS': '197',
'Wallace County, KS': '199',
'Washington County, KS': '201',
'Wichita County, KS': '203',
'Wilson County, KS': '205',
'Woodson County, KS': '207',
'Wyandotte County, KS': '209'},
21: { '--All--': '%',
'Adair County, KY': '001',
'Allen County, KY': '003',
'Anderson County, KY': '005',
'Ballard County, KY': '007',
'Barren County, KY': '009',
'Bath County, KY': '011',
'Bell County, KY': '013',
'Boone County, KY': '015',
'Bourbon County, KY': '017',
'Boyd County, KY': '019',
'Boyle County, KY': '021',
'Bracken County, KY': '023',
'Breathitt County, KY': '025',
'Breckinridge County, KY': '027',
'Bullitt County, KY': '029',
'Butler County, KY': '031',
'Caldwell County, KY': '033',
'Calloway County, KY': '035',
'Campbell County, KY': '037',
'Carlisle County, KY': '039',
'Carroll County, KY': '041',
'Carter County, KY': '043',
'Casey County, KY': '045',
'Christian County, KY': '047',
'Clark County, KY': '049',
'Clay County, KY': '051',
'Clinton County, KY': '053',
'Crittenden County, KY': '055',
'Cumberland County, KY': '057',
'Daviess County, KY': '059',
'Edmonson County, KY': '061',
'Elliott County, KY': '063',
'Estill County, KY': '065',
'Fayette County, KY': '067',
'Fleming County, KY': '069',
'Floyd County, KY': '071',
'Franklin County, KY': '073',
'Fulton County, KY': '075',
'Gallatin County, KY': '077',
'Garrard County, KY': '079',
'Grant County, KY': '081',
'Graves County, KY': '083',
'Grayson County, KY': '085',
'Green County, KY': '087',
'Greenup County, KY': '089',
'Hancock County, KY': '091',
'Hardin County, KY': '093',
'Harlan County, KY': '095',
'Harrison County, KY': '097',
'Hart County, KY': '099',
'Henderson County, KY': '101',
'Henry County, KY': '103',
'Hickman County, KY': '105',
'Hopkins County, KY': '107',
'Jackson County, KY': '109',
'Jefferson County, KY': '111',
'Jessamine County, KY': '113',
'Johnson County, KY': '115',
'Kenton County, KY': '117',
'Knott County, KY': '119',
'Knox County, KY': '121',
'Larue County, KY': '123',
'Laurel County, KY': '125',
'Lawrence County, KY': '127',
'Lee County, KY': '129',
'Leslie County, KY': '131',
'Letcher County, KY': '133',
'Lewis County, KY': '135',
'Lincoln County, KY': '137',
'Livingston County, KY': '139',
'Logan County, KY': '141',
'Lyon County, KY': '143',
'Madison County, KY': '151',
'Magoffin County, KY': '153',
'Marion County, KY': '155',
'Marshall County, KY': '157',
'Martin County, KY': '159',
'Mason County, KY': '161',
'McCracken County, KY': '145',
'McCreary County, KY': '147',
'McLean County, KY': '149',
'Meade County, KY': '163',
'Menifee County, KY': '165',
'Mercer County, KY': '167',
'Metcalfe County, KY': '169',
'Monroe County, KY': '171',
'Montgomery County, KY': '173',
'Morgan County, KY': '175',
'Muhlenberg County, KY': '177',
'Nelson County, KY': '179',
'Nicholas County, KY': '181',
'Ohio County, KY': '183',
'Oldham County, KY': '185',
'Owen County, KY': '187',
'Owsley County, KY': '189',
'Pendleton County, KY': '191',
'Perry County, KY': '193',
'Pike County, KY': '195',
'Powell County, KY': '197',
'Pulaski County, KY': '199',
'Robertson County, KY': '201',
'Rockcastle County, KY': '203',
'Rowan County, KY': '205',
'Russell County, KY': '207',
'Scott County, KY': '209',
'Shelby County, KY': '211',
'Simpson County, KY': '213',
'Spencer County, KY': '215',
'Taylor County, KY': '217',
'Todd County, KY': '219',
'Trigg County, KY': '221',
'Trimble County, KY': '223',
'Union County, KY': '225',
'Warren County, KY': '227',
'Washington County, KY': '229',
'Wayne County, KY': '231',
'Webster County, KY': '233',
'Whitley County, KY': '235',
'Wolfe County, KY': '237',
'Woodford County, KY': '239'},
22: { '--All--': '%',
'Acadia Parish, LA': '001',
'Allen Parish, LA': '003',
'Ascension Parish, LA': '005',
'Assumption Parish, LA': '007',
'Avoyelles Parish, LA': '009',
'Beauregard Parish, LA': '011',
'Bienville Parish, LA': '013',
'Bossier Parish, LA': '015',
'Caddo Parish, LA': '017',
'Calcasieu Parish, LA': '019',
'Caldwell Parish, LA': '021',
'Cameron Parish, LA': '023',
'Catahoula Parish, LA': '025',
'Claiborne Parish, LA': '027',
'Concordia Parish, LA': '029',
'De Soto Parish, LA': '031',
'East Baton Rouge Parish, LA': '033',
'East Carroll Parish, LA': '035',
'East Feliciana Parish, LA': '037',
'Evangeline Parish, LA': '039',
'Franklin Parish, LA': '041',
'Grant Parish, LA': '043',
'Iberia Parish, LA': '045',
'Iberville Parish, LA': '047',
'Jackson Parish, LA': '049',
'Jefferson Davis Parish, LA': '053',
'Jefferson Parish, LA': '051',
'La Salle Parish, LA': '059',
'Lafayette Parish, LA': '055',
'Lafourche Parish, LA': '057',
'Lincoln Parish, LA': '061',
'Livingston Parish, LA': '063',
'Madison Parish, LA': '065',
'Morehouse Parish, LA': '067',
'Natchitoches Parish, LA': '069',
'Orleans Parish, LA': '071',
'Ouachita Parish, LA': '073',
'Plaquemines Parish, LA': '075',
'Pointe Coupee Parish, LA': '077',
'Rapides Parish, LA': '079',
'Red River Parish, LA': '081',
'Richland Parish, LA': '083',
'Sabine Parish, LA': '085',
'St. Bernard Parish, LA': '087',
'St. Charles Parish, LA': '089',
'St. Helena Parish, LA': '091',
'St. James Parish, LA': '093',
'St. John the Baptist Parish, LA': '095',
'St. Landry Parish, LA': '097',
'St. Martin Parish, LA': '099',
'St. Mary Parish, LA': '101',
'St. Tammany Parish, LA': '103',
'Tangipahoa Parish, LA': '105',
'Tensas Parish, LA': '107',
'Terrebonne Parish, LA': '109',
'Union Parish, LA': '111',
'Vermilion Parish, LA': '113',
'Vernon Parish, LA': '115',
'Washington Parish, LA': '117',
'Webster Parish, LA': '119',
'West Baton Rouge Parish, LA': '121',
'West Carroll Parish, LA': '123',
'West Feliciana Parish, LA': '125',
'Winn Parish, LA': '127'},
23: { '--All--': '%',
'Androscoggin County, ME': '001',
'Aroostook County, ME': '003',
'Cumberland County, ME': '005',
'Franklin County, ME': '007',
'Hancock County, ME': '009',
'Kennebec County, ME': '011',
'Knox County, ME': '013',
'Lincoln County, ME': '015',
'Oxford County, ME': '017',
'Penobscot County, ME': '019',
'Piscataquis County, ME': '021',
'Sagadahoc County, ME': '023',
'Somerset County, ME': '025',
'Waldo County, ME': '027',
'Washington County, ME': '029',
'York County, ME': '031'},
24: { '--All--': '%',
'Allegany County, MD': '001',
'Anne Arundel County, MD': '003',
'Baltimore County, MD': '005',
'Baltimore city, MD': '510',
'Calvert County, MD': '009',
'Caroline County, MD': '011',
'Carroll County, MD': '013',
'Cecil County, MD': '015',
'Charles County, MD': '017',
'Dorchester County, MD': '019',
'Frederick County, MD': '021',
'Garrett County, MD': '023',
'Harford County, MD': '025',
'Howard County, MD': '027',
'Kent County, MD': '029',
'Montgomery County, MD': '031',
"Prince George's County, MD": '033',
"Queen Anne's County, MD": '035',
'Somerset County, MD': '039',
"St. Mary's County, MD": '037',
'Talbot County, MD': '041',
'Washington County, MD': '043',
'Wicomico County, MD': '045',
'Worcester County, MD': '047'},
25: { '--All--': '%',
'Barnstable County, MA': '001',
'Berkshire County, MA': '003',
'Bristol County, MA': '005',
'Dukes County, MA': '007',
'Essex County, MA': '009',
'Franklin County, MA': '011',
'Hampden County, MA': '013',
'Hampshire County, MA': '015',
'Middlesex County, MA': '017',
'Nantucket County/town, MA': '019',
'Norfolk County, MA': '021',
'Plymouth County, MA': '023',
'Suffolk County, MA': '025',
'Worcester County, MA': '027'},
26: { '--All--': '%',
'Alcona County, MI': '001',
'Alger County, MI': '003',
'Allegan County, MI': '005',
'Alpena County, MI': '007',
'Antrim County, MI': '009',
'Arenac County, MI': '011',
'Baraga County, MI': '013',
'Barry County, MI': '015',
'Bay County, MI': '017',
'Benzie County, MI': '019',
'Berrien County, MI': '021',
'Branch County, MI': '023',
'Calhoun County, MI': '025',
'Cass County, MI': '027',
'Charlevoix County, MI': '029',
'Cheboygan County, MI': '031',
'Chippewa County, MI': '033',
'Clare County, MI': '035',
'Clinton County, MI': '037',
'Crawford County, MI': '039',
'Delta County, MI': '041',
'Dickinson County, MI': '043',
'Eaton County, MI': '045',
'Emmet County, MI': '047',
'Genesee County, MI': '049',
'Gladwin County, MI': '051',
'Gogebic County, MI': '053',
'Grand Traverse County, MI': '055',
'Gratiot County, MI': '057',
'Hillsdale County, MI': '059',
'Houghton County, MI': '061',
'Huron County, MI': '063',
'Ingham County, MI': '065',
'Ionia County, MI': '067',
'Iosco County, MI': '069',
'Iron County, MI': '071',
'Isabella County, MI': '073',
'Jackson County, MI': '075',
'Kalamazoo County, MI': '077',
'Kalkaska County, MI': '079',
'Kent County, MI': '081',
'Keweenaw County, MI': '083',
'Lake County, MI': '085',
'Lapeer County, MI': '087',
'Leelanau County, MI': '089',
'Lenawee County, MI': '091',
'Livingston County, MI': '093',
'Luce County, MI': '095',
'Mackinac County, MI': '097',
'Macomb County, MI': '099',
'Manistee County, MI': '101',
'Marquette County, MI': '103',
'Mason County, MI': '105',
'Mecosta County, MI': '107',
'Menominee County, MI': '109',
'Midland County, MI': '111',
'Missaukee County, MI': '113',
'Monroe County, MI': '115',
'Montcalm County, MI': '117',
'Montmorency County, MI': '119',
'Muskegon County, MI': '121',
'Newaygo County, MI': '123',
'Oakland County, MI': '125',
'Oceana County, MI': '127',
'Ogemaw County, MI': '129',
'Ontonagon County, MI': '131',
'Osceola County, MI': '133',
'Oscoda County, MI': '135',
'Otsego County, MI': '137',
'Ottawa County, MI': '139',
'Presque Isle County, MI': '141',
'Roscommon County, MI': '143',
'Saginaw County, MI': '145',
'Sanilac County, MI': '151',
'Schoolcraft County, MI': '153',
'Shiawassee County, MI': '155',
'St. Clair County, MI': '147',
'St. Joseph County, MI': '149',
'Tuscola County, MI': '157',
'Van Buren County, MI': '159',
'Washtenaw County, MI': '161',
'Wayne County, MI': '163',
'Wexford County, MI': '165'},
27: { '--All--': '%',
'Aitkin County, MN': '001',
'Anoka County, MN': '003',
'Becker County, MN': '005',
'Beltrami County, MN': '007',
'Benton County, MN': '009',
'Big Stone County, MN': '011',
'Blue Earth County, MN': '013',
'Brown County, MN': '015',
'Carlton County, MN': '017',
'Carver County, MN': '019',
'Cass County, MN': '021',
'Chippewa County, MN': '023',
'Chisago County, MN': '025',
'Clay County, MN': '027',
'Clearwater County, MN': '029',
'Cook County, MN': '031',
'Cottonwood County, MN': '033',
'Crow Wing County, MN': '035',
'Dakota County, MN': '037',
'Dodge County, MN': '039',
'Douglas County, MN': '041',
'Faribault County, MN': '043',
'Fillmore County, MN': '045',
'Freeborn County, MN': '047',
'Goodhue County, MN': '049',
'Grant County, MN': '051',
'Hennepin County, MN': '053',
'Houston County, MN': '055',
'Hubbard County, MN': '057',
'Isanti County, MN': '059',
'Itasca County, MN': '061',
'Jackson County, MN': '063',
'Kanabec County, MN': '065',
'Kandiyohi County, MN': '067',
'Kittson County, MN': '069',
'Koochiching County, MN': '071',
'Lac qui Parle County, MN': '073',
'Lake County, MN': '075',
'Lake of the Woods County, MN': '077',
'Le Sueur County, MN': '079',
'Lincoln County, MN': '081',
'Lyon County, MN': '083',
'Mahnomen County, MN': '087',
'Marshall County, MN': '089',
'Martin County, MN': '091',
'McLeod County, MN': '085',
'Meeker County, MN': '093',
'Mille Lacs County, MN': '095',
'Morrison County, MN': '097',
'Mower County, MN': '099',
'Murray County, MN': '101',
'Nicollet County, MN': '103',
'Nobles County, MN': '105',
'Norman County, MN': '107',
'Olmsted County, MN': '109',
'Otter Tail County, MN': '111',
'Pennington County, MN': '113',
'Pine County, MN': '115',
'Pipestone County, MN': '117',
'Polk County, MN': '119',
'Pope County, MN': '121',
'Ramsey County, MN': '123',
'Red Lake County, MN': '125',
'Redwood County, MN': '127',
'Renville County, MN': '129',
'Rice County, MN': '131',
'Rock County, MN': '133',
'Roseau County, MN': '135',
'Scott County, MN': '139',
'Sherburne County, MN': '141',
'Sibley County, MN': '143',
'St. Louis County, MN': '137',
'Stearns County, MN': '145',
'Steele County, MN': '147',
'Stevens County, MN': '149',
'Swift County, MN': '151',
'Todd County, MN': '153',
'Traverse County, MN': '155',
'Wabasha County, MN': '157',
'Wadena County, MN': '159',
'Waseca County, MN': '161',
'Washington County, MN': '163',
'Watonwan County, MN': '165',
'Wilkin County, MN': '167',
'Winona County, MN': '169',
'Wright County, MN': '171',
'Yellow Medicine County, MN': '173'},
28: { '--All--': '%',
'Adams County, MS': '001',
'Alcorn County, MS': '003',
'Amite County, MS': '005',
'Attala County, MS': '007',
'Benton County, MS': '009',
'Bolivar County, MS': '011',
'Calhoun County, MS': '013',
'Carroll County, MS': '015',
'Chickasaw County, MS': '017',
'Choctaw County, MS': '019',
'Claiborne County, MS': '021',
'Clarke County, MS': '023',
'Clay County, MS': '025',
'Coahoma County, MS': '027',
'Copiah County, MS': '029',
'Covington County, MS': '031',
'DeSoto County, MS': '033',
'Forrest County, MS': '035',
'Franklin County, MS': '037',
'George County, MS': '039',
'Greene County, MS': '041',
'Grenada County, MS': '043',
'Hancock County, MS': '045',
'Harrison County, MS': '047',
'Hinds County, MS': '049',
'Holmes County, MS': '051',
'Humphreys County, MS': '053',
'Issaquena County, MS': '055',
'Itawamba County, MS': '057',
'Jackson County, MS': '059',
'Jasper County, MS': '061',
'Jefferson County, MS': '063',
'Jefferson Davis County, MS': '065',
'Jones County, MS': '067',
'Kemper County, MS': '069',
'Lafayette County, MS': '071',
'Lamar County, MS': '073',
'Lauderdale County, MS': '075',
'Lawrence County, MS': '077',
'Leake County, MS': '079',
'Lee County, MS': '081',
'Leflore County, MS': '083',
'Lincoln County, MS': '085',
'Lowndes County, MS': '087',
'Madison County, MS': '089',
'Marion County, MS': '091',
'Marshall County, MS': '093',
'Monroe County, MS': '095',
'Montgomery County, MS': '097',
'Neshoba County, MS': '099',
'Newton County, MS': '101',
'Noxubee County, MS': '103',
'Oktibbeha County, MS': '105',
'Panola County, MS': '107',
'Pearl River County, MS': '109',
'Perry County, MS': '111',
'Pike County, MS': '113',
'Pontotoc County, MS': '115',
'Prentiss County, MS': '117',
'Quitman County, MS': '119',
'Rankin County, MS': '121',
'Scott County, MS': '123',
'Sharkey County, MS': '125',
'Simpson County, MS': '127',
'Smith County, MS': '129',
'Stone County, MS': '131',
'Sunflower County, MS': '133',
'Tallahatchie County, MS': '135',
'Tate County, MS': '137',
'Tippah County, MS': '139',
'Tishomingo County, MS': '141',
'Tunica County, MS': '143',
'Union County, MS': '145',
'Walthall County, MS': '147',
'Warren County, MS': '149',
'Washington County, MS': '151',
'Wayne County, MS': '153',
'Webster County, MS': '155',
'Wilkinson County, MS': '157',
'Winston County, MS': '159',
'Yalobusha County, MS': '161',
'Yazoo County, MS': '163'},
29: { '--All--': '%',
'Adair County, MO': '001',
'Andrew County, MO': '003',
'Atchison County, MO': '005',
'Audrain County, MO': '007',
'Barry County, MO': '009',
'Barton County, MO': '011',
'Bates County, MO': '013',
'Benton County, MO': '015',
'Bollinger County, MO': '017',
'Boone County, MO': '019',
'Buchanan County, MO': '021',
'Butler County, MO': '023',
'Caldwell County, MO': '025',
'Callaway County, MO': '027',
'Camden County, MO': '029',
'Cape Girardeau County, MO': '031',
'Carroll County, MO': '033',
'Carter County, MO': '035',
'Cass County, MO': '037',
'Cedar County, MO': '039',
'Chariton County, MO': '041',
'Christian County, MO': '043',
'Clark County, MO': '045',
'Clay County, MO': '047',
'Clinton County, MO': '049',
'Cole County, MO': '051',
'Cooper County, MO': '053',
'Crawford County, MO': '055',
'Dade County, MO': '057',
'Dallas County, MO': '059',
'Daviess County, MO': '061',
'DeKalb County, MO': '063',
'Dent County, MO': '065',
'Douglas County, MO': '067',
'Dunklin County, MO': '069',
'Franklin County, MO': '071',
'Gasconade County, MO': '073',
'Gentry County, MO': '075',
'Greene County, MO': '077',
'Grundy County, MO': '079',
'Harrison County, MO': '081',
'Henry County, MO': '083',
'Hickory County, MO': '085',
'Holt County, MO': '087',
'Howard County, MO': '089',
'Howell County, MO': '091',
'Iron County, MO': '093',
'Jackson County, MO': '095',
'Jasper County, MO': '097',
'Jefferson County, MO': '099',
'Johnson County, MO': '101',
'Knox County, MO': '103',
'Laclede County, MO': '105',
'Lafayette County, MO': '107',
'Lawrence County, MO': '109',
'Lewis County, MO': '111',
'Lincoln County, MO': '113',
'Linn County, MO': '115',
'Livingston County, MO': '117',
'Macon County, MO': '121',
'Madison County, MO': '123',
'Maries County, MO': '125',
'Marion County, MO': '127',
'McDonald County, MO': '119',
'Mercer County, MO': '129',
'Miller County, MO': '131',
'Mississippi County, MO': '133',
'Moniteau County, MO': '135',
'Monroe County, MO': '137',
'Montgomery County, MO': '139',
'Morgan County, MO': '141',
'New Madrid County, MO': '143',
'Newton County, MO': '145',
'Nodaway County, MO': '147',
'Oregon County, MO': '149',
'Osage County, MO': '151',
'Ozark County, MO': '153',
'Pemiscot County, MO': '155',
'Perry County, MO': '157',
'Pettis County, MO': '159',
'Phelps County, MO': '161',
'Pike County, MO': '163',
'Platte County, MO': '165',
'Polk County, MO': '167',
'Pulaski County, MO': '169',
'Putnam County, MO': '171',
'Ralls County, MO': '173',
'Randolph County, MO': '175',
'Ray County, MO': '177',
'Reynolds County, MO': '179',
'Ripley County, MO': '181',
'Saline County, MO': '195',
'Schuyler County, MO': '197',
'Scotland County, MO': '199',
'Scott County, MO': '201',
'Shannon County, MO': '203',
'Shelby County, MO': '205',
'St. Charles County, MO': '183',
'St. Clair County, MO': '185',
'St. Francois County, MO': '187',
'St. Louis County, MO': '189',
'St. Louis city, MO': '510',
'Ste. Genevieve County, MO': '186',
'Stoddard County, MO': '207',
'Stone County, MO': '209',
'Sullivan County, MO': '211',
'Taney County, MO': '213',
'Texas County, MO': '215',
'Vernon County, MO': '217',
'Warren County, MO': '219',
'Washington County, MO': '221',
'Wayne County, MO': '223',
'Webster County, MO': '225',
'Worth County, MO': '227',
'Wright County, MO': '229'},
30: { '--All--': '%',
'Beaverhead County, MT': '001',
'Big Horn County, MT': '003',
'Blaine County, MT': '005',
'Broadwater County, MT': '007',
'Carbon County, MT': '009',
'Carter County, MT': '011',
'Cascade County, MT': '013',
'Chouteau County, MT': '015',
'Custer County, MT': '017',
'Daniels County, MT': '019',
'Dawson County, MT': '021',
'Deer Lodge County, MT': '023',
'Fallon County, MT': '025',
'Fergus County, MT': '027',
'Flathead County, MT': '029',
'Gallatin County, MT': '031',
'Garfield County, MT': '033',
'Glacier County, MT': '035',
'Golden Valley County, MT': '037',
'Granite County, MT': '039',
'Hill County, MT': '041',
'Jefferson County, MT': '043',
'Judith Basin County, MT': '045',
'Lake County, MT': '047',
'Lewis and Clark County, MT': '049',
'Liberty County, MT': '051',
'Lincoln County, MT': '053',
'Madison County, MT': '057',
'McCone County, MT': '055',
'Meagher County, MT': '059',
'Mineral County, MT': '061',
'Missoula County, MT': '063',
'Musselshell County, MT': '065',
'Park County, MT': '067',
'Petroleum County, MT': '069',
'Phillips County, MT': '071',
'Pondera County, MT': '073',
'Powder River County, MT': '075',
'Powell County, MT': '077',
'Prairie County, MT': '079',
'Ravalli County, MT': '081',
'Richland County, MT': '083',
'Roosevelt County, MT': '085',
'Rosebud County, MT': '087',
'Sanders County, MT': '089',
'Sheridan County, MT': '091',
'Silver Bow County, MT': '093',
'Stillwater County, MT': '095',
'Sweet Grass County, MT': '097',
'Teton County, MT': '099',
'Toole County, MT': '101',
'Treasure County, MT': '103',
'Valley County, MT': '105',
'Wheatland County, MT': '107',
'Wibaux County, MT': '109',
'Yellowstone County, MT': '111'},
31: { '--All--': '%',
'Adams County, NE': '001',
'Antelope County, NE': '003',
'Arthur County, NE': '005',
'Banner County, NE': '007',
'Blaine County, NE': '009',
'Boone County, NE': '011',
'Box Butte County, NE': '013',
'Boyd County, NE': '015',
'Brown County, NE': '017',
'Buffalo County, NE': '019',
'Burt County, NE': '021',
'Butler County, NE': '023',
'Cass County, NE': '025',
'Cedar County, NE': '027',
'Chase County, NE': '029',
'Cherry County, NE': '031',
'Cheyenne County, NE': '033',
'Clay County, NE': '035',
'Colfax County, NE': '037',
'Cuming County, NE': '039',
'Custer County, NE': '041',
'Dakota County, NE': '043',
'Dawes County, NE': '045',
'Dawson County, NE': '047',
'Deuel County, NE': '049',
'Dixon County, NE': '051',
'Dodge County, NE': '053',
'Douglas County, NE': '055',
'Dundy County, NE': '057',
'Fillmore County, NE': '059',
'Franklin County, NE': '061',
'Frontier County, NE': '063',
'Furnas County, NE': '065',
'Gage County, NE': '067',
'Garden County, NE': '069',
'Garfield County, NE': '071',
'Gosper County, NE': '073',
'Grant County, NE': '075',
'Greeley County, NE': '077',
'Hall County, NE': '079',
'Hamilton County, NE': '081',
'Harlan County, NE': '083',
'Hayes County, NE': '085',
'Hitchcock County, NE': '087',
'Holt County, NE': '089',
'Hooker County, NE': '091',
'Howard County, NE': '093',
'Jefferson County, NE': '095',
'Johnson County, NE': '097',
'Kearney County, NE': '099',
'Keith County, NE': '101',
'Keya Paha County, NE': '103',
'Kimball County, NE': '105',
'Knox County, NE': '107',
'Lancaster County, NE': '109',
'Lincoln County, NE': '111',
'Logan County, NE': '113',
'Loup County, NE': '115',
'Madison County, NE': '119',
'McPherson County, NE': '117',
'Merrick County, NE': '121',
'Morrill County, NE': '123',
'Nance County, NE': '125',
'Nemaha County, NE': '127',
'Nuckolls County, NE': '129',
'Otoe County, NE': '131',
'Pawnee County, NE': '133',
'Perkins County, NE': '135',
'Phelps County, NE': '137',
'Pierce County, NE': '139',
'Platte County, NE': '141',
'Polk County, NE': '143',
'Red Willow County, NE': '145',
'Richardson County, NE': '147',
'Rock County, NE': '149',
'Saline County, NE': '151',
'Sarpy County, NE': '153',
'Saunders County, NE': '155',
'Scotts Bluff County, NE': '157',
'Seward County, NE': '159',
'Sheridan County, NE': '161',
'Sherman County, NE': '163',
'Sioux County, NE': '165',
'Stanton County, NE': '167',
'Thayer County, NE': '169',
'Thomas County, NE': '171',
'Thurston County, NE': '173',
'Valley County, NE': '175',
'Washington County, NE': '177',
'Wayne County, NE': '179',
'Webster County, NE': '181',
'Wheeler County, NE': '183',
'York County, NE': '185'},
32: { '--All--': '%',
'Carson City, NV': '510',
'Churchill County, NV': '001',
'Clark County, NV': '003',
'Douglas County, NV': '005',
'Elko County, NV': '007',
'Esmeralda County, NV': '009',
'Eureka County, NV': '011',
'Humboldt County, NV': '013',
'Lander County, NV': '015',
'Lincoln County, NV': '017',
'Lyon County, NV': '019',
'Mineral County, NV': '021',
'Nye County, NV': '023',
'Pershing County, NV': '027',
'Storey County, NV': '029',
'Washoe County, NV': '031',
'White Pine County, NV': '033'},
33: { '--All--': '%',
'Belknap County, NH': '001',
'Carroll County, NH': '003',
'Cheshire County, NH': '005',
'Coos County, NH': '007',
'Grafton County, NH': '009',
'Hillsborough County, NH': '011',
'Merrimack County, NH': '013',
'Rockingham County, NH': '015',
'Strafford County, NH': '017',
'Sullivan County, NH': '019'},
34: { '--All--': '%',
'Atlantic County, NJ': '001',
'Bergen County, NJ': '003',
'Burlington County, NJ': '005',
'Camden County, NJ': '007',
'Cape May County, NJ': '009',
'Cumberland County, NJ': '011',
'Essex County, NJ': '013',
'Gloucester County, NJ': '015',
'Hudson County, NJ': '017',
'Hunterdon County, NJ': '019',
'Mercer County, NJ': '021',
'Middlesex County, NJ': '023',
'Monmouth County, NJ': '025',
'Morris County, NJ': '027',
'Ocean County, NJ': '029',
'Passaic County, NJ': '031',
'Salem County, NJ': '033',
'Somerset County, NJ': '035',
'Sussex County, NJ': '037',
'Union County, NJ': '039',
'Warren County, NJ': '041'},
35: { '--All--': '%',
'Bernalillo County, NM': '001',
'Catron County, NM': '003',
'Chaves County, NM': '005',
'Cibola County, NM': '006',
'Colfax County, NM': '007',
'Curry County, NM': '009',
'DeBaca County, NM': '011',
'Dona Ana County, NM': '013',
'Eddy County, NM': '015',
'Grant County, NM': '017',
'Guadalupe County, NM': '019',
'Harding County, NM': '021',
'Hidalgo County, NM': '023',
'Lea County, NM': '025',
'Lincoln County, NM': '027',
'Los Alamos County, NM': '028',
'Luna County, NM': '029',
'McKinley County, NM': '031',
'Mora County, NM': '033',
'Otero County, NM': '035',
'Quay County, NM': '037',
'Rio Arriba County, NM': '039',
'Roosevelt County, NM': '041',
'San Juan County, NM': '045',
'San Miguel County, NM': '047',
'Sandoval County, NM': '043',
'Santa Fe County, NM': '049',
'Sierra County, NM': '051',
'Socorro County, NM': '053',
'Taos County, NM': '055',
'Torrance County, NM': '057',
'Union County, NM': '059',
'Valencia County, NM': '061'},
36: { '--All--': '%',
'Albany County, NY': '001',
'Allegany County, NY': '003',
'Bronx County, NY': '005',
'Broome County, NY': '007',
'Cattaraugus County, NY': '009',
'Cayuga County, NY': '011',
'Chautauqua County, NY': '013',
'Chemung County, NY': '015',
'Chenango County, NY': '017',
'Clinton County, NY': '019',
'Columbia County, NY': '021',
'Cortland County, NY': '023',
'Delaware County, NY': '025',
'Dutchess County, NY': '027',
'Erie County, NY': '029',
'Essex County, NY': '031',
'Franklin County, NY': '033',
'Fulton County, NY': '035',
'Genesee County, NY': '037',
'Greene County, NY': '039',
'Hamilton County, NY': '041',
'Herkimer County, NY': '043',
'Jefferson County, NY': '045',
'Kings County, NY': '047',
'Lewis County, NY': '049',
'Livingston County, NY': '051',
'Madison County, NY': '053',
'Monroe County, NY': '055',
'Montgomery County, NY': '057',
'Nassau County, NY': '059',
'New York County, NY': '061',
'Niagara County, NY': '063',
'Oneida County, NY': '065',
'Onondaga County, NY': '067',
'Ontario County, NY': '069',
'Orange County, NY': '071',
'Orleans County, NY': '073',
'Oswego County, NY': '075',
'Otsego County, NY': '077',
'Putnam County, NY': '079',
'Queens County, NY': '081',
'Rensselaer County, NY': '083',
'Richmond County, NY': '085',
'Rockland County, NY': '087',
'Saratoga County, NY': '091',
'Schenectady County, NY': '093',
'Schoharie County, NY': '095',
'Schuyler County, NY': '097',
'Seneca County, NY': '099',
'St. Lawrence County, NY': '089',
'Steuben County, NY': '101',
'Suffolk County, NY': '103',
'Sullivan County, NY': '105',
'Tioga County, NY': '107',
'Tompkins County, NY': '109',
'Ulster County, NY': '111',
'Warren County, NY': '113',
'Washington County, NY': '115',
'Wayne County, NY': '117',
'Westchester County, NY': '119',
'Wyoming County, NY': '121',
'Yates County, NY': '123'},
37: { '--All--': '%',
'Alamance County, NC': '001',
'Alexander County, NC': '003',
'Alleghany County, NC': '005',
'Anson County, NC': '007',
'Ashe County, NC': '009',
'Avery County, NC': '011',
'Beaufort County, NC': '013',
'Bertie County, NC': '015',
'Bladen County, NC': '017',
'Brunswick County, NC': '019',
'Buncombe County, NC': '021',
'Burke County, NC': '023',
'Cabarrus County, NC': '025',
'Caldwell County, NC': '027',
'Camden County, NC': '029',
'Carteret County, NC': '031',
'Caswell County, NC': '033',
'Catawba County, NC': '035',
'Chatham County, NC': '037',
'Cherokee County, NC': '039',
'Chowan County, NC': '041',
'Clay County, NC': '043',
'Cleveland County, NC': '045',
'Columbus County, NC': '047',
'Craven County, NC': '049',
'Cumberland County, NC': '051',
'Currituck County, NC': '053',
'Dare County, NC': '055',
'Davidson County, NC': '057',
'Davie County, NC': '059',
'Duplin County, NC': '061',
'Durham County, NC': '063',
'Edgecombe County, NC': '065',
'Forsyth County, NC': '067',
'Franklin County, NC': '069',
'Gaston County, NC': '071',
'Gates County, NC': '073',
'Graham County, NC': '075',
'Granville County, NC': '077',
'Greene County, NC': '079',
'Guilford County, NC': '081',
'Halifax County, NC': '083',
'Harnett County, NC': '085',
'Haywood County, NC': '087',
'Henderson County, NC': '089',
'Hertford County, NC': '091',
'Hoke County, NC': '093',
'Hyde County, NC': '095',
'Iredell County, NC': '097',
'Jackson County, NC': '099',
'Johnston County, NC': '101',
'Jones County, NC': '103',
'Lee County, NC': '105',
'Lenoir County, NC': '107',
'Lincoln County, NC': '109',
'Macon County, NC': '113',
'Madison County, NC': '115',
'Martin County, NC': '117',
'McDowell County, NC': '111',
'Mecklenburg County, NC': '119',
'Mitchell County, NC': '121',
'Montgomery County, NC': '123',
'Moore County, NC': '125',
'Nash County, NC': '127',
'New Hanover County, NC': '129',
'Northampton County, NC': '131',
'Onslow County, NC': '133',
'Orange County, NC': '135',
'Pamlico County, NC': '137',
'Pasquotank County, NC': '139',
'Pender County, NC': '141',
'Perquimans County, NC': '143',
'Person County, NC': '145',
'Pitt County, NC': '147',
'Polk County, NC': '149',
'Randolph County, NC': '151',
'Richmond County, NC': '153',
'Robeson County, NC': '155',
'Rockingham County, NC': '157',
'Rowan County, NC': '159',
'Rutherford County, NC': '161',
'Sampson County, NC': '163',
'Scotland County, NC': '165',
'Stanly County, NC': '167',
'Stokes County, NC': '169',
'Surry County, NC': '171',
'Swain County, NC': '173',
'Transylvania County, NC': '175',
'Tyrrell County, NC': '177',
'Union County, NC': '179',
'Vance County, NC': '181',
'Wake County, NC': '183',
'Warren County, NC': '185',
'Washington County, NC': '187',
'Watauga County, NC': '189',
'Wayne County, NC': '191',
'Wilkes County, NC': '193',
'Wilson County, NC': '195',
'Yadkin County, NC': '197',
'Yancey County, NC': '199'},
38: { '--All--': '%',
'Adams County, ND': '001',
'Barnes County, ND': '003',
'Benson County, ND': '005',
'Billings County, ND': '007',
'Bottineau County, ND': '009',
'Bowman County, ND': '011',
'Burke County, ND': '013',
'Burleigh County, ND': '015',
'Cass County, ND': '017',
'Cavalier County, ND': '019',
'Dickey County, ND': '021',
'Divide County, ND': '023',
'Dunn County, ND': '025',
'Eddy County, ND': '027',
'Emmons County, ND': '029',
'Foster County, ND': '031',
'Golden Valley County, ND': '033',
'Grand Forks County, ND': '035',
'Grant County, ND': '037',
'Griggs County, ND': '039',
'Hettinger County, ND': '041',
'Kidder County, ND': '043',
'LaMoure County, ND': '045',
'Logan County, ND': '047',
'McHenry County, ND': '049',
'McIntosh County, ND': '051',
'McKenzie County, ND': '053',
'McLean County, ND': '055',
'Mercer County, ND': '057',
'Morton County, ND': '059',
'Mountrail County, ND': '061',
'Nelson County, ND': '063',
'Oliver County, ND': '065',
'Pembina County, ND': '067',
'Pierce County, ND': '069',
'Ramsey County, ND': '071',
'Ransom County, ND': '073',
'Renville County, ND': '075',
'Richland County, ND': '077',
'Rolette County, ND': '079',
'Sargent County, ND': '081',
'Sheridan County, ND': '083',
'Sioux County, ND': '085',
'Slope County, ND': '087',
'Stark County, ND': '089',
'Steele County, ND': '091',
'Stutsman County, ND': '093',
'Towner County, ND': '095',
'Traill County, ND': '097',
'Walsh County, ND': '099',
'Ward County, ND': '101',
'Wells County, ND': '103',
'Williams County, ND': '105'},
39: { '--All--': '%',
'Adams County, OH': '001',
'Allen County, OH': '003',
'Ashland County, OH': '005',
'Ashtabula County, OH': '007',
'Athens County, OH': '009',
'Auglaize County, OH': '011',
'Belmont County, OH': '013',
'Brown County, OH': '015',
'Butler County, OH': '017',
'Carroll County, OH': '019',
'Champaign County, OH': '021',
'Clark County, OH': '023',
'Clermont County, OH': '025',
'Clinton County, OH': '027',
'Columbiana County, OH': '029',
'Coshocton County, OH': '031',
'Crawford County, OH': '033',
'Cuyahoga County, OH': '035',
'Darke County, OH': '037',
'Defiance County, OH': '039',
'Delaware County, OH': '041',
'Erie County, OH': '043',
'Fairfield County, OH': '045',
'Fayette County, OH': '047',
'Franklin County, OH': '049',
'Fulton County, OH': '051',
'Gallia County, OH': '053',
'Geauga County, OH': '055',
'Greene County, OH': '057',
'Guernsey County, OH': '059',
'Hamilton County, OH': '061',
'Hancock County, OH': '063',
'Hardin County, OH': '065',
'Harrison County, OH': '067',
'Henry County, OH': '069',
'Highland County, OH': '071',
'Hocking County, OH': '073',
'Holmes County, OH': '075',
'Huron County, OH': '077',
'Jackson County, OH': '079',
'Jefferson County, OH': '081',
'Knox County, OH': '083',
'Lake County, OH': '085',
'Lawrence County, OH': '087',
'Licking County, OH': '089',
'Logan County, OH': '091',
'Lorain County, OH': '093',
'Lucas County, OH': '095',
'Madison County, OH': '097',
'Mahoning County, OH': '099',
'Marion County, OH': '101',
'Medina County, OH': '103',
'Meigs County, OH': '105',
'Mercer County, OH': '107',
'Miami County, OH': '109',
'Monroe County, OH': '111',
'Montgomery County, OH': '113',
'Morgan County, OH': '115',
'Morrow County, OH': '117',
'Muskingum County, OH': '119',
'Noble County, OH': '121',
'Ottawa County, OH': '123',
'Paulding County, OH': '125',
'Perry County, OH': '127',
'Pickaway County, OH': '129',
'Pike County, OH': '131',
'Portage County, OH': '133',
'Preble County, OH': '135',
'Putnam County, OH': '137',
'Richland County, OH': '139',
'Ross County, OH': '141',
'Sandusky County, OH': '143',
'Scioto County, OH': '145',
'Seneca County, OH': '147',
'Shelby County, OH': '149',
'Stark County, OH': '151',
'Summit County, OH': '153',
'Trumbull County, OH': '155',
'Tuscarawas County, OH': '157',
'Union County, OH': '159',
'Van Wert County, OH': '161',
'Vinton County, OH': '163',
'Warren County, OH': '165',
'Washington County, OH': '167',
'Wayne County, OH': '169',
'Williams County, OH': '171',
'Wood County, OH': '173',
'Wyandot County, OH': '175'},
40: { '--All--': '%',
'Adair County, OK': '001',
'Alfalfa County, OK': '003',
'Atoka County, OK': '005',
'Beaver County, OK': '007',
'Beckham County, OK': '009',
'Blaine County, OK': '011',
'Bryan County, OK': '013',
'Caddo County, OK': '015',
'Canadian County, OK': '017',
'Carter County, OK': '019',
'Cherokee County, OK': '021',
'Choctaw County, OK': '023',
'Cimarron County, OK': '025',
'Cleveland County, OK': '027',
'Coal County, OK': '029',
'Comanche County, OK': '031',
'Cotton County, OK': '033',
'Craig County, OK': '035',
'Creek County, OK': '037',
'Custer County, OK': '039',
'Delaware County, OK': '041',
'Dewey County, OK': '043',
'Ellis County, OK': '045',
'Garfield County, OK': '047',
'Garvin County, OK': '049',
'Grady County, OK': '051',
'Grant County, OK': '053',
'Greer County, OK': '055',
'Harmon County, OK': '057',
'Harper County, OK': '059',
'Haskell County, OK': '061',
'Hughes County, OK': '063',
'Jackson County, OK': '065',
'Jefferson County, OK': '067',
'Johnston County, OK': '069',
'Kay County, OK': '071',
'Kingfisher County, OK': '073',
'Kiowa County, OK': '075',
'Latimer County, OK': '077',
'Le Flore County, OK': '079',
'Lincoln County, OK': '081',
'Logan County, OK': '083',
'Love County, OK': '085',
'Major County, OK': '093',
'Marshall County, OK': '095',
'Mayes County, OK': '097',
'McClain County, OK': '087',
'McCurtain County, OK': '089',
'McIntosh County, OK': '091',
'Murray County, OK': '099',
'Muskogee County, OK': '101',
'Noble County, OK': '103',
'Nowata County, OK': '105',
'Okfuskee County, OK': '107',
'Oklahoma County, OK': '109',
'Okmulgee County, OK': '111',
'Osage County, OK': '113',
'Ottawa County, OK': '115',
'Pawnee County, OK': '117',
'Payne County, OK': '119',
'Pittsburg County, OK': '121',
'Pontotoc County, OK': '123',
'Pottawatomie County, OK': '125',
'Pushmataha County, OK': '127',
'Roger Mills County, OK': '129',
'Rogers County, OK': '131',
'Seminole County, OK': '133',
'Sequoyah County, OK': '135',
'Stephens County, OK': '137',
'Texas County, OK': '139',
'Tillman County, OK': '141',
'Tulsa County, OK': '143',
'Wagoner County, OK': '145',
'Washington County, OK': '147',
'Washita County, OK': '149',
'Woods County, OK': '151',
'Woodward County, OK': '153'},
41: { '--All--': '%',
'Baker County, OR': '001',
'Benton County, OR': '003',
'Clackamas County, OR': '005',
'Clatsop County, OR': '007',
'Columbia County, OR': '009',
'Coos County, OR': '011',
'Crook County, OR': '013',
'Curry County, OR': '015',
'Deschutes County, OR': '017',
'Douglas County, OR': '019',
'Gilliam County, OR': '021',
'Grant County, OR': '023',
'Harney County, OR': '025',
'Hood River County, OR': '027',
'Jackson County, OR': '029',
'Jefferson County, OR': '031',
'Josephine County, OR': '033',
'Klamath County, OR': '035',
'Lake County, OR': '037',
'Lane County, OR': '039',
'Lincoln County, OR': '041',
'Linn County, OR': '043',
'Malheur County, OR': '045',
'Marion County, OR': '047',
'Morrow County, OR': '049',
'Multnomah County, OR': '051',
'Polk County, OR': '053',
'Sherman County, OR': '055',
'Tillamook County, OR': '057',
'Umatilla County, OR': '059',
'Union County, OR': '061',
'Wallowa County, OR': '063',
'Wasco County, OR': '065',
'Washington County, OR': '067',
'Wheeler County, OR': '069',
'Yamhill County, OR': '071'},
42: { '--All--': '%',
'Adams County, PA': '001',
'Allegheny County, PA': '003',
'Armstrong County, PA': '005',
'Beaver County, PA': '007',
'Bedford County, PA': '009',
'Berks County, PA': '011',
'Blair County, PA': '013',
'Bradford County, PA': '015',
'Bucks County, PA': '017',
'Butler County, PA': '019',
'Cambria County, PA': '021',
'Cameron County, PA': '023',
'Carbon County, PA': '025',
'Centre County, PA': '027',
'Chester County, PA': '029',
'Clarion County, PA': '031',
'Clearfield County, PA': '033',
'Clinton County, PA': '035',
'Columbia County, PA': '037',
'Crawford County, PA': '039',
'Cumberland County, PA': '041',
'Dauphin County, PA': '043',
'Delaware County, PA': '045',
'Elk County, PA': '047',
'Erie County, PA': '049',
'Fayette County, PA': '051',
'Forest County, PA': '053',
'Franklin County, PA': '055',
'Fulton County, PA': '057',
'Greene County, PA': '059',
'Huntingdon County, PA': '061',
'Indiana County, PA': '063',
'Jefferson County, PA': '065',
'Juniata County, PA': '067',
'Lackawanna County, PA': '069',
'Lancaster County, PA': '071',
'Lawrence County, PA': '073',
'Lebanon County, PA': '075',
'Lehigh County, PA': '077',
'Luzerne County, PA': '079',
'Lycoming County, PA': '081',
'McKean County, PA': '083',
'Mercer County, PA': '085',
'Mifflin County, PA': '087',
'Monroe County, PA': '089',
'Montgomery County, PA': '091',
'Montour County, PA': '093',
'Northampton County, PA': '095',
'Northumberland County, PA': '097',
'Perry County, PA': '099',
'Philadelphia County/city, PA': '101',
'Pike County, PA': '103',
'Potter County, PA': '105',
'Schuylkill County, PA': '107',
'Snyder County, PA': '109',
'Somerset County, PA': '111',
'Sullivan County, PA': '113',
'Susquehanna County, PA': '115',
'Tioga County, PA': '117',
'Union County, PA': '119',
'Venango County, PA': '121',
'Warren County, PA': '123',
'Washington County, PA': '125',
'Wayne County, PA': '127',
'Westmoreland County, PA': '129',
'Wyoming County, PA': '131',
'York County, PA': '133'},
44: { '--All--': '%',
'Bristol County, RI': '001',
'Kent County, RI': '003',
'Newport County, RI': '005',
'Providence County, RI': '007',
'Washington County, RI': '009'},
45: { '--All--': '%',
'Abbeville County, SC': '001',
'Aiken County, SC': '003',
'Allendale County, SC': '005',
'Anderson County, SC': '007',
'Bamberg County, SC': '009',
'Barnwell County, SC': '011',
'Beaufort County, SC': '013',
'Berkeley County, SC': '015',
'Calhoun County, SC': '017',
'Charleston County, SC': '019',
'Cherokee County, SC': '021',
'Chester County, SC': '023',
'Chesterfield County, SC': '025',
'Clarendon County, SC': '027',
'Colleton County, SC': '029',
'Darlington County, SC': '031',
'Dillon County, SC': '033',
'Dorchester County, SC': '035',
'Edgefield County, SC': '037',
'Fairfield County, SC': '039',
'Florence County, SC': '041',
'Georgetown County, SC': '043',
'Greenville County, SC': '045',
'Greenwood County, SC': '047',
'Hampton County, SC': '049',
'Horry County, SC': '051',
'Jasper County, SC': '053',
'Kershaw County, SC': '055',
'Lancaster County, SC': '057',
'Laurens County, SC': '059',
'Lee County, SC': '061',
'Lexington County, SC': '063',
'Marion County, SC': '067',
'Marlboro County, SC': '069',
'McCormick County, SC': '065',
'Newberry County, SC': '071',
'Oconee County, SC': '073',
'Orangeburg County, SC': '075',
'Pickens County, SC': '077',
'Richland County, SC': '079',
'Saluda County, SC': '081',
'Spartanburg County, SC': '083',
'Sumter County, SC': '085',
'Union County, SC': '087',
'Williamsburg County, SC': '089',
'York County, SC': '091'},
46: { '--All--': '%',
'Aurora County, SD': '003',
'Beadle County, SD': '005',
'Bennett County, SD': '007',
'Bon Homme County, SD': '009',
'Brookings County, SD': '011',
'Brown County, SD': '013',
'Brule County, SD': '015',
'Buffalo County, SD': '017',
'Butte County, SD': '019',
'Campbell County, SD': '021',
'Charles Mix County, SD': '023',
'Clark County, SD': '025',
'Clay County, SD': '027',
'Codington County, SD': '029',
'Corson County, SD': '031',
'Custer County, SD': '033',
'Davison County, SD': '035',
'Day County, SD': '037',
'Deuel County, SD': '039',
'Dewey County, SD': '041',
'Douglas County, SD': '043',
'Edmunds County, SD': '045',
'Fall River County, SD': '047',
'Faulk County, SD': '049',
'Grant County, SD': '051',
'Gregory County, SD': '053',
'Haakon County, SD': '055',
'Hamlin County, SD': '057',
'Hand County, SD': '059',
'Hanson County, SD': '061',
'Harding County, SD': '063',
'Hughes County, SD': '065',
'Hutchinson County, SD': '067',
'Hyde County, SD': '069',
'Jackson County, SD': '071',
'Jerauld County, SD': '073',
'Jones County, SD': '075',
'Kingsbury County, SD': '077',
'Lake County, SD': '079',
'Lawrence County, SD': '081',
'Lincoln County, SD': '083',
'Lyman County, SD': '085',
'Marshall County, SD': '091',
'McCook County, SD': '087',
'McPherson County, SD': '089',
'Meade County, SD': '093',
'Mellette County, SD': '095',
'Miner County, SD': '097',
'Minnehaha County, SD': '099',
'Moody County, SD': '101',
'Pennington County, SD': '103',
'Perkins County, SD': '105',
'Potter County, SD': '107',
'Roberts County, SD': '109',
'Sanborn County, SD': '111',
'Shannon County, SD': '113',
'Spink County, SD': '115',
'Stanley County, SD': '117',
'Sully County, SD': '119',
'Todd County, SD': '121',
'Tripp County, SD': '123',
'Turner County, SD': '125',
'Union County, SD': '127',
'Walworth County, SD': '129',
'Yankton County, SD': '135',
'Ziebach County, SD': '137'},
47: { '--All--': '%',
'Anderson County, TN': '001',
'Bedford County, TN': '003',
'Benton County, TN': '005',
'Bledsoe County, TN': '007',
'Blount County, TN': '009',
'Bradley County, TN': '011',
'Campbell County, TN': '013',
'Cannon County, TN': '015',
'Carroll County, TN': '017',
'Carter County, TN': '019',
'Cheatham County, TN': '021',
'Chester County, TN': '023',
'Claiborne County, TN': '025',
'Clay County, TN': '027',
'Cocke County, TN': '029',
'Coffee County, TN': '031',
'Crockett County, TN': '033',
'Cumberland County, TN': '035',
'Davidson County, TN': '037',
'DeKalb County, TN': '041',
'Decatur County, TN': '039',
'Dickson County, TN': '043',
'Dyer County, TN': '045',
'Fayette County, TN': '047',
'Fentress County, TN': '049',
'Franklin County, TN': '051',
'Gibson County, TN': '053',
'Giles County, TN': '055',
'Grainger County, TN': '057',
'Greene County, TN': '059',
'Grundy County, TN': '061',
'Hamblen County, TN': '063',
'Hamilton County, TN': '065',
'Hancock County, TN': '067',
'Hardeman County, TN': '069',
'Hardin County, TN': '071',
'Hawkins County, TN': '073',
'Haywood County, TN': '075',
'Henderson County, TN': '077',
'Henry County, TN': '079',
'Hickman County, TN': '081',
'Houston County, TN': '083',
'Humphreys County, TN': '085',
'Jackson County, TN': '087',
'Jefferson County, TN': '089',
'Johnson County, TN': '091',
'Knox County, TN': '093',
'Lake County, TN': '095',
'Lauderdale County, TN': '097',
'Lawrence County, TN': '099',
'Lewis County, TN': '101',
'Lincoln County, TN': '103',
'Loudon County, TN': '105',
'Macon County, TN': '111',
'Madison County, TN': '113',
'Marion County, TN': '115',
'Marshall County, TN': '117',
'Maury County, TN': '119',
'McMinn County, TN': '107',
'McNairy County, TN': '109',
'Meigs County, TN': '121',
'Monroe County, TN': '123',
'Montgomery County, TN': '125',
'Moore County, TN': '127',
'Morgan County, TN': '129',
'Obion County, TN': '131',
'Overton County, TN': '133',
'Perry County, TN': '135',
'Pickett County, TN': '137',
'Polk County, TN': '139',
'Putnam County, TN': '141',
'Rhea County, TN': '143',
'Roane County, TN': '145',
'Robertson County, TN': '147',
'Rutherford County, TN': '149',
'Scott County, TN': '151',
'Sequatchie County, TN': '153',
'Sevier County, TN': '155',
'Shelby County, TN': '157',
'Smith County, TN': '159',
'Stewart County, TN': '161',
'Sullivan County, TN': '163',
'Sumner County, TN': '165',
'Tipton County, TN': '167',
'Trousdale County, TN': '169',
'Unicoi County, TN': '171',
'Union County, TN': '173',
'Van Buren County, TN': '175',
'Warren County, TN': '177',
'Washington County, TN': '179',
'Wayne County, TN': '181',
'Weakley County, TN': '183',
'White County, TN': '185',
'Williamson County, TN': '187',
'Wilson County, TN': '189'},
48: { '--All--': '%',
'Anderson County, TX': '001',
'Andrews County, TX': '003',
'Angelina County, TX': '005',
'Aransas County, TX': '007',
'Archer County, TX': '009',
'Armstrong County, TX': '011',
'Atascosa County, TX': '013',
'Austin County, TX': '015',
'Bailey County, TX': '017',
'Bandera County, TX': '019',
'Bastrop County, TX': '021',
'Baylor County, TX': '023',
'Bee County, TX': '025',
'Bell County, TX': '027',
'Bexar County, TX': '029',
'Blanco County, TX': '031',
'Borden County, TX': '033',
'Bosque County, TX': '035',
'Bowie County, TX': '037',
'Brazoria County, TX': '039',
'Brazos County, TX': '041',
'Brewster County, TX': '043',
'Briscoe County, TX': '045',
'Brooks County, TX': '047',
'Brown County, TX': '049',
'Burleson County, TX': '051',
'Burnet County, TX': '053',
'Caldwell County, TX': '055',
'Calhoun County, TX': '057',
'Callahan County, TX': '059',
'Cameron County, TX': '061',
'Camp County, TX': '063',
'Carson County, TX': '065',
'Cass County, TX': '067',
'Castro County, TX': '069',
'Chambers County, TX': '071',
'Cherokee County, TX': '073',
'Childress County, TX': '075',
'Clay County, TX': '077',
'Cochran County, TX': '079',
'Coke County, TX': '081',
'Coleman County, TX': '083',
'Collin County, TX': '085',
'Collingsworth County, TX': '087',
'Colorado County, TX': '089',
'Comal County, TX': '091',
'Comanche County, TX': '093',
'Concho County, TX': '095',
'Cooke County, TX': '097',
'Coryell County, TX': '099',
'Cottle County, TX': '101',
'Crane County, TX': '103',
'Crockett County, TX': '105',
'Crosby County, TX': '107',
'Culberson County, TX': '109',
'Dallam County, TX': '111',
'Dallas County, TX': '113',
'Dawson County, TX': '115',
'DeWitt County, TX': '123',
'Deaf Smith County, TX': '117',
'Delta County, TX': '119',
'Denton County, TX': '121',
'Dickens County, TX': '125',
'Dimmit County, TX': '127',
'Donley County, TX': '129',
'Duval County, TX': '131',
'Eastland County, TX': '133',
'Ector County, TX': '135',
'Edwards County, TX': '137',
'El Paso County, TX': '141',
'Ellis County, TX': '139',
'Erath County, TX': '143',
'Falls County, TX': '145',
'Fannin County, TX': '147',
'Fayette County, TX': '149',
'Fisher County, TX': '151',
'Floyd County, TX': '153',
'Foard County, TX': '155',
'Fort Bend County, TX': '157',
'Franklin County, TX': '159',
'Freestone County, TX': '161',
'Frio County, TX': '163',
'Gaines County, TX': '165',
'Galveston County, TX': '167',
'Garza County, TX': '169',
'Gillespie County, TX': '171',
'Glasscock County, TX': '173',
'Goliad County, TX': '175',
'Gonzales County, TX': '177',
'Gray County, TX': '179',
'Grayson County, TX': '181',
'Gregg County, TX': '183',
'Grimes County, TX': '185',
'Guadalupe County, TX': '187',
'Hale County, TX': '189',
'Hall County, TX': '191',
'Hamilton County, TX': '193',
'Hansford County, TX': '195',
'Hardeman County, TX': '197',
'Hardin County, TX': '199',
'Harris County, TX': '201',
'Harrison County, TX': '203',
'Hartley County, TX': '205',
'Haskell County, TX': '207',
'Hays County, TX': '209',
'Hemphill County, TX': '211',
'Henderson County, TX': '213',
'Hidalgo County, TX': '215',
'Hill County, TX': '217',
'Hockley County, TX': '219',
'Hood County, TX': '221',
'Hopkins County, TX': '223',
'Houston County, TX': '225',
'Howard County, TX': '227',
'Hudspeth County, TX': '229',
'Hunt County, TX': '231',
'Hutchinson County, TX': '233',
'Irion County, TX': '235',
'Jack County, TX': '237',
'Jackson County, TX': '239',
'Jasper County, TX': '241',
'Jeff Davis County, TX': '243',
'Jefferson County, TX': '245',
'Jim Hogg County, TX': '247',
'Jim Wells County, TX': '249',
'Johnson County, TX': '251',
'Jones County, TX': '253',
'Karnes County, TX': '255',
'Kaufman County, TX': '257',
'Kendall County, TX': '259',
'Kenedy County, TX': '261',
'Kent County, TX': '263',
'Kerr County, TX': '265',
'Kimble County, TX': '267',
'King County, TX': '269',
'Kinney County, TX': '271',
'Kleberg County, TX': '273',
'Knox County, TX': '275',
'La Salle County, TX': '283',
'Lamar County, TX': '277',
'Lamb County, TX': '279',
'Lampasas County, TX': '281',
'Lavaca County, TX': '285',
'Lee County, TX': '287',
'Leon County, TX': '289',
'Liberty County, TX': '291',
'Limestone County, TX': '293',
'Lipscomb County, TX': '295',
'Live Oak County, TX': '297',
'Llano County, TX': '299',
'Loving County, TX': '301',
'Lubbock County, TX': '303',
'Lynn County, TX': '305',
'Madison County, TX': '313',
'Marion County, TX': '315',
'Martin County, TX': '317',
'Mason County, TX': '319',
'Matagorda County, TX': '321',
'Maverick County, TX': '323',
'McCulloch County, TX': '307',
'McLennan County, TX': '309',
'McMullen County, TX': '311',
'Medina County, TX': '325',
'Menard County, TX': '327',
'Midland County, TX': '329',
'Milam County, TX': '331',
'Mills County, TX': '333',
'Mitchell County, TX': '335',
'Montague County, TX': '337',
'Montgomery County, TX': '339',
'Moore County, TX': '341',
'Morris County, TX': '343',
'Motley County, TX': '345',
'Nacogdoches County, TX': '347',
'Navarro County, TX': '349',
'Newton County, TX': '351',
'Nolan County, TX': '353',
'Nueces County, TX': '355',
'Ochiltree County, TX': '357',
'Oldham County, TX': '359',
'Orange County, TX': '361',
'Palo Pinto County, TX': '363',
'Panola County, TX': '365',
'Parker County, TX': '367',
'Parmer County, TX': '369',
'Pecos County, TX': '371',
'Polk County, TX': '373',
'Potter County, TX': '375',
'Presidio County, TX': '377',
'Rains County, TX': '379',
'Randall County, TX': '381',
'Reagan County, TX': '383',
'Real County, TX': '385',
'Red River County, TX': '387',
'Reeves County, TX': '389',
'Refugio County, TX': '391',
'Roberts County, TX': '393',
'Robertson County, TX': '395',
'Rockwall County, TX': '397',
'Runnels County, TX': '399',
'Rusk County, TX': '401',
'Sabine County, TX': '403',
'San Augustine County, TX': '405',
'San Jacinto County, TX': '407',
'San Patricio County, TX': '409',
'San Saba County, TX': '411',
'Schleicher County, TX': '413',
'Scurry County, TX': '415',
'Shackelford County, TX': '417',
'Shelby County, TX': '419',
'Sherman County, TX': '421',
'Smith County, TX': '423',
'Somervell County, TX': '425',
'Starr County, TX': '427',
'Stephens County, TX': '429',
'Sterling County, TX': '431',
'Stonewall County, TX': '433',
'Sutton County, TX': '435',
'Swisher County, TX': '437',
'Tarrant County, TX': '439',
'Taylor County, TX': '441',
'Terrell County, TX': '443',
'Terry County, TX': '445',
'Throckmorton County, TX': '447',
'Titus County, TX': '449',
'Tom Green County, TX': '451',
'Travis County, TX': '453',
'Trinity County, TX': '455',
'Tyler County, TX': '457',
'Upshur County, TX': '459',
'Upton County, TX': '461',
'Uvalde County, TX': '463',
'Val Verde County, TX': '465',
'Van Zandt County, TX': '467',
'Victoria County, TX': '469',
'Walker County, TX': '471',
'Waller County, TX': '473',
'Ward County, TX': '475',
'Washington County, TX': '477',
'Webb County, TX': '479',
'Wharton County, TX': '481',
'Wheeler County, TX': '483',
'Wichita County, TX': '485',
'Wilbarger County, TX': '487',
'Willacy County, TX': '489',
'Williamson County, TX': '491',
'Wilson County, TX': '493',
'Winkler County, TX': '495',
'Wise County, TX': '497',
'Wood County, TX': '499',
'Yoakum County, TX': '501',
'Young County, TX': '503',
'Zapata County, TX': '505',
'Zavala County, TX': '507'},
49: { '--All--': '%',
'Beaver County, UT': '001',
'Box Elder County, UT': '003',
'Cache County, UT': '005',
'Carbon County, UT': '007',
'Daggett County, UT': '009',
'Davis County, UT': '011',
'Duchesne County, UT': '013',
'Emery County, UT': '015',
'Garfield County, UT': '017',
'Grand County, UT': '019',
'Iron County, UT': '021',
'Juab County, UT': '023',
'Kane County, UT': '025',
'Millard County, UT': '027',
'Morgan County, UT': '029',
'Piute County, UT': '031',
'Rich County, UT': '033',
'Salt Lake County, UT': '035',
'San Juan County, UT': '037',
'Sanpete County, UT': '039',
'Sevier County, UT': '041',
'Summit County, UT': '043',
'Tooele County, UT': '045',
'Uintah County, UT': '047',
'Utah County, UT': '049',
'Wasatch County, UT': '051',
'Washington County, UT': '053',
'Wayne County, UT': '055',
'Weber County, UT': '057'},
50: { '--All--': '%',
'Addison County, VT': '001',
'Bennington County, VT': '003',
'Caledonia County, VT': '005',
'Chittenden County, VT': '007',
'Essex County, VT': '009',
'Franklin County, VT': '011',
'Grand Isle County, VT': '013',
'Lamoille County, VT': '015',
'Orange County, VT': '017',
'Orleans County, VT': '019',
'Rutland County, VT': '021',
'Washington County, VT': '023',
'Windham County, VT': '025',
'Windsor County, VT': '027'},
51: { '--All--': '%',
'Accomack County, VA': '001',
'Albemarle County, VA': '003',
'Alexandria city, VA': '510',
'Alleghany County, VA': '005',
'Amelia County, VA': '007',
'Amherst County, VA': '009',
'Appomattox County, VA': '011',
'Arlington County, VA': '013',
'Augusta County, VA': '015',
'Bath County, VA': '017',
'Bedford County, VA': '019',
'Bedford city, VA': '515',
'Bland County, VA': '021',
'Botetourt County, VA': '023',
'Bristol city, VA': '520',
'Brunswick County, VA': '025',
'Buchanan County, VA': '027',
'Buckingham County, VA': '029',
'Buena Vista city, VA': '530',
'Campbell County, VA': '031',
'Caroline County, VA': '033',
'Carroll County, VA': '035',
'Charles City County, VA': '036',
'Charlotte County, VA': '037',
'Charlottesville city, VA': '540',
'Chesapeake city, VA': '550',
'Chesterfield County, VA': '041',
'Clarke County, VA': '043',
'Colonial Heights city, VA': '570',
'Covington city, VA': '580',
'Craig County, VA': '045',
'Culpeper County, VA': '047',
'Cumberland County, VA': '049',
'Danville city, VA': '590',
'Dickenson County, VA': '051',
'Dinwiddie County, VA': '053',
'Emporia city, VA': '595',
'Essex County, VA': '057',
'Fairfax County, VA': '059',
'Fairfax city, VA': '600',
'Falls Church city, VA': '610',
'Fauquier County, VA': '061',
'Floyd County, VA': '063',
'Fluvanna County, VA': '065',
'Franklin County, VA': '067',
'Franklin city, VA': '620',
'Frederick County, VA': '069',
'Fredericksburg city, VA': '630',
'Galax city, VA': '640',
'Giles County, VA': '071',
'Gloucester County, VA': '073',
'Goochland County, VA': '075',
'Grayson County, VA': '077',
'Greene County, VA': '079',
'Greensville County, VA': '081',
'Halifax County, VA': '083',
'Hampton city, VA': '650',
'Hanover County, VA': '085',
'Harrisonburg city, VA': '660',
'Henrico County, VA': '087',
'Henry County, VA': '089',
'Highland County, VA': '091',
'Hopewell city, VA': '670',
'Isle of Wight County, VA': '093',
'James City County, VA': '095',
'King George County, VA': '099',
'King William County, VA': '101',
'King and Queen County, VA': '097',
'Lancaster County, VA': '103',
'Lee County, VA': '105',
'Lexington city, VA': '678',
'Loudoun County, VA': '107',
'Louisa County, VA': '109',
'Lunenburg County, VA': '111',
'Lynchburg city, VA': '680',
'Madison County, VA': '113',
'Manassas Park city, VA': '685',
'Manassas city, VA': '683',
'Martinsville city, VA': '690',
'Mathews County, VA': '115',
'Mecklenburg County, VA': '117',
'Middlesex County, VA': '119',
'Montgomery County, VA': '121',
'Nelson County, VA': '125',
'New Kent County, VA': '127',
'Newport News city, VA': '700',
'Norfolk city, VA': '710',
'Northampton County, VA': '131',
'Northumberland County, VA': '133',
'Norton city, VA': '720',
'Nottoway County, VA': '135',
'Orange County, VA': '137',
'Page County, VA': '139',
'Patrick County, VA': '141',
'Petersburg city, VA': '730',
'Pittsylvania County, VA': '143',
'Poquoson city, VA': '735',
'Portsmouth city, VA': '740',
'Powhatan County, VA': '145',
'Prince Edward County, VA': '147',
'Prince George County, VA': '149',
'Prince William County, VA': '153',
'Pulaski County, VA': '155',
'Radford city, VA': '750',
'Rappahannock County, VA': '157',
'Richmond County, VA': '159',
'Richmond city, VA': '760',
'Roanoke County, VA': '161',
'Roanoke city, VA': '770',
'Rockbridge County, VA': '163',
'Rockingham County, VA': '165',
'Russell County, VA': '167',
'Salem city, VA': '775',
'Scott County, VA': '169',
'Shenandoah County, VA': '171',
'Smyth County, VA': '173',
'Southampton County, VA': '175',
'Spotsylvania County, VA': '177',
'Stafford County, VA': '179',
'Staunton city, VA': '790',
'Suffolk city, VA': '800',
'Surry County, VA': '181',
'Sussex County, VA': '183',
'Tazewell County, VA': '185',
'Virginia Beach city, VA': '810',
'Warren County, VA': '187',
'Washington County, VA': '191',
'Waynesboro city, VA': '820',
'Westmoreland County, VA': '193',
'Williamsburg city, VA': '830',
'Winchester city, VA': '840',
'Wise County, VA': '195',
'Wythe County, VA': '197',
'York County, VA': '199'},
53: { '--All--': '%',
'Adams County, WA': '001',
'Asotin County, WA': '003',
'Benton County, WA': '005',
'Chelan County, WA': '007',
'Clallam County, WA': '009',
'Clark County, WA': '011',
'Columbia County, WA': '013',
'Cowlitz County, WA': '015',
'Douglas County, WA': '017',
'Ferry County, WA': '019',
'Franklin County, WA': '021',
'Garfield County, WA': '023',
'Grant County, WA': '025',
'Grays Harbor County, WA': '027',
'Island County, WA': '029',
'Jefferson County, WA': '031',
'King County, WA': '033',
'Kitsap County, WA': '035',
'Kittitas County, WA': '037',
'Klickitat County, WA': '039',
'Lewis County, WA': '041',
'Lincoln County, WA': '043',
'Mason County, WA': '045',
'Okanogan County, WA': '047',
'Pacific County, WA': '049',
'Pend Oreille County, WA': '051',
'Pierce County, WA': '053',
'San Juan County, WA': '055',
'Skagit County, WA': '057',
'Skamania County, WA': '059',
'Snohomish County, WA': '061',
'Spokane County, WA': '063',
'Stevens County, WA': '065',
'Thurston County, WA': '067',
'Wahkiakum County, WA': '069',
'Walla Walla County, WA': '071',
'Whatcom County, WA': '073',
'Whitman County, WA': '075',
'Yakima County, WA': '077'},
54: { '--All--': '%',
'Barbour County, WV': '001',
'Berkeley County, WV': '003',
'Boone County, WV': '005',
'Braxton County, WV': '007',
'Brooke County, WV': '009',
'Cabell County, WV': '011',
'Calhoun County, WV': '013',
'Clay County, WV': '015',
'Doddridge County, WV': '017',
'Fayette County, WV': '019',
'Gilmer County, WV': '021',
'Grant County, WV': '023',
'Greenbrier County, WV': '025',
'Hampshire County, WV': '027',
'Hancock County, WV': '029',
'Hardy County, WV': '031',
'Harrison County, WV': '033',
'Jackson County, WV': '035',
'Jefferson County, WV': '037',
'Kanawha County, WV': '039',
'Lewis County, WV': '041',
'Lincoln County, WV': '043',
'Logan County, WV': '045',
'Marion County, WV': '049',
'Marshall County, WV': '051',
'Mason County, WV': '053',
'McDowell County, WV': '047',
'Mercer County, WV': '055',
'Mineral County, WV': '057',
'Mingo County, WV': '059',
'Monongalia County, WV': '061',
'Monroe County, WV': '063',
'Morgan County, WV': '065',
'Nicholas County, WV': '067',
'Ohio County, WV': '069',
'Pendleton County, WV': '071',
'Pleasants County, WV': '073',
'Pocahontas County, WV': '075',
'Preston County, WV': '077',
'Putnam County, WV': '079',
'Raleigh County, WV': '081',
'Randolph County, WV': '083',
'Ritchie County, WV': '085',
'Roane County, WV': '087',
'Summers County, WV': '089',
'Taylor County, WV': '091',
'Tucker County, WV': '093',
'Tyler County, WV': '095',
'Upshur County, WV': '097',
'Wayne County, WV': '099',
'Webster County, WV': '101',
'Wetzel County, WV': '103',
'Wirt County, WV': '105',
'Wood County, WV': '107',
'Wyoming County, WV': '109'},
55: { '--All--': '%',
'Adams County, WI': '001',
'Ashland County, WI': '003',
'Barron County, WI': '005',
'Bayfield County, WI': '007',
'Brown County, WI': '009',
'Buffalo County, WI': '011',
'Burnett County, WI': '013',
'Calumet County, WI': '015',
'Chippewa County, WI': '017',
'Clark County, WI': '019',
'Columbia County, WI': '021',
'Crawford County, WI': '023',
'Dane County, WI': '025',
'Dodge County, WI': '027',
'Door County, WI': '029',
'Douglas County, WI': '031',
'Dunn County, WI': '033',
'Eau Claire County, WI': '035',
'Florence County, WI': '037',
'Fond du Lac County, WI': '039',
'Forest County, WI': '041',
'Grant County, WI': '043',
'Green County, WI': '045',
'Green Lake County, WI': '047',
'Iowa County, WI': '049',
'Iron County, WI': '051',
'Jackson County, WI': '053',
'Jefferson County, WI': '055',
'Juneau County, WI': '057',
'Kenosha County, WI': '059',
'Kewaunee County, WI': '061',
'La Crosse County, WI': '063',
'Lafayette County, WI': '065',
'Langlade County, WI': '067',
'Lincoln County, WI': '069',
'Manitowoc County, WI': '071',
'Marathon County, WI': '073',
'Marinette County, WI': '075',
'Marquette County, WI': '077',
'Menominee County, WI': '078',
'Milwaukee County, WI': '079',
'Monroe County, WI': '081',
'Oconto County, WI': '083',
'Oneida County, WI': '085',
'Outagamie County, WI': '087',
'Ozaukee County, WI': '089',
'Pepin County, WI': '091',
'Pierce County, WI': '093',
'Polk County, WI': '095',
'Portage County, WI': '097',
'Price County, WI': '099',
'Racine County, WI': '101',
'Richland County, WI': '103',
'Rock County, WI': '105',
'Rusk County, WI': '107',
'Sauk County, WI': '111',
'Sawyer County, WI': '113',
'Shawano County, WI': '115',
'Sheboygan County, WI': '117',
'St. Croix County, WI': '109',
'Taylor County, WI': '119',
'Trempealeau County, WI': '121',
'Vernon County, WI': '123',
'Vilas County, WI': '125',
'Walworth County, WI': '127',
'Washburn County, WI': '129',
'Washington County, WI': '131',
'Waukesha County, WI': '133',
'Waupaca County, WI': '135',
'Waushara County, WI': '137',
'Winnebago County, WI': '139',
'Wood County, WI': '141'},
56: { '--All--': '%',
'Albany County, WY': '001',
'Big Horn County, WY': '003',
'Campbell County, WY': '005',
'Carbon County, WY': '007',
'Converse County, WY': '009',
'Crook County, WY': '011',
'Fremont County, WY': '013',
'Goshen County, WY': '015',
'Hot Springs County, WY': '017',
'Johnson County, WY': '019',
'Laramie County, WY': '021',
'Lincoln County, WY': '023',
'Natrona County, WY': '025',
'Niobrara County, WY': '027',
'Park County, WY': '029',
'Platte County, WY': '031',
'Sheridan County, WY': '033',
'Sublette County, WY': '035',
'Sweetwater County, WY': '037',
'Teton County, WY': '039',
'Uinta County, WY': '041',
'Washakie County, WY': '043',
'Weston County, WY': '045'},
72: { '--All--': '%',
'Adjuntas Municipio, PR': '001',
'Aguada Municipio, PR': '003',
'Aguadilla Municipio, PR': '005',
'Aguas Buenas Municipio, PR': '007',
'Aibonito Municipio, PR': '009',
'Anasco Municipio, PR': '011',
'Arecibo Municipio, PR': '013',
'Arroyo Municipio, PR': '015',
'Barceloneta Municipio, PR': '017',
'Barranquitas Municipio, PR': '019',
'Bayamon Municipio, PR': '021',
'Cabo Rojo Municipio, PR': '023',
'Caguas Municipio, PR': '025',
'Camuy Municipio, PR': '027',
'Canovanas Municipio, PR': '029',
'Carolina Municipio, PR': '031',
'Catano Municipio, PR': '033',
'Cayey Municipio, PR': '035',
'Ceiba Municipio, PR': '037',
'Ciales Municipio, PR': '039',
'Cidra Municipio, PR': '041',
'Coamo Municipio, PR': '043',
'Comerio Municipio, PR': '045',
'Corozal Municipio, PR': '047',
'Culebra Municipio, PR': '049',
'Dorado Municipio, PR': '051',
'Fajardo Municipio, PR': '053',
'Florida Municipio, PR': '054',
'Guanica Municipio, PR': '055',
'Guayama Municipio, PR': '057',
'Guayanilla Municipio, PR': '059',
'Guaynabo Municipio, PR': '061',
'Gurabo Municipio, PR': '063',
'Hatillo Municipio, PR': '065',
'Hormigueros Municipio, PR': '067',
'Humacao Municipio, PR': '069',
'Isabela Municipio, PR': '071',
'Jayuya Municipio, PR': '073',
'Juana Diaz Municipio, PR': '075',
'Juncos Municipio, PR': '077',
'Lajas Municipio, PR': '079',
'Lares Municipio, PR': '081',
'Las Marias Municipio, PR': '083',
'Las Piedras Municipio, PR': '085',
'Loiza Municipio, PR': '087',
'Luquillo Municipio, PR': '089',
'Manati Municipio, PR': '091',
'Maricao Municipio, PR': '093',
'Maunabo Municipio, PR': '095',
'Mayaguez Municipio, PR': '097',
'Moca Municipio, PR': '099',
'Morovis Municipio, PR': '101',
'Naguabo Municipio, PR': '103',
'Naranjito Municipio, PR': '105',
'Orocovis Municipio, PR': '107',
'Patillas Municipio, PR': '109',
'Penuelas Municipio, PR': '111',
'Ponce Municipio, PR': '113',
'Quebradillas Municipio, PR': '115',
'Rincon Municipio, PR': '117',
'Rio Grande Municipio, PR': '119',
'Sabana Grande Municipio, PR': '121',
'Salinas Municipio, PR': '123',
'San German Municipio, PR': '125',
'San Juan Municipio, PR': '127',
'San Lorenzo Municipio, PR': '129',
'San Sebastian Municipio, PR': '131',
'Santa Isabel Municipio, PR': '133',
'Toa Alta Municipio, PR': '135',
'Toa Baja Municipio, PR': '137',
'Trujillo Alto Municipio, PR': '139',
'Utuado Municipio, PR': '141',
'Vega Alta Municipio, PR': '143',
'Vega Baja Municipio, PR': '145',
'Vieques Municipio, PR': '147',
'Villalba Municipio, PR': '149',
'Yabucoa Municipio, PR': '151',
'Yauco Municipio, PR': '153'},
'01': { 'Autauga County, AL': '001',
'Baldwin County, AL': '003',
'Barbour County, AL': '005',
'Bibb County, AL': '007',
'Blount County, AL': '009',
'Bullock County, AL': '011',
'Butler County, AL': '013',
'Calhoun County, AL': '015',
'Chambers County, AL': '017',
'Cherokee County, AL': '019',
'Chilton County, AL': '021',
'Choctaw County, AL': '023',
'Clarke County, AL': '025',
'Clay County, AL': '027',
'Cleburne County, AL': '029',
'Coffee County, AL': '031',
'Colbert County, AL': '033',
'Conecuh County, AL': '035',
'Coosa County, AL': '037',
'Covington County, AL': '039',
'Crenshaw County, AL': '041',
'Cullman County, AL': '043',
'Dale County, AL': '045',
'Dallas County, AL': '047',
'DeKalb County, AL': '049',
'Elmore County, AL': '051',
'Escambia County, AL': '053',
'Etowah County, AL': '055',
'Fayette County, AL': '057',
'Franklin County, AL': '059',
'Geneva County, AL': '061',
'Greene County, AL': '063',
'Hale County, AL': '065',
'Henry County, AL': '067',
'Houston County, AL': '069',
'Jackson County, AL': '071',
'Jefferson County, AL': '073',
'Lamar County, AL': '075',
'Lauderdale County, AL': '077',
'Lawrence County, AL': '079',
'Lee County, AL': '081',
'Limestone County, AL': '083',
'Lowndes County, AL': '085',
'Macon County, AL': '087',
'Madison County, AL': '089',
'Marengo County, AL': '091',
'Marion County, AL': '093',
'Marshall County, AL': '095',
'Mobile County, AL': '097',
'Monroe County, AL': '099',
'Montgomery County, AL': '101',
'Morgan County, AL': '103',
'Perry County, AL': '105',
'Pickens County, AL': '107',
'Pike County, AL': '109',
'Randolph County, AL': '111',
'Russell County, AL': '113',
'Shelby County, AL': '117',
'St. Clair County, AL': '115',
'Sumter County, AL': '119',
'Talladega County, AL': '121',
'Tallapoosa County, AL': '123',
'Tuscaloosa County, AL': '125',
'Walker County, AL': '127',
'Washington County, AL': '129',
'Wilcox County, AL': '131',
'Winston County, AL': '133'},
'02': { 'Aleutians East Borough, AK': '013',
'Aleutians West Census Area, AK': '016',
'Anchorage Borough/municipality, AK': '020',
'Bethel Census Area, AK': '050',
'Bristol Bay Borough, AK': '060',
'Denali Borough, AK': '068',
'Dillingham Census Area, AK': '070',
'Fairbanks North Star Borough, AK': '090',
'Haines Borough, AK': '100',
'Juneau Borough/city, AK': '110',
'Kenai Peninsula Borough, AK': '122',
'Ketchikan Gateway Borough, AK': '130',
'Kodiak Island Borough, AK': '150',
'Lake and Peninsula Borough, AK': '164',
'Matanuska-Susitna Borough, AK': '170',
'Nome Census Area, AK': '180',
'North Slope Borough, AK': '185',
'Northwest Arctic Borough, AK': '188',
'Prince of Wales-Outer Ketchikan Census Area, AK': '201',
'Sitka Borough/city, AK': '220',
'Skagway-Hoonah-Angoon Census Area, AK': '232',
'Southeast Fairbanks Census Area, AK': '240',
'Valdez-Cordova Census Area, AK': '261',
'Wade Hampton Census Area, AK': '270',
'Wrangell-Petersburg Census Area, AK': '280',
'Yakutat Borough, AK': '282',
'Yukon-Koyukuk Census Area, AK': '290'},
'04': { 'Apache County, AZ': '001',
'Cochise County, AZ': '003',
'Coconino County, AZ': '005',
'Gila County, AZ': '007',
'Graham County, AZ': '009',
'Greenlee County, AZ': '011',
'La Paz County, AZ': '012',
'Maricopa County, AZ': '013',
'Mohave County, AZ': '015',
'Navajo County, AZ': '017',
'Pima County, AZ': '019',
'Pinal County, AZ': '021',
'Santa Cruz County, AZ': '023',
'Yavapai County, AZ': '025',
'Yuma County, AZ': '027'},
'05': { 'Arkansas County, AR': '001',
'Ashley County, AR': '003',
'Baxter County, AR': '005',
'Benton County, AR': '007',
'Boone County, AR': '009',
'Bradley County, AR': '011',
'Calhoun County, AR': '013',
'Carroll County, AR': '015',
'Chicot County, AR': '017',
'Clark County, AR': '019',
'Clay County, AR': '021',
'Cleburne County, AR': '023',
'Cleveland County, AR': '025',
'Columbia County, AR': '027',
'Conway County, AR': '029',
'Craighead County, AR': '031',
'Crawford County, AR': '033',
'Crittenden County, AR': '035',
'Cross County, AR': '037',
'Dallas County, AR': '039',
'Desha County, AR': '041',
'Drew County, AR': '043',
'Faulkner County, AR': '045',
'Franklin County, AR': '047',
'Fulton County, AR': '049',
'Garland County, AR': '051',
'Grant County, AR': '053',
'Greene County, AR': '055',
'Hempstead County, AR': '057',
'Hot Spring County, AR': '059',
'Howard County, AR': '061',
'Independence County, AR': '063',
'Izard County, AR': '065',
'Jackson County, AR': '067',
'Jefferson County, AR': '069',
'Johnson County, AR': '071',
'Lafayette County, AR': '073',
'Lawrence County, AR': '075',
'Lee County, AR': '077',
'Lincoln County, AR': '079',
'Little River County, AR': '081',
'Logan County, AR': '083',
'Lonoke County, AR': '085',
'Madison County, AR': '087',
'Marion County, AR': '089',
'Miller County, AR': '091',
'Mississippi County, AR': '093',
'Monroe County, AR': '095',
'Montgomery County, AR': '097',
'Nevada County, AR': '099',
'Newton County, AR': '101',
'Ouachita County, AR': '103',
'Perry County, AR': '105',
'Phillips County, AR': '107',
'Pike County, AR': '109',
'Poinsett County, AR': '111',
'Polk County, AR': '113',
'Pope County, AR': '115',
'Prairie County, AR': '117',
'Pulaski County, AR': '119',
'Randolph County, AR': '121',
'Saline County, AR': '125',
'Scott County, AR': '127',
'Searcy County, AR': '129',
'Sebastian County, AR': '131',
'Sevier County, AR': '133',
'Sharp County, AR': '135',
'St. Francis County, AR': '123',
'Stone County, AR': '137',
'Union County, AR': '139',
'Van Buren County, AR': '141',
'Washington County, AR': '143',
'White County, AR': '145',
'Woodruff County, AR': '147',
'Yell County, AR': '149'},
'06': { 'Alameda County, CA': '001',
'Alpine County, CA': '003',
'Amador County, CA': '005',
'Butte County, CA': '007',
'Calaveras County, CA': '009',
'Colusa County, CA': '011',
'Contra Costa County, CA': '013',
'Del Norte County, CA': '015',
'El Dorado County, CA': '017',
'Fresno County, CA': '019',
'Glenn County, CA': '021',
'Humboldt County, CA': '023',
'Imperial County, CA': '025',
'Inyo County, CA': '027',
'Kern County, CA': '029',
'Kings County, CA': '031',
'Lake County, CA': '033',
'Lassen County, CA': '035',
'Los Angeles County, CA': '037',
'Madera County, CA': '039',
'Marin County, CA': '041',
'Mariposa County, CA': '043',
'Mendocino County, CA': '045',
'Merced County, CA': '047',
'Modoc County, CA': '049',
'Mono County, CA': '051',
'Monterey County, CA': '053',
'Napa County, CA': '055',
'Nevada County, CA': '057',
'Orange County, CA': '059',
'Placer County, CA': '061',
'Plumas County, CA': '063',
'Riverside County, CA': '065',
'Sacramento County, CA': '067',
'San Benito County, CA': '069',
'San Bernardino County, CA': '071',
'San Diego County, CA': '073',
'San Francisco County/city, CA': '075',
'San Joaquin County, CA': '077',
'San Luis Obispo County, CA': '079',
'San Mateo County, CA': '081',
'Santa Barbara County, CA': '083',
'Santa Clara County, CA': '085',
'Santa Cruz County, CA': '087',
'Shasta County, CA': '089',
'Sierra County, CA': '091',
'Siskiyou County, CA': '093',
'Solano County, CA': '095',
'Sonoma County, CA': '097',
'Stanislaus County, CA': '099',
'Sutter County, CA': '101',
'Tehama County, CA': '103',
'Trinity County, CA': '105',
'Tulare County, CA': '107',
'Tuolumne County, CA': '109',
'Ventura County, CA': '111',
'Yolo County, CA': '113',
'Yuba County, CA': '115'},
'08': { 'Adams County, CO': '001',
'Alamosa County, CO': '003',
'Arapahoe County, CO': '005',
'Archuleta County, CO': '007',
'Baca County, CO': '009',
'Bent County, CO': '011',
'Boulder County, CO': '013',
'Broomfield County/city, CO': '014',
'Chaffee County, CO': '015',
'Cheyenne County, CO': '017',
'Clear Creek County, CO': '019',
'Conejos County, CO': '021',
'Costilla County, CO': '023',
'Crowley County, CO': '025',
'Custer County, CO': '027',
'Delta County, CO': '029',
'Denver County/city, CO': '031',
'Dolores County, CO': '033',
'Douglas County, CO': '035',
'Eagle County, CO': '037',
'El Paso County, CO': '041',
'Elbert County, CO': '039',
'Fremont County, CO': '043',
'Garfield County, CO': '045',
'Gilpin County, CO': '047',
'Grand County, CO': '049',
'Gunnison County, CO': '051',
'Hinsdale County, CO': '053',
'Huerfano County, CO': '055',
'Jackson County, CO': '057',
'Jefferson County, CO': '059',
'Kiowa County, CO': '061',
'Kit Carson County, CO': '063',
'La Plata County, CO': '067',
'Lake County, CO': '065',
'Larimer County, CO': '069',
'Las Animas County, CO': '071',
'Lincoln County, CO': '073',
'Logan County, CO': '075',
'Mesa County, CO': '077',
'Mineral County, CO': '079',
'Moffat County, CO': '081',
'Montezuma County, CO': '083',
'Montrose County, CO': '085',
'Morgan County, CO': '087',
'Otero County, CO': '089',
'Ouray County, CO': '091',
'Park County, CO': '093',
'Phillips County, CO': '095',
'Pitkin County, CO': '097',
'Prowers County, CO': '099',
'Pueblo County, CO': '101',
'Rio Blanco County, CO': '103',
'Rio Grande County, CO': '105',
'Routt County, CO': '107',
'Saguache County, CO': '109',
'San Juan County, CO': '111',
'San Miguel County, CO': '113',
'Sedgwick County, CO': '115',
'Summit County, CO': '117',
'Teller County, CO': '119',
'Washington County, CO': '121',
'Weld County, CO': '123',
'Yuma County, CO': '125'},
'09': { 'Fairfield County, CT': '001',
'Hartford County, CT': '003',
'Litchfield County, CT': '005',
'Middlesex County, CT': '007',
'New Haven County, CT': '009',
'New London County, CT': '011',
'Tolland County, CT': '013',
'Windham County, CT': '015'},
'10': { 'Kent County, DE': '001',
'New Castle County, DE': '003',
'Sussex County, DE': '005'},
'11': { 'District of Columbia': '001'},
'12': { 'Alachua County, FL': '001',
'Baker County, FL': '003',
'Bay County, FL': '005',
'Bradford County, FL': '007',
'Brevard County, FL': '009',
'Broward County, FL': '011',
'Calhoun County, FL': '013',
'Charlotte County, FL': '015',
'Citrus County, FL': '017',
'Clay County, FL': '019',
'Collier County, FL': '021',
'Columbia County, FL': '023',
'DeSoto County, FL': '027',
'Dixie County, FL': '029',
'Duval County, FL': '031',
'Escambia County, FL': '033',
'Flagler County, FL': '035',
'Franklin County, FL': '037',
'Gadsden County, FL': '039',
'Gilchrist County, FL': '041',
'Glades County, FL': '043',
'Gulf County, FL': '045',
'Hamilton County, FL': '047',
'Hardee County, FL': '049',
'Hendry County, FL': '051',
'Hernando County, FL': '053',
'Highlands County, FL': '055',
'Hillsborough County, FL': '057',
'Holmes County, FL': '059',
'Indian River County, FL': '061',
'Jackson County, FL': '063',
'Jefferson County, FL': '065',
'Lafayette County, FL': '067',
'Lake County, FL': '069',
'Lee County, FL': '071',
'Leon County, FL': '073',
'Levy County, FL': '075',
'Liberty County, FL': '077',
'Madison County, FL': '079',
'Manatee County, FL': '081',
'Marion County, FL': '083',
'Martin County, FL': '085',
'Miami-Dade County, FL': '086',
'Monroe County, FL': '087',
'Nassau County, FL': '089',
'Okaloosa County, FL': '091',
'Okeechobee County, FL': '093',
'Orange County, FL': '095',
'Osceola County, FL': '097',
'Palm Beach County, FL': '099',
'Pasco County, FL': '101',
'Pinellas County, FL': '103',
'Polk County, FL': '105',
'Putnam County, FL': '107',
'Santa Rosa County, FL': '113',
'Sarasota County, FL': '115',
'Seminole County, FL': '117',
'St. Johns County, FL': '109',
'St. Lucie County, FL': '111',
'Sumter County, FL': '119',
'Suwannee County, FL': '121',
'Taylor County, FL': '123',
'Union County, FL': '125',
'Volusia County, FL': '127',
'Wakulla County, FL': '129',
'Walton County, FL': '131',
'Washington County, FL': '133'},
'13': { 'Appling County, GA': '001',
'Atkinson County, GA': '003',
'Bacon County, GA': '005',
'Baker County, GA': '007',
'Baldwin County, GA': '009',
'Banks County, GA': '011',
'Barrow County, GA': '013',
'Bartow County, GA': '015',
'Ben Hill County, GA': '017',
'Berrien County, GA': '019',
'Bibb County, GA': '021',
'Bleckley County, GA': '023',
'Brantley County, GA': '025',
'Brooks County, GA': '027',
'Bryan County, GA': '029',
'Bulloch County, GA': '031',
'Burke County, GA': '033',
'Butts County, GA': '035',
'Calhoun County, GA': '037',
'Camden County, GA': '039',
'Candler County, GA': '043',
'Carroll County, GA': '045',
'Catoosa County, GA': '047',
'Charlton County, GA': '049',
'Chatham County, GA': '051',
'Chattahoochee County, GA': '053',
'Chattooga County, GA': '055',
'Cherokee County, GA': '057',
'Clarke County, GA': '059',
'Clay County, GA': '061',
'Clayton County, GA': '063',
'Clinch County, GA': '065',
'Cobb County, GA': '067',
'Coffee County, GA': '069',
'Colquitt County, GA': '071',
'Columbia County, GA': '073',
'Cook County, GA': '075',
'Coweta County, GA': '077',
'Crawford County, GA': '079',
'Crisp County, GA': '081',
'Dade County, GA': '083',
'Dawson County, GA': '085',
'DeKalb County, GA': '089',
'Decatur County, GA': '087',
'Dodge County, GA': '091',
'Dooly County, GA': '093',
'Dougherty County, GA': '095',
'Douglas County, GA': '097',
'Early County, GA': '099',
'Echols County, GA': '101',
'Effingham County, GA': '103',
'Elbert County, GA': '105',
'Emanuel County, GA': '107',
'Evans County, GA': '109',
'Fannin County, GA': '111',
'Fayette County, GA': '113',
'Floyd County, GA': '115',
'Forsyth County, GA': '117',
'Franklin County, GA': '119',
'Fulton County, GA': '121',
'Gilmer County, GA': '123',
'Glascock County, GA': '125',
'Glynn County, GA': '127',
'Gordon County, GA': '129',
'Grady County, GA': '131',
'Greene County, GA': '133',
'Gwinnett County, GA': '135',
'Habersham County, GA': '137',
'Hall County, GA': '139',
'Hancock County, GA': '141',
'Haralson County, GA': '143',
'Harris County, GA': '145',
'Hart County, GA': '147',
'Heard County, GA': '149',
'Henry County, GA': '151',
'Houston County, GA': '153',
'Irwin County, GA': '155',
'Jackson County, GA': '157',
'Jasper County, GA': '159',
'Jeff Davis County, GA': '161',
'Jefferson County, GA': '163',
'Jenkins County, GA': '165',
'Johnson County, GA': '167',
'Jones County, GA': '169',
'Lamar County, GA': '171',
'Lanier County, GA': '173',
'Laurens County, GA': '175',
'Lee County, GA': '177',
'Liberty County, GA': '179',
'Lincoln County, GA': '181',
'Long County, GA': '183',
'Lowndes County, GA': '185',
'Lumpkin County, GA': '187',
'Macon County, GA': '193',
'Madison County, GA': '195',
'Marion County, GA': '197',
'McDuffie County, GA': '189',
'McIntosh County, GA': '191',
'Meriwether County, GA': '199',
'Miller County, GA': '201',
'Mitchell County, GA': '205',
'Monroe County, GA': '207',
'Montgomery County, GA': '209',
'Morgan County, GA': '211',
'Murray County, GA': '213',
'Muscogee County, GA': '215',
'Newton County, GA': '217',
'Oconee County, GA': '219',
'Oglethorpe County, GA': '221',
'Paulding County, GA': '223',
'Peach County, GA': '225',
'Pickens County, GA': '227',
'Pierce County, GA': '229',
'Pike County, GA': '231',
'Polk County, GA': '233',
'Pulaski County, GA': '235',
'Putnam County, GA': '237',
'Quitman County, GA': '239',
'Rabun County, GA': '241',
'Randolph County, GA': '243',
'Richmond County, GA': '245',
'Rockdale County, GA': '247',
'Schley County, GA': '249',
'Screven County, GA': '251',
'Seminole County, GA': '253',
'Spalding County, GA': '255',
'Stephens County, GA': '257',
'Stewart County, GA': '259',
'Sumter County, GA': '261',
'Talbot County, GA': '263',
'Taliaferro County, GA': '265',
'Tattnall County, GA': '267',
'Taylor County, GA': '269',
'Telfair County, GA': '271',
'Terrell County, GA': '273',
'Thomas County, GA': '275',
'Tift County, GA': '277',
'Toombs County, GA': '279',
'Towns County, GA': '281',
'Treutlen County, GA': '283',
'Troup County, GA': '285',
'Turner County, GA': '287',
'Twiggs County, GA': '289',
'Union County, GA': '291',
'Upson County, GA': '293',
'Walker County, GA': '295',
'Walton County, GA': '297',
'Ware County, GA': '299',
'Warren County, GA': '301',
'Washington County, GA': '303',
'Wayne County, GA': '305',
'Webster County, GA': '307',
'Wheeler County, GA': '309',
'White County, GA': '311',
'Whitfield County, GA': '313',
'Wilcox County, GA': '315',
'Wilkes County, GA': '317',
'Wilkinson County, GA': '319',
'Worth County, GA': '321'},
'15': { 'Hawaii County, HI': '001',
'Honolulu County/city, HI': '003',
'Kauai County, HI': '007',
'Maui County, HI': '009'},
'16': { 'Ada County, ID': '001',
'Adams County, ID': '003',
'Bannock County, ID': '005',
'Bear Lake County, ID': '007',
'Benewah County, ID': '009',
'Bingham County, ID': '011',
'Blaine County, ID': '013',
'Boise County, ID': '015',
'Bonner County, ID': '017',
'Bonneville County, ID': '019',
'Boundary County, ID': '021',
'Butte County, ID': '023',
'Camas County, ID': '025',
'Canyon County, ID': '027',
'Caribou County, ID': '029',
'Cassia County, ID': '031',
'Clark County, ID': '033',
'Clearwater County, ID': '035',
'Custer County, ID': '037',
'Elmore County, ID': '039',
'Franklin County, ID': '041',
'Fremont County, ID': '043',
'Gem County, ID': '045',
'Gooding County, ID': '047',
'Idaho County, ID': '049',
'Jefferson County, ID': '051',
'Jerome County, ID': '053',
'Kootenai County, ID': '055',
'Latah County, ID': '057',
'Lemhi County, ID': '059',
'Lewis County, ID': '061',
'Lincoln County, ID': '063',
'Madison County, ID': '065',
'Minidoka County, ID': '067',
'Nez Perce County, ID': '069',
'Oneida County, ID': '071',
'Owyhee County, ID': '073',
'Payette County, ID': '075',
'Power County, ID': '077',
'Shoshone County, ID': '079',
'Teton County, ID': '081',
'Twin Falls County, ID': '083',
'Valley County, ID': '085',
'Washington County, ID': '087'},
'17': { 'Adams County, IL': '001',
'Alexander County, IL': '003',
'Bond County, IL': '005',
'Boone County, IL': '007',
'Brown County, IL': '009',
'Bureau County, IL': '011',
'Calhoun County, IL': '013',
'Carroll County, IL': '015',
'Cass County, IL': '017',
'Champaign County, IL': '019',
'Christian County, IL': '021',
'Clark County, IL': '023',
'Clay County, IL': '025',
'Clinton County, IL': '027',
'Coles County, IL': '029',
'Cook County, IL': '031',
'Crawford County, IL': '033',
'Cumberland County, IL': '035',
'De Witt County, IL': '039',
'DeKalb County, IL': '037',
'Douglas County, IL': '041',
'DuPage County, IL': '043',
'Edgar County, IL': '045',
'Edwards County, IL': '047',
'Effingham County, IL': '049',
'Fayette County, IL': '051',
'Ford County, IL': '053',
'Franklin County, IL': '055',
'Fulton County, IL': '057',
'Gallatin County, IL': '059',
'Greene County, IL': '061',
'Grundy County, IL': '063',
'Hamilton County, IL': '065',
'Hancock County, IL': '067',
'Hardin County, IL': '069',
'Henderson County, IL': '071',
'Henry County, IL': '073',
'Iroquois County, IL': '075',
'Jackson County, IL': '077',
'Jasper County, IL': '079',
'Jefferson County, IL': '081',
'Jersey County, IL': '083',
'Jo Daviess County, IL': '085',
'Johnson County, IL': '087',
'Kane County, IL': '089',
'Kankakee County, IL': '091',
'Kendall County, IL': '093',
'Knox County, IL': '095',
'La Salle County, IL': '099',
'Lake County, IL': '097',
'Lawrence County, IL': '101',
'Lee County, IL': '103',
'Livingston County, IL': '105',
'Logan County, IL': '107',
'Macon County, IL': '115',
'Macoupin County, IL': '117',
'Madison County, IL': '119',
'Marion County, IL': '121',
'Marshall County, IL': '123',
'Mason County, IL': '125',
'Massac County, IL': '127',
'McDonough County, IL': '109',
'McHenry County, IL': '111',
'McLean County, IL': '113',
'Menard County, IL': '129',
'Mercer County, IL': '131',
'Monroe County, IL': '133',
'Montgomery County, IL': '135',
'Morgan County, IL': '137',
'Moultrie County, IL': '139',
'Ogle County, IL': '141',
'Peoria County, IL': '143',
'Perry County, IL': '145',
'Piatt County, IL': '147',
'Pike County, IL': '149',
'Pope County, IL': '151',
'Pulaski County, IL': '153',
'Putnam County, IL': '155',
'Randolph County, IL': '157',
'Richland County, IL': '159',
'Rock Island County, IL': '161',
'Saline County, IL': '165',
'Sangamon County, IL': '167',
'Schuyler County, IL': '169',
'Scott County, IL': '171',
'Shelby County, IL': '173',
'St. Clair County, IL': '163',
'Stark County, IL': '175',
'Stephenson County, IL': '177',
'Tazewell County, IL': '179',
'Union County, IL': '181',
'Vermilion County, IL': '183',
'Wabash County, IL': '185',
'Warren County, IL': '187',
'Washington County, IL': '189',
'Wayne County, IL': '191',
'White County, IL': '193',
'Whiteside County, IL': '195',
'Will County, IL': '197',
'Williamson County, IL': '199',
'Winnebago County, IL': '201',
'Woodford County, IL': '203'},
'18': { 'Adams County, IN': '001',
'Allen County, IN': '003',
'Bartholomew County, IN': '005',
'Benton County, IN': '007',
'Blackford County, IN': '009',
'Boone County, IN': '011',
'Brown County, IN': '013',
'Carroll County, IN': '015',
'Cass County, IN': '017',
'Clark County, IN': '019',
'Clay County, IN': '021',
'Clinton County, IN': '023',
'Crawford County, IN': '025',
'Daviess County, IN': '027',
'DeKalb County, IN': '033',
'Dearborn County, IN': '029',
'Decatur County, IN': '031',
'Delaware County, IN': '035',
'Dubois County, IN': '037',
'Elkhart County, IN': '039',
'Fayette County, IN': '041',
'Floyd County, IN': '043',
'Fountain County, IN': '045',
'Franklin County, IN': '047',
'Fulton County, IN': '049',
'Gibson County, IN': '051',
'Grant County, IN': '053',
'Greene County, IN': '055',
'Hamilton County, IN': '057',
'Hancock County, IN': '059',
'Harrison County, IN': '061',
'Hendricks County, IN': '063',
'Henry County, IN': '065',
'Howard County, IN': '067',
'Huntington County, IN': '069',
'Jackson County, IN': '071',
'Jasper County, IN': '073',
'Jay County, IN': '075',
'Jefferson County, IN': '077',
'Jennings County, IN': '079',
'Johnson County, IN': '081',
'Knox County, IN': '083',
'Kosciusko County, IN': '085',
'LaGrange County, IN': '087',
'LaPorte County, IN': '091',
'Lake County, IN': '089',
'Lawrence County, IN': '093',
'Madison County, IN': '095',
'Marion County, IN': '097',
'Marshall County, IN': '099',
'Martin County, IN': '101',
'Miami County, IN': '103',
'Monroe County, IN': '105',
'Montgomery County, IN': '107',
'Morgan County, IN': '109',
'Newton County, IN': '111',
'Noble County, IN': '113',
'Ohio County, IN': '115',
'Orange County, IN': '117',
'Owen County, IN': '119',
'Parke County, IN': '121',
'Perry County, IN': '123',
'Pike County, IN': '125',
'Porter County, IN': '127',
'Posey County, IN': '129',
'Pulaski County, IN': '131',
'Putnam County, IN': '133',
'Randolph County, IN': '135',
'Ripley County, IN': '137',
'Rush County, IN': '139',
'Scott County, IN': '143',
'Shelby County, IN': '145',
'Spencer County, IN': '147',
'St. Joseph County, IN': '141',
'Starke County, IN': '149',
'Steuben County, IN': '151',
'Sullivan County, IN': '153',
'Switzerland County, IN': '155',
'Tippecanoe County, IN': '157',
'Tipton County, IN': '159',
'Union County, IN': '161',
'Vanderburgh County, IN': '163',
'Vermillion County, IN': '165',
'Vigo County, IN': '167',
'Wabash County, IN': '169',
'Warren County, IN': '171',
'Warrick County, IN': '173',
'Washington County, IN': '175',
'Wayne County, IN': '177',
'Wells County, IN': '179',
'White County, IN': '181',
'Whitley County, IN': '183'},
'19': { 'Adair County, IA': '001',
'Adams County, IA': '003',
'Allamakee County, IA': '005',
'Appanoose County, IA': '007',
'Audubon County, IA': '009',
'Benton County, IA': '011',
'Black Hawk County, IA': '013',
'Boone County, IA': '015',
'Bremer County, IA': '017',
'Buchanan County, IA': '019',
'Buena Vista County, IA': '021',
'Butler County, IA': '023',
'Calhoun County, IA': '025',
'Carroll County, IA': '027',
'Cass County, IA': '029',
'Cedar County, IA': '031',
'Cerro Gordo County, IA': '033',
'Cherokee County, IA': '035',
'Chickasaw County, IA': '037',
'Clarke County, IA': '039',
'Clay County, IA': '041',
'Clayton County, IA': '043',
'Clinton County, IA': '045',
'Crawford County, IA': '047',
'Dallas County, IA': '049',
'Davis County, IA': '051',
'Decatur County, IA': '053',
'Delaware County, IA': '055',
'Des Moines County, IA': '057',
'Dickinson County, IA': '059',
'Dubuque County, IA': '061',
'Emmet County, IA': '063',
'Fayette County, IA': '065',
'Floyd County, IA': '067',
'Franklin County, IA': '069',
'Fremont County, IA': '071',
'Greene County, IA': '073',
'Grundy County, IA': '075',
'Guthrie County, IA': '077',
'Hamilton County, IA': '079',
'Hancock County, IA': '081',
'Hardin County, IA': '083',
'Harrison County, IA': '085',
'Henry County, IA': '087',
'Howard County, IA': '089',
'Humboldt County, IA': '091',
'Ida County, IA': '093',
'Iowa County, IA': '095',
'Jackson County, IA': '097',
'Jasper County, IA': '099',
'Jefferson County, IA': '101',
'Johnson County, IA': '103',
'Jones County, IA': '105',
'Keokuk County, IA': '107',
'Kossuth County, IA': '109',
'Lee County, IA': '111',
'Linn County, IA': '113',
'Louisa County, IA': '115',
'Lucas County, IA': '117',
'Lyon County, IA': '119',
'Madison County, IA': '121',
'Mahaska County, IA': '123',
'Marion County, IA': '125',
'Marshall County, IA': '127',
'Mills County, IA': '129',
'Mitchell County, IA': '131',
'Monona County, IA': '133',
'Monroe County, IA': '135',
'Montgomery County, IA': '137',
'Muscatine County, IA': '139',
"O'Brien County, IA": '141',
'Osceola County, IA': '143',
'Page County, IA': '145',
'Palo Alto County, IA': '147',
'Plymouth County, IA': '149',
'Pocahontas County, IA': '151',
'Polk County, IA': '153',
'Pottawattamie County, IA': '155',
'Poweshiek County, IA': '157',
'Ringgold County, IA': '159',
'Sac County, IA': '161',
'Scott County, IA': '163',
'Shelby County, IA': '165',
'Sioux County, IA': '167',
'Story County, IA': '169',
'Tama County, IA': '171',
'Taylor County, IA': '173',
'Union County, IA': '175',
'Van Buren County, IA': '177',
'Wapello County, IA': '179',
'Warren County, IA': '181',
'Washington County, IA': '183',
'Wayne County, IA': '185',
'Webster County, IA': '187',
'Winnebago County, IA': '189',
'Winneshiek County, IA': '191',
'Woodbury County, IA': '193',
'Worth County, IA': '195',
'Wright County, IA': '197'},
'20': { 'Allen County, KS': '001',
'Anderson County, KS': '003',
'Atchison County, KS': '005',
'Barber County, KS': '007',
'Barton County, KS': '009',
'Bourbon County, KS': '011',
'Brown County, KS': '013',
'Butler County, KS': '015',
'Chase County, KS': '017',
'Chautauqua County, KS': '019',
'Cherokee County, KS': '021',
'Cheyenne County, KS': '023',
'Clark County, KS': '025',
'Clay County, KS': '027',
'Cloud County, KS': '029',
'Coffey County, KS': '031',
'Comanche County, KS': '033',
'Cowley County, KS': '035',
'Crawford County, KS': '037',
'Decatur County, KS': '039',
'Dickinson County, KS': '041',
'Doniphan County, KS': '043',
'Douglas County, KS': '045',
'Edwards County, KS': '047',
'Elk County, KS': '049',
'Ellis County, KS': '051',
'Ellsworth County, KS': '053',
'Finney County, KS': '055',
'Ford County, KS': '057',
'Franklin County, KS': '059',
'Geary County, KS': '061',
'Gove County, KS': '063',
'Graham County, KS': '065',
'Grant County, KS': '067',
'Gray County, KS': '069',
'Greeley County, KS': '071',
'Greenwood County, KS': '073',
'Hamilton County, KS': '075',
'Harper County, KS': '077',
'Harvey County, KS': '079',
'Haskell County, KS': '081',
'Hodgeman County, KS': '083',
'Jackson County, KS': '085',
'Jefferson County, KS': '087',
'Jewell County, KS': '089',
'Johnson County, KS': '091',
'Kearny County, KS': '093',
'Kingman County, KS': '095',
'Kiowa County, KS': '097',
'Labette County, KS': '099',
'Lane County, KS': '101',
'Leavenworth County, KS': '103',
'Lincoln County, KS': '105',
'Linn County, KS': '107',
'Logan County, KS': '109',
'Lyon County, KS': '111',
'Marion County, KS': '115',
'Marshall County, KS': '117',
'McPherson County, KS': '113',
'Meade County, KS': '119',
'Miami County, KS': '121',
'Mitchell County, KS': '123',
'Montgomery County, KS': '125',
'Morris County, KS': '127',
'Morton County, KS': '129',
'Nemaha County, KS': '131',
'Neosho County, KS': '133',
'Ness County, KS': '135',
'Norton County, KS': '137',
'Osage County, KS': '139',
'Osborne County, KS': '141',
'Ottawa County, KS': '143',
'Pawnee County, KS': '145',
'Phillips County, KS': '147',
'Pottawatomie County, KS': '149',
'Pratt County, KS': '151',
'Rawlins County, KS': '153',
'Reno County, KS': '155',
'Republic County, KS': '157',
'Rice County, KS': '159',
'Riley County, KS': '161',
'Rooks County, KS': '163',
'Rush County, KS': '165',
'Russell County, KS': '167',
'Saline County, KS': '169',
'Scott County, KS': '171',
'Sedgwick County, KS': '173',
'Seward County, KS': '175',
'Shawnee County, KS': '177',
'Sheridan County, KS': '179',
'Sherman County, KS': '181',
'Smith County, KS': '183',
'Stafford County, KS': '185',
'Stanton County, KS': '187',
'Stevens County, KS': '189',
'Sumner County, KS': '191',
'Thomas County, KS': '193',
'Trego County, KS': '195',
'Wabaunsee County, KS': '197',
'Wallace County, KS': '199',
'Washington County, KS': '201',
'Wichita County, KS': '203',
'Wilson County, KS': '205',
'Woodson County, KS': '207',
'Wyandotte County, KS': '209'},
'21': { 'Adair County, KY': '001',
'Allen County, KY': '003',
'Anderson County, KY': '005',
'Ballard County, KY': '007',
'Barren County, KY': '009',
'Bath County, KY': '011',
'Bell County, KY': '013',
'Boone County, KY': '015',
'Bourbon County, KY': '017',
'Boyd County, KY': '019',
'Boyle County, KY': '021',
'Bracken County, KY': '023',
'Breathitt County, KY': '025',
'Breckinridge County, KY': '027',
'Bullitt County, KY': '029',
'Butler County, KY': '031',
'Caldwell County, KY': '033',
'Calloway County, KY': '035',
'Campbell County, KY': '037',
'Carlisle County, KY': '039',
'Carroll County, KY': '041',
'Carter County, KY': '043',
'Casey County, KY': '045',
'Christian County, KY': '047',
'Clark County, KY': '049',
'Clay County, KY': '051',
'Clinton County, KY': '053',
'Crittenden County, KY': '055',
'Cumberland County, KY': '057',
'Daviess County, KY': '059',
'Edmonson County, KY': '061',
'Elliott County, KY': '063',
'Estill County, KY': '065',
'Fayette County, KY': '067',
'Fleming County, KY': '069',
'Floyd County, KY': '071',
'Franklin County, KY': '073',
'Fulton County, KY': '075',
'Gallatin County, KY': '077',
'Garrard County, KY': '079',
'Grant County, KY': '081',
'Graves County, KY': '083',
'Grayson County, KY': '085',
'Green County, KY': '087',
'Greenup County, KY': '089',
'Hancock County, KY': '091',
'Hardin County, KY': '093',
'Harlan County, KY': '095',
'Harrison County, KY': '097',
'Hart County, KY': '099',
'Henderson County, KY': '101',
'Henry County, KY': '103',
'Hickman County, KY': '105',
'Hopkins County, KY': '107',
'Jackson County, KY': '109',
'Jefferson County, KY': '111',
'Jessamine County, KY': '113',
'Johnson County, KY': '115',
'Kenton County, KY': '117',
'Knott County, KY': '119',
'Knox County, KY': '121',
'Larue County, KY': '123',
'Laurel County, KY': '125',
'Lawrence County, KY': '127',
'Lee County, KY': '129',
'Leslie County, KY': '131',
'Letcher County, KY': '133',
'Lewis County, KY': '135',
'Lincoln County, KY': '137',
'Livingston County, KY': '139',
'Logan County, KY': '141',
'Lyon County, KY': '143',
'Madison County, KY': '151',
'Magoffin County, KY': '153',
'Marion County, KY': '155',
'Marshall County, KY': '157',
'Martin County, KY': '159',
'Mason County, KY': '161',
'McCracken County, KY': '145',
'McCreary County, KY': '147',
'McLean County, KY': '149',
'Meade County, KY': '163',
'Menifee County, KY': '165',
'Mercer County, KY': '167',
'Metcalfe County, KY': '169',
'Monroe County, KY': '171',
'Montgomery County, KY': '173',
'Morgan County, KY': '175',
'Muhlenberg County, KY': '177',
'Nelson County, KY': '179',
'Nicholas County, KY': '181',
'Ohio County, KY': '183',
'Oldham County, KY': '185',
'Owen County, KY': '187',
'Owsley County, KY': '189',
'Pendleton County, KY': '191',
'Perry County, KY': '193',
'Pike County, KY': '195',
'Powell County, KY': '197',
'Pulaski County, KY': '199',
'Robertson County, KY': '201',
'Rockcastle County, KY': '203',
'Rowan County, KY': '205',
'Russell County, KY': '207',
'Scott County, KY': '209',
'Shelby County, KY': '211',
'Simpson County, KY': '213',
'Spencer County, KY': '215',
'Taylor County, KY': '217',
'Todd County, KY': '219',
'Trigg County, KY': '221',
'Trimble County, KY': '223',
'Union County, KY': '225',
'Warren County, KY': '227',
'Washington County, KY': '229',
'Wayne County, KY': '231',
'Webster County, KY': '233',
'Whitley County, KY': '235',
'Wolfe County, KY': '237',
'Woodford County, KY': '239'},
'22': { 'Acadia Parish, LA': '001',
'Allen Parish, LA': '003',
'Ascension Parish, LA': '005',
'Assumption Parish, LA': '007',
'Avoyelles Parish, LA': '009',
'Beauregard Parish, LA': '011',
'Bienville Parish, LA': '013',
'Bossier Parish, LA': '015',
'Caddo Parish, LA': '017',
'Calcasieu Parish, LA': '019',
'Caldwell Parish, LA': '021',
'Cameron Parish, LA': '023',
'Catahoula Parish, LA': '025',
'Claiborne Parish, LA': '027',
'Concordia Parish, LA': '029',
'De Soto Parish, LA': '031',
'East Baton Rouge Parish, LA': '033',
'East Carroll Parish, LA': '035',
'East Feliciana Parish, LA': '037',
'Evangeline Parish, LA': '039',
'Franklin Parish, LA': '041',
'Grant Parish, LA': '043',
'Iberia Parish, LA': '045',
'Iberville Parish, LA': '047',
'Jackson Parish, LA': '049',
'Jefferson Davis Parish, LA': '053',
'Jefferson Parish, LA': '051',
'La Salle Parish, LA': '059',
'Lafayette Parish, LA': '055',
'Lafourche Parish, LA': '057',
'Lincoln Parish, LA': '061',
'Livingston Parish, LA': '063',
'Madison Parish, LA': '065',
'Morehouse Parish, LA': '067',
'Natchitoches Parish, LA': '069',
'Orleans Parish, LA': '071',
'Ouachita Parish, LA': '073',
'Plaquemines Parish, LA': '075',
'Pointe Coupee Parish, LA': '077',
'Rapides Parish, LA': '079',
'Red River Parish, LA': '081',
'Richland Parish, LA': '083',
'Sabine Parish, LA': '085',
'St. Bernard Parish, LA': '087',
'St. Charles Parish, LA': '089',
'St. Helena Parish, LA': '091',
'St. James Parish, LA': '093',
'St. John the Baptist Parish, LA': '095',
'St. Landry Parish, LA': '097',
'St. Martin Parish, LA': '099',
'St. Mary Parish, LA': '101',
'St. Tammany Parish, LA': '103',
'Tangipahoa Parish, LA': '105',
'Tensas Parish, LA': '107',
'Terrebonne Parish, LA': '109',
'Union Parish, LA': '111',
'Vermilion Parish, LA': '113',
'Vernon Parish, LA': '115',
'Washington Parish, LA': '117',
'Webster Parish, LA': '119',
'West Baton Rouge Parish, LA': '121',
'West Carroll Parish, LA': '123',
'West Feliciana Parish, LA': '125',
'Winn Parish, LA': '127'},
'23': { 'Androscoggin County, ME': '001',
'Aroostook County, ME': '003',
'Cumberland County, ME': '005',
'Franklin County, ME': '007',
'Hancock County, ME': '009',
'Kennebec County, ME': '011',
'Knox County, ME': '013',
'Lincoln County, ME': '015',
'Oxford County, ME': '017',
'Penobscot County, ME': '019',
'Piscataquis County, ME': '021',
'Sagadahoc County, ME': '023',
'Somerset County, ME': '025',
'Waldo County, ME': '027',
'Washington County, ME': '029',
'York County, ME': '031'},
'24': { 'Allegany County, MD': '001',
'Anne Arundel County, MD': '003',
'Baltimore County, MD': '005',
'Baltimore city, MD': '510',
'Calvert County, MD': '009',
'Caroline County, MD': '011',
'Carroll County, MD': '013',
'Cecil County, MD': '015',
'Charles County, MD': '017',
'Dorchester County, MD': '019',
'Frederick County, MD': '021',
'Garrett County, MD': '023',
'Harford County, MD': '025',
'Howard County, MD': '027',
'Kent County, MD': '029',
'Montgomery County, MD': '031',
"Prince George's County, MD": '033',
"Queen Anne's County, MD": '035',
'Somerset County, MD': '039',
"St. Mary's County, MD": '037',
'Talbot County, MD': '041',
'Washington County, MD': '043',
'Wicomico County, MD': '045',
'Worcester County, MD': '047'},
'25': { 'Barnstable County, MA': '001',
'Berkshire County, MA': '003',
'Bristol County, MA': '005',
'Dukes County, MA': '007',
'Essex County, MA': '009',
'Franklin County, MA': '011',
'Hampden County, MA': '013',
'Hampshire County, MA': '015',
'Middlesex County, MA': '017',
'Nantucket County/town, MA': '019',
'Norfolk County, MA': '021',
'Plymouth County, MA': '023',
'Suffolk County, MA': '025',
'Worcester County, MA': '027'},
'26': { 'Alcona County, MI': '001',
'Alger County, MI': '003',
'Allegan County, MI': '005',
'Alpena County, MI': '007',
'Antrim County, MI': '009',
'Arenac County, MI': '011',
'Baraga County, MI': '013',
'Barry County, MI': '015',
'Bay County, MI': '017',
'Benzie County, MI': '019',
'Berrien County, MI': '021',
'Branch County, MI': '023',
'Calhoun County, MI': '025',
'Cass County, MI': '027',
'Charlevoix County, MI': '029',
'Cheboygan County, MI': '031',
'Chippewa County, MI': '033',
'Clare County, MI': '035',
'Clinton County, MI': '037',
'Crawford County, MI': '039',
'Delta County, MI': '041',
'Dickinson County, MI': '043',
'Eaton County, MI': '045',
'Emmet County, MI': '047',
'Genesee County, MI': '049',
'Gladwin County, MI': '051',
'Gogebic County, MI': '053',
'Grand Traverse County, MI': '055',
'Gratiot County, MI': '057',
'Hillsdale County, MI': '059',
'Houghton County, MI': '061',
'Huron County, MI': '063',
'Ingham County, MI': '065',
'Ionia County, MI': '067',
'Iosco County, MI': '069',
'Iron County, MI': '071',
'Isabella County, MI': '073',
'Jackson County, MI': '075',
'Kalamazoo County, MI': '077',
'Kalkaska County, MI': '079',
'Kent County, MI': '081',
'Keweenaw County, MI': '083',
'Lake County, MI': '085',
'Lapeer County, MI': '087',
'Leelanau County, MI': '089',
'Lenawee County, MI': '091',
'Livingston County, MI': '093',
'Luce County, MI': '095',
'Mackinac County, MI': '097',
'Macomb County, MI': '099',
'Manistee County, MI': '101',
'Marquette County, MI': '103',
'Mason County, MI': '105',
'Mecosta County, MI': '107',
'Menominee County, MI': '109',
'Midland County, MI': '111',
'Missaukee County, MI': '113',
'Monroe County, MI': '115',
'Montcalm County, MI': '117',
'Montmorency County, MI': '119',
'Muskegon County, MI': '121',
'Newaygo County, MI': '123',
'Oakland County, MI': '125',
'Oceana County, MI': '127',
'Ogemaw County, MI': '129',
'Ontonagon County, MI': '131',
'Osceola County, MI': '133',
'Oscoda County, MI': '135',
'Otsego County, MI': '137',
'Ottawa County, MI': '139',
'Presque Isle County, MI': '141',
'Roscommon County, MI': '143',
'Saginaw County, MI': '145',
'Sanilac County, MI': '151',
'Schoolcraft County, MI': '153',
'Shiawassee County, MI': '155',
'St. Clair County, MI': '147',
'St. Joseph County, MI': '149',
'Tuscola County, MI': '157',
'Van Buren County, MI': '159',
'Washtenaw County, MI': '161',
'Wayne County, MI': '163',
'Wexford County, MI': '165'},
'27': { 'Aitkin County, MN': '001',
'Anoka County, MN': '003',
'Becker County, MN': '005',
'Beltrami County, MN': '007',
'Benton County, MN': '009',
'Big Stone County, MN': '011',
'Blue Earth County, MN': '013',
'Brown County, MN': '015',
'Carlton County, MN': '017',
'Carver County, MN': '019',
'Cass County, MN': '021',
'Chippewa County, MN': '023',
'Chisago County, MN': '025',
'Clay County, MN': '027',
'Clearwater County, MN': '029',
'Cook County, MN': '031',
'Cottonwood County, MN': '033',
'Crow Wing County, MN': '035',
'Dakota County, MN': '037',
'Dodge County, MN': '039',
'Douglas County, MN': '041',
'Faribault County, MN': '043',
'Fillmore County, MN': '045',
'Freeborn County, MN': '047',
'Goodhue County, MN': '049',
'Grant County, MN': '051',
'Hennepin County, MN': '053',
'Houston County, MN': '055',
'Hubbard County, MN': '057',
'Isanti County, MN': '059',
'Itasca County, MN': '061',
'Jackson County, MN': '063',
'Kanabec County, MN': '065',
'Kandiyohi County, MN': '067',
'Kittson County, MN': '069',
'Koochiching County, MN': '071',
'Lac qui Parle County, MN': '073',
'Lake County, MN': '075',
'Lake of the Woods County, MN': '077',
'Le Sueur County, MN': '079',
'Lincoln County, MN': '081',
'Lyon County, MN': '083',
'Mahnomen County, MN': '087',
'Marshall County, MN': '089',
'Martin County, MN': '091',
'McLeod County, MN': '085',
'Meeker County, MN': '093',
'Mille Lacs County, MN': '095',
'Morrison County, MN': '097',
'Mower County, MN': '099',
'Murray County, MN': '101',
'Nicollet County, MN': '103',
'Nobles County, MN': '105',
'Norman County, MN': '107',
'Olmsted County, MN': '109',
'Otter Tail County, MN': '111',
'Pennington County, MN': '113',
'Pine County, MN': '115',
'Pipestone County, MN': '117',
'Polk County, MN': '119',
'Pope County, MN': '121',
'Ramsey County, MN': '123',
'Red Lake County, MN': '125',
'Redwood County, MN': '127',
'Renville County, MN': '129',
'Rice County, MN': '131',
'Rock County, MN': '133',
'Roseau County, MN': '135',
'Scott County, MN': '139',
'Sherburne County, MN': '141',
'Sibley County, MN': '143',
'St. Louis County, MN': '137',
'Stearns County, MN': '145',
'Steele County, MN': '147',
'Stevens County, MN': '149',
'Swift County, MN': '151',
'Todd County, MN': '153',
'Traverse County, MN': '155',
'Wabasha County, MN': '157',
'Wadena County, MN': '159',
'Waseca County, MN': '161',
'Washington County, MN': '163',
'Watonwan County, MN': '165',
'Wilkin County, MN': '167',
'Winona County, MN': '169',
'Wright County, MN': '171',
'Yellow Medicine County, MN': '173'},
'28': { 'Adams County, MS': '001',
'Alcorn County, MS': '003',
'Amite County, MS': '005',
'Attala County, MS': '007',
'Benton County, MS': '009',
'Bolivar County, MS': '011',
'Calhoun County, MS': '013',
'Carroll County, MS': '015',
'Chickasaw County, MS': '017',
'Choctaw County, MS': '019',
'Claiborne County, MS': '021',
'Clarke County, MS': '023',
'Clay County, MS': '025',
'Coahoma County, MS': '027',
'Copiah County, MS': '029',
'Covington County, MS': '031',
'DeSoto County, MS': '033',
'Forrest County, MS': '035',
'Franklin County, MS': '037',
'George County, MS': '039',
'Greene County, MS': '041',
'Grenada County, MS': '043',
'Hancock County, MS': '045',
'Harrison County, MS': '047',
'Hinds County, MS': '049',
'Holmes County, MS': '051',
'Humphreys County, MS': '053',
'Issaquena County, MS': '055',
'Itawamba County, MS': '057',
'Jackson County, MS': '059',
'Jasper County, MS': '061',
'Jefferson County, MS': '063',
'Jefferson Davis County, MS': '065',
'Jones County, MS': '067',
'Kemper County, MS': '069',
'Lafayette County, MS': '071',
'Lamar County, MS': '073',
'Lauderdale County, MS': '075',
'Lawrence County, MS': '077',
'Leake County, MS': '079',
'Lee County, MS': '081',
'Leflore County, MS': '083',
'Lincoln County, MS': '085',
'Lowndes County, MS': '087',
'Madison County, MS': '089',
'Marion County, MS': '091',
'Marshall County, MS': '093',
'Monroe County, MS': '095',
'Montgomery County, MS': '097',
'Neshoba County, MS': '099',
'Newton County, MS': '101',
'Noxubee County, MS': '103',
'Oktibbeha County, MS': '105',
'Panola County, MS': '107',
'Pearl River County, MS': '109',
'Perry County, MS': '111',
'Pike County, MS': '113',
'Pontotoc County, MS': '115',
'Prentiss County, MS': '117',
'Quitman County, MS': '119',
'Rankin County, MS': '121',
'Scott County, MS': '123',
'Sharkey County, MS': '125',
'Simpson County, MS': '127',
'Smith County, MS': '129',
'Stone County, MS': '131',
'Sunflower County, MS': '133',
'Tallahatchie County, MS': '135',
'Tate County, MS': '137',
'Tippah County, MS': '139',
'Tishomingo County, MS': '141',
'Tunica County, MS': '143',
'Union County, MS': '145',
'Walthall County, MS': '147',
'Warren County, MS': '149',
'Washington County, MS': '151',
'Wayne County, MS': '153',
'Webster County, MS': '155',
'Wilkinson County, MS': '157',
'Winston County, MS': '159',
'Yalobusha County, MS': '161',
'Yazoo County, MS': '163'},
'29': { 'Adair County, MO': '001',
'Andrew County, MO': '003',
'Atchison County, MO': '005',
'Audrain County, MO': '007',
'Barry County, MO': '009',
'Barton County, MO': '011',
'Bates County, MO': '013',
'Benton County, MO': '015',
'Bollinger County, MO': '017',
'Boone County, MO': '019',
'Buchanan County, MO': '021',
'Butler County, MO': '023',
'Caldwell County, MO': '025',
'Callaway County, MO': '027',
'Camden County, MO': '029',
'Cape Girardeau County, MO': '031',
'Carroll County, MO': '033',
'Carter County, MO': '035',
'Cass County, MO': '037',
'Cedar County, MO': '039',
'Chariton County, MO': '041',
'Christian County, MO': '043',
'Clark County, MO': '045',
'Clay County, MO': '047',
'Clinton County, MO': '049',
'Cole County, MO': '051',
'Cooper County, MO': '053',
'Crawford County, MO': '055',
'Dade County, MO': '057',
'Dallas County, MO': '059',
'Daviess County, MO': '061',
'DeKalb County, MO': '063',
'Dent County, MO': '065',
'Douglas County, MO': '067',
'Dunklin County, MO': '069',
'Franklin County, MO': '071',
'Gasconade County, MO': '073',
'Gentry County, MO': '075',
'Greene County, MO': '077',
'Grundy County, MO': '079',
'Harrison County, MO': '081',
'Henry County, MO': '083',
'Hickory County, MO': '085',
'Holt County, MO': '087',
'Howard County, MO': '089',
'Howell County, MO': '091',
'Iron County, MO': '093',
'Jackson County, MO': '095',
'Jasper County, MO': '097',
'Jefferson County, MO': '099',
'Johnson County, MO': '101',
'Knox County, MO': '103',
'Laclede County, MO': '105',
'Lafayette County, MO': '107',
'Lawrence County, MO': '109',
'Lewis County, MO': '111',
'Lincoln County, MO': '113',
'Linn County, MO': '115',
'Livingston County, MO': '117',
'Macon County, MO': '121',
'Madison County, MO': '123',
'Maries County, MO': '125',
'Marion County, MO': '127',
'McDonald County, MO': '119',
'Mercer County, MO': '129',
'Miller County, MO': '131',
'Mississippi County, MO': '133',
'Moniteau County, MO': '135',
'Monroe County, MO': '137',
'Montgomery County, MO': '139',
'Morgan County, MO': '141',
'New Madrid County, MO': '143',
'Newton County, MO': '145',
'Nodaway County, MO': '147',
'Oregon County, MO': '149',
'Osage County, MO': '151',
'Ozark County, MO': '153',
'Pemiscot County, MO': '155',
'Perry County, MO': '157',
'Pettis County, MO': '159',
'Phelps County, MO': '161',
'Pike County, MO': '163',
'Platte County, MO': '165',
'Polk County, MO': '167',
'Pulaski County, MO': '169',
'Putnam County, MO': '171',
'Ralls County, MO': '173',
'Randolph County, MO': '175',
'Ray County, MO': '177',
'Reynolds County, MO': '179',
'Ripley County, MO': '181',
'Saline County, MO': '195',
'Schuyler County, MO': '197',
'Scotland County, MO': '199',
'Scott County, MO': '201',
'Shannon County, MO': '203',
'Shelby County, MO': '205',
'St. Charles County, MO': '183',
'St. Clair County, MO': '185',
'St. Francois County, MO': '187',
'St. Louis County, MO': '189',
'St. Louis city, MO': '510',
'Ste. Genevieve County, MO': '186',
'Stoddard County, MO': '207',
'Stone County, MO': '209',
'Sullivan County, MO': '211',
'Taney County, MO': '213',
'Texas County, MO': '215',
'Vernon County, MO': '217',
'Warren County, MO': '219',
'Washington County, MO': '221',
'Wayne County, MO': '223',
'Webster County, MO': '225',
'Worth County, MO': '227',
'Wright County, MO': '229'},
'30': { 'Beaverhead County, MT': '001',
'Big Horn County, MT': '003',
'Blaine County, MT': '005',
'Broadwater County, MT': '007',
'Carbon County, MT': '009',
'Carter County, MT': '011',
'Cascade County, MT': '013',
'Chouteau County, MT': '015',
'Custer County, MT': '017',
'Daniels County, MT': '019',
'Dawson County, MT': '021',
'Deer Lodge County, MT': '023',
'Fallon County, MT': '025',
'Fergus County, MT': '027',
'Flathead County, MT': '029',
'Gallatin County, MT': '031',
'Garfield County, MT': '033',
'Glacier County, MT': '035',
'Golden Valley County, MT': '037',
'Granite County, MT': '039',
'Hill County, MT': '041',
'Jefferson County, MT': '043',
'Judith Basin County, MT': '045',
'Lake County, MT': '047',
'Lewis and Clark County, MT': '049',
'Liberty County, MT': '051',
'Lincoln County, MT': '053',
'Madison County, MT': '057',
'McCone County, MT': '055',
'Meagher County, MT': '059',
'Mineral County, MT': '061',
'Missoula County, MT': '063',
'Musselshell County, MT': '065',
'Park County, MT': '067',
'Petroleum County, MT': '069',
'Phillips County, MT': '071',
'Pondera County, MT': '073',
'Powder River County, MT': '075',
'Powell County, MT': '077',
'Prairie County, MT': '079',
'Ravalli County, MT': '081',
'Richland County, MT': '083',
'Roosevelt County, MT': '085',
'Rosebud County, MT': '087',
'Sanders County, MT': '089',
'Sheridan County, MT': '091',
'Silver Bow County, MT': '093',
'Stillwater County, MT': '095',
'Sweet Grass County, MT': '097',
'Teton County, MT': '099',
'Toole County, MT': '101',
'Treasure County, MT': '103',
'Valley County, MT': '105',
'Wheatland County, MT': '107',
'Wibaux County, MT': '109',
'Yellowstone County, MT': '111'},
'31': { 'Adams County, NE': '001',
'Antelope County, NE': '003',
'Arthur County, NE': '005',
'Banner County, NE': '007',
'Blaine County, NE': '009',
'Boone County, NE': '011',
'Box Butte County, NE': '013',
'Boyd County, NE': '015',
'Brown County, NE': '017',
'Buffalo County, NE': '019',
'Burt County, NE': '021',
'Butler County, NE': '023',
'Cass County, NE': '025',
'Cedar County, NE': '027',
'Chase County, NE': '029',
'Cherry County, NE': '031',
'Cheyenne County, NE': '033',
'Clay County, NE': '035',
'Colfax County, NE': '037',
'Cuming County, NE': '039',
'Custer County, NE': '041',
'Dakota County, NE': '043',
'Dawes County, NE': '045',
'Dawson County, NE': '047',
'Deuel County, NE': '049',
'Dixon County, NE': '051',
'Dodge County, NE': '053',
'Douglas County, NE': '055',
'Dundy County, NE': '057',
'Fillmore County, NE': '059',
'Franklin County, NE': '061',
'Frontier County, NE': '063',
'Furnas County, NE': '065',
'Gage County, NE': '067',
'Garden County, NE': '069',
'Garfield County, NE': '071',
'Gosper County, NE': '073',
'Grant County, NE': '075',
'Greeley County, NE': '077',
'Hall County, NE': '079',
'Hamilton County, NE': '081',
'Harlan County, NE': '083',
'Hayes County, NE': '085',
'Hitchcock County, NE': '087',
'Holt County, NE': '089',
'Hooker County, NE': '091',
'Howard County, NE': '093',
'Jefferson County, NE': '095',
'Johnson County, NE': '097',
'Kearney County, NE': '099',
'Keith County, NE': '101',
'Keya Paha County, NE': '103',
'Kimball County, NE': '105',
'Knox County, NE': '107',
'Lancaster County, NE': '109',
'Lincoln County, NE': '111',
'Logan County, NE': '113',
'Loup County, NE': '115',
'Madison County, NE': '119',
'McPherson County, NE': '117',
'Merrick County, NE': '121',
'Morrill County, NE': '123',
'Nance County, NE': '125',
'Nemaha County, NE': '127',
'Nuckolls County, NE': '129',
'Otoe County, NE': '131',
'Pawnee County, NE': '133',
'Perkins County, NE': '135',
'Phelps County, NE': '137',
'Pierce County, NE': '139',
'Platte County, NE': '141',
'Polk County, NE': '143',
'Red Willow County, NE': '145',
'Richardson County, NE': '147',
'Rock County, NE': '149',
'Saline County, NE': '151',
'Sarpy County, NE': '153',
'Saunders County, NE': '155',
'Scotts Bluff County, NE': '157',
'Seward County, NE': '159',
'Sheridan County, NE': '161',
'Sherman County, NE': '163',
'Sioux County, NE': '165',
'Stanton County, NE': '167',
'Thayer County, NE': '169',
'Thomas County, NE': '171',
'Thurston County, NE': '173',
'Valley County, NE': '175',
'Washington County, NE': '177',
'Wayne County, NE': '179',
'Webster County, NE': '181',
'Wheeler County, NE': '183',
'York County, NE': '185'},
'32': { 'Carson City, NV': '510',
'Churchill County, NV': '001',
'Clark County, NV': '003',
'Douglas County, NV': '005',
'Elko County, NV': '007',
'Esmeralda County, NV': '009',
'Eureka County, NV': '011',
'Humboldt County, NV': '013',
'Lander County, NV': '015',
'Lincoln County, NV': '017',
'Lyon County, NV': '019',
'Mineral County, NV': '021',
'Nye County, NV': '023',
'Pershing County, NV': '027',
'Storey County, NV': '029',
'Washoe County, NV': '031',
'White Pine County, NV': '033'},
'33': { 'Belknap County, NH': '001',
'Carroll County, NH': '003',
'Cheshire County, NH': '005',
'Coos County, NH': '007',
'Grafton County, NH': '009',
'Hillsborough County, NH': '011',
'Merrimack County, NH': '013',
'Rockingham County, NH': '015',
'Strafford County, NH': '017',
'Sullivan County, NH': '019'},
'34': { 'Atlantic County, NJ': '001',
'Bergen County, NJ': '003',
'Burlington County, NJ': '005',
'Camden County, NJ': '007',
'Cape May County, NJ': '009',
'Cumberland County, NJ': '011',
'Essex County, NJ': '013',
'Gloucester County, NJ': '015',
'Hudson County, NJ': '017',
'Hunterdon County, NJ': '019',
'Mercer County, NJ': '021',
'Middlesex County, NJ': '023',
'Monmouth County, NJ': '025',
'Morris County, NJ': '027',
'Ocean County, NJ': '029',
'Passaic County, NJ': '031',
'Salem County, NJ': '033',
'Somerset County, NJ': '035',
'Sussex County, NJ': '037',
'Union County, NJ': '039',
'Warren County, NJ': '041'},
'35': { 'Bernalillo County, NM': '001',
'Catron County, NM': '003',
'Chaves County, NM': '005',
'Cibola County, NM': '006',
'Colfax County, NM': '007',
'Curry County, NM': '009',
'DeBaca County, NM': '011',
'Dona Ana County, NM': '013',
'Eddy County, NM': '015',
'Grant County, NM': '017',
'Guadalupe County, NM': '019',
'Harding County, NM': '021',
'Hidalgo County, NM': '023',
'Lea County, NM': '025',
'Lincoln County, NM': '027',
'Los Alamos County, NM': '028',
'Luna County, NM': '029',
'McKinley County, NM': '031',
'Mora County, NM': '033',
'Otero County, NM': '035',
'Quay County, NM': '037',
'Rio Arriba County, NM': '039',
'Roosevelt County, NM': '041',
'San Juan County, NM': '045',
'San Miguel County, NM': '047',
'Sandoval County, NM': '043',
'Santa Fe County, NM': '049',
'Sierra County, NM': '051',
'Socorro County, NM': '053',
'Taos County, NM': '055',
'Torrance County, NM': '057',
'Union County, NM': '059',
'Valencia County, NM': '061'},
'36': { 'Albany County, NY': '001',
'Allegany County, NY': '003',
'Bronx County, NY': '005',
'Broome County, NY': '007',
'Cattaraugus County, NY': '009',
'Cayuga County, NY': '011',
'Chautauqua County, NY': '013',
'Chemung County, NY': '015',
'Chenango County, NY': '017',
'Clinton County, NY': '019',
'Columbia County, NY': '021',
'Cortland County, NY': '023',
'Delaware County, NY': '025',
'Dutchess County, NY': '027',
'Erie County, NY': '029',
'Essex County, NY': '031',
'Franklin County, NY': '033',
'Fulton County, NY': '035',
'Genesee County, NY': '037',
'Greene County, NY': '039',
'Hamilton County, NY': '041',
'Herkimer County, NY': '043',
'Jefferson County, NY': '045',
'Kings County, NY': '047',
'Lewis County, NY': '049',
'Livingston County, NY': '051',
'Madison County, NY': '053',
'Monroe County, NY': '055',
'Montgomery County, NY': '057',
'Nassau County, NY': '059',
'New York County, NY': '061',
'Niagara County, NY': '063',
'Oneida County, NY': '065',
'Onondaga County, NY': '067',
'Ontario County, NY': '069',
'Orange County, NY': '071',
'Orleans County, NY': '073',
'Oswego County, NY': '075',
'Otsego County, NY': '077',
'Putnam County, NY': '079',
'Queens County, NY': '081',
'Rensselaer County, NY': '083',
'Richmond County, NY': '085',
'Rockland County, NY': '087',
'Saratoga County, NY': '091',
'Schenectady County, NY': '093',
'Schoharie County, NY': '095',
'Schuyler County, NY': '097',
'Seneca County, NY': '099',
'St. Lawrence County, NY': '089',
'Steuben County, NY': '101',
'Suffolk County, NY': '103',
'Sullivan County, NY': '105',
'Tioga County, NY': '107',
'Tompkins County, NY': '109',
'Ulster County, NY': '111',
'Warren County, NY': '113',
'Washington County, NY': '115',
'Wayne County, NY': '117',
'Westchester County, NY': '119',
'Wyoming County, NY': '121',
'Yates County, NY': '123'},
'37': { 'Alamance County, NC': '001',
'Alexander County, NC': '003',
'Alleghany County, NC': '005',
'Anson County, NC': '007',
'Ashe County, NC': '009',
'Avery County, NC': '011',
'Beaufort County, NC': '013',
'Bertie County, NC': '015',
'Bladen County, NC': '017',
'Brunswick County, NC': '019',
'Buncombe County, NC': '021',
'Burke County, NC': '023',
'Cabarrus County, NC': '025',
'Caldwell County, NC': '027',
'Camden County, NC': '029',
'Carteret County, NC': '031',
'Caswell County, NC': '033',
'Catawba County, NC': '035',
'Chatham County, NC': '037',
'Cherokee County, NC': '039',
'Chowan County, NC': '041',
'Clay County, NC': '043',
'Cleveland County, NC': '045',
'Columbus County, NC': '047',
'Craven County, NC': '049',
'Cumberland County, NC': '051',
'Currituck County, NC': '053',
'Dare County, NC': '055',
'Davidson County, NC': '057',
'Davie County, NC': '059',
'Duplin County, NC': '061',
'Durham County, NC': '063',
'Edgecombe County, NC': '065',
'Forsyth County, NC': '067',
'Franklin County, NC': '069',
'Gaston County, NC': '071',
'Gates County, NC': '073',
'Graham County, NC': '075',
'Granville County, NC': '077',
'Greene County, NC': '079',
'Guilford County, NC': '081',
'Halifax County, NC': '083',
'Harnett County, NC': '085',
'Haywood County, NC': '087',
'Henderson County, NC': '089',
'Hertford County, NC': '091',
'Hoke County, NC': '093',
'Hyde County, NC': '095',
'Iredell County, NC': '097',
'Jackson County, NC': '099',
'Johnston County, NC': '101',
'Jones County, NC': '103',
'Lee County, NC': '105',
'Lenoir County, NC': '107',
'Lincoln County, NC': '109',
'Macon County, NC': '113',
'Madison County, NC': '115',
'Martin County, NC': '117',
'McDowell County, NC': '111',
'Mecklenburg County, NC': '119',
'Mitchell County, NC': '121',
'Montgomery County, NC': '123',
'Moore County, NC': '125',
'Nash County, NC': '127',
'New Hanover County, NC': '129',
'Northampton County, NC': '131',
'Onslow County, NC': '133',
'Orange County, NC': '135',
'Pamlico County, NC': '137',
'Pasquotank County, NC': '139',
'Pender County, NC': '141',
'Perquimans County, NC': '143',
'Person County, NC': '145',
'Pitt County, NC': '147',
'Polk County, NC': '149',
'Randolph County, NC': '151',
'Richmond County, NC': '153',
'Robeson County, NC': '155',
'Rockingham County, NC': '157',
'Rowan County, NC': '159',
'Rutherford County, NC': '161',
'Sampson County, NC': '163',
'Scotland County, NC': '165',
'Stanly County, NC': '167',
'Stokes County, NC': '169',
'Surry County, NC': '171',
'Swain County, NC': '173',
'Transylvania County, NC': '175',
'Tyrrell County, NC': '177',
'Union County, NC': '179',
'Vance County, NC': '181',
'Wake County, NC': '183',
'Warren County, NC': '185',
'Washington County, NC': '187',
'Watauga County, NC': '189',
'Wayne County, NC': '191',
'Wilkes County, NC': '193',
'Wilson County, NC': '195',
'Yadkin County, NC': '197',
'Yancey County, NC': '199'},
'38': { 'Adams County, ND': '001',
'Barnes County, ND': '003',
'Benson County, ND': '005',
'Billings County, ND': '007',
'Bottineau County, ND': '009',
'Bowman County, ND': '011',
'Burke County, ND': '013',
'Burleigh County, ND': '015',
'Cass County, ND': '017',
'Cavalier County, ND': '019',
'Dickey County, ND': '021',
'Divide County, ND': '023',
'Dunn County, ND': '025',
'Eddy County, ND': '027',
'Emmons County, ND': '029',
'Foster County, ND': '031',
'Golden Valley County, ND': '033',
'Grand Forks County, ND': '035',
'Grant County, ND': '037',
'Griggs County, ND': '039',
'Hettinger County, ND': '041',
'Kidder County, ND': '043',
'LaMoure County, ND': '045',
'Logan County, ND': '047',
'McHenry County, ND': '049',
'McIntosh County, ND': '051',
'McKenzie County, ND': '053',
'McLean County, ND': '055',
'Mercer County, ND': '057',
'Morton County, ND': '059',
'Mountrail County, ND': '061',
'Nelson County, ND': '063',
'Oliver County, ND': '065',
'Pembina County, ND': '067',
'Pierce County, ND': '069',
'Ramsey County, ND': '071',
'Ransom County, ND': '073',
'Renville County, ND': '075',
'Richland County, ND': '077',
'Rolette County, ND': '079',
'Sargent County, ND': '081',
'Sheridan County, ND': '083',
'Sioux County, ND': '085',
'Slope County, ND': '087',
'Stark County, ND': '089',
'Steele County, ND': '091',
'Stutsman County, ND': '093',
'Towner County, ND': '095',
'Traill County, ND': '097',
'Walsh County, ND': '099',
'Ward County, ND': '101',
'Wells County, ND': '103',
'Williams County, ND': '105'},
'39': { 'Adams County, OH': '001',
'Allen County, OH': '003',
'Ashland County, OH': '005',
'Ashtabula County, OH': '007',
'Athens County, OH': '009',
'Auglaize County, OH': '011',
'Belmont County, OH': '013',
'Brown County, OH': '015',
'Butler County, OH': '017',
'Carroll County, OH': '019',
'Champaign County, OH': '021',
'Clark County, OH': '023',
'Clermont County, OH': '025',
'Clinton County, OH': '027',
'Columbiana County, OH': '029',
'Coshocton County, OH': '031',
'Crawford County, OH': '033',
'Cuyahoga County, OH': '035',
'Darke County, OH': '037',
'Defiance County, OH': '039',
'Delaware County, OH': '041',
'Erie County, OH': '043',
'Fairfield County, OH': '045',
'Fayette County, OH': '047',
'Franklin County, OH': '049',
'Fulton County, OH': '051',
'Gallia County, OH': '053',
'Geauga County, OH': '055',
'Greene County, OH': '057',
'Guernsey County, OH': '059',
'Hamilton County, OH': '061',
'Hancock County, OH': '063',
'Hardin County, OH': '065',
'Harrison County, OH': '067',
'Henry County, OH': '069',
'Highland County, OH': '071',
'Hocking County, OH': '073',
'Holmes County, OH': '075',
'Huron County, OH': '077',
'Jackson County, OH': '079',
'Jefferson County, OH': '081',
'Knox County, OH': '083',
'Lake County, OH': '085',
'Lawrence County, OH': '087',
'Licking County, OH': '089',
'Logan County, OH': '091',
'Lorain County, OH': '093',
'Lucas County, OH': '095',
'Madison County, OH': '097',
'Mahoning County, OH': '099',
'Marion County, OH': '101',
'Medina County, OH': '103',
'Meigs County, OH': '105',
'Mercer County, OH': '107',
'Miami County, OH': '109',
'Monroe County, OH': '111',
'Montgomery County, OH': '113',
'Morgan County, OH': '115',
'Morrow County, OH': '117',
'Muskingum County, OH': '119',
'Noble County, OH': '121',
'Ottawa County, OH': '123',
'Paulding County, OH': '125',
'Perry County, OH': '127',
'Pickaway County, OH': '129',
'Pike County, OH': '131',
'Portage County, OH': '133',
'Preble County, OH': '135',
'Putnam County, OH': '137',
'Richland County, OH': '139',
'Ross County, OH': '141',
'Sandusky County, OH': '143',
'Scioto County, OH': '145',
'Seneca County, OH': '147',
'Shelby County, OH': '149',
'Stark County, OH': '151',
'Summit County, OH': '153',
'Trumbull County, OH': '155',
'Tuscarawas County, OH': '157',
'Union County, OH': '159',
'Van Wert County, OH': '161',
'Vinton County, OH': '163',
'Warren County, OH': '165',
'Washington County, OH': '167',
'Wayne County, OH': '169',
'Williams County, OH': '171',
'Wood County, OH': '173',
'Wyandot County, OH': '175'},
'40': { 'Adair County, OK': '001',
'Alfalfa County, OK': '003',
'Atoka County, OK': '005',
'Beaver County, OK': '007',
'Beckham County, OK': '009',
'Blaine County, OK': '011',
'Bryan County, OK': '013',
'Caddo County, OK': '015',
'Canadian County, OK': '017',
'Carter County, OK': '019',
'Cherokee County, OK': '021',
'Choctaw County, OK': '023',
'Cimarron County, OK': '025',
'Cleveland County, OK': '027',
'Coal County, OK': '029',
'Comanche County, OK': '031',
'Cotton County, OK': '033',
'Craig County, OK': '035',
'Creek County, OK': '037',
'Custer County, OK': '039',
'Delaware County, OK': '041',
'Dewey County, OK': '043',
'Ellis County, OK': '045',
'Garfield County, OK': '047',
'Garvin County, OK': '049',
'Grady County, OK': '051',
'Grant County, OK': '053',
'Greer County, OK': '055',
'Harmon County, OK': '057',
'Harper County, OK': '059',
'Haskell County, OK': '061',
'Hughes County, OK': '063',
'Jackson County, OK': '065',
'Jefferson County, OK': '067',
'Johnston County, OK': '069',
'Kay County, OK': '071',
'Kingfisher County, OK': '073',
'Kiowa County, OK': '075',
'Latimer County, OK': '077',
'Le Flore County, OK': '079',
'Lincoln County, OK': '081',
'Logan County, OK': '083',
'Love County, OK': '085',
'Major County, OK': '093',
'Marshall County, OK': '095',
'Mayes County, OK': '097',
'McClain County, OK': '087',
'McCurtain County, OK': '089',
'McIntosh County, OK': '091',
'Murray County, OK': '099',
'Muskogee County, OK': '101',
'Noble County, OK': '103',
'Nowata County, OK': '105',
'Okfuskee County, OK': '107',
'Oklahoma County, OK': '109',
'Okmulgee County, OK': '111',
'Osage County, OK': '113',
'Ottawa County, OK': '115',
'Pawnee County, OK': '117',
'Payne County, OK': '119',
'Pittsburg County, OK': '121',
'Pontotoc County, OK': '123',
'Pottawatomie County, OK': '125',
'Pushmataha County, OK': '127',
'Roger Mills County, OK': '129',
'Rogers County, OK': '131',
'Seminole County, OK': '133',
'Sequoyah County, OK': '135',
'Stephens County, OK': '137',
'Texas County, OK': '139',
'Tillman County, OK': '141',
'Tulsa County, OK': '143',
'Wagoner County, OK': '145',
'Washington County, OK': '147',
'Washita County, OK': '149',
'Woods County, OK': '151',
'Woodward County, OK': '153'},
'41': { 'Baker County, OR': '001',
'Benton County, OR': '003',
'Clackamas County, OR': '005',
'Clatsop County, OR': '007',
'Columbia County, OR': '009',
'Coos County, OR': '011',
'Crook County, OR': '013',
'Curry County, OR': '015',
'Deschutes County, OR': '017',
'Douglas County, OR': '019',
'Gilliam County, OR': '021',
'Grant County, OR': '023',
'Harney County, OR': '025',
'Hood River County, OR': '027',
'Jackson County, OR': '029',
'Jefferson County, OR': '031',
'Josephine County, OR': '033',
'Klamath County, OR': '035',
'Lake County, OR': '037',
'Lane County, OR': '039',
'Lincoln County, OR': '041',
'Linn County, OR': '043',
'Malheur County, OR': '045',
'Marion County, OR': '047',
'Morrow County, OR': '049',
'Multnomah County, OR': '051',
'Polk County, OR': '053',
'Sherman County, OR': '055',
'Tillamook County, OR': '057',
'Umatilla County, OR': '059',
'Union County, OR': '061',
'Wallowa County, OR': '063',
'Wasco County, OR': '065',
'Washington County, OR': '067',
'Wheeler County, OR': '069',
'Yamhill County, OR': '071'},
'42': { 'Adams County, PA': '001',
'Allegheny County, PA': '003',
'Armstrong County, PA': '005',
'Beaver County, PA': '007',
'Bedford County, PA': '009',
'Berks County, PA': '011',
'Blair County, PA': '013',
'Bradford County, PA': '015',
'Bucks County, PA': '017',
'Butler County, PA': '019',
'Cambria County, PA': '021',
'Cameron County, PA': '023',
'Carbon County, PA': '025',
'Centre County, PA': '027',
'Chester County, PA': '029',
'Clarion County, PA': '031',
'Clearfield County, PA': '033',
'Clinton County, PA': '035',
'Columbia County, PA': '037',
'Crawford County, PA': '039',
'Cumberland County, PA': '041',
'Dauphin County, PA': '043',
'Delaware County, PA': '045',
'Elk County, PA': '047',
'Erie County, PA': '049',
'Fayette County, PA': '051',
'Forest County, PA': '053',
'Franklin County, PA': '055',
'Fulton County, PA': '057',
'Greene County, PA': '059',
'Huntingdon County, PA': '061',
'Indiana County, PA': '063',
'Jefferson County, PA': '065',
'Juniata County, PA': '067',
'Lackawanna County, PA': '069',
'Lancaster County, PA': '071',
'Lawrence County, PA': '073',
'Lebanon County, PA': '075',
'Lehigh County, PA': '077',
'Luzerne County, PA': '079',
'Lycoming County, PA': '081',
'McKean County, PA': '083',
'Mercer County, PA': '085',
'Mifflin County, PA': '087',
'Monroe County, PA': '089',
'Montgomery County, PA': '091',
'Montour County, PA': '093',
'Northampton County, PA': '095',
'Northumberland County, PA': '097',
'Perry County, PA': '099',
'Philadelphia County/city, PA': '101',
'Pike County, PA': '103',
'Potter County, PA': '105',
'Schuylkill County, PA': '107',
'Snyder County, PA': '109',
'Somerset County, PA': '111',
'Sullivan County, PA': '113',
'Susquehanna County, PA': '115',
'Tioga County, PA': '117',
'Union County, PA': '119',
'Venango County, PA': '121',
'Warren County, PA': '123',
'Washington County, PA': '125',
'Wayne County, PA': '127',
'Westmoreland County, PA': '129',
'Wyoming County, PA': '131',
'York County, PA': '133'},
'44': { 'Bristol County, RI': '001',
'Kent County, RI': '003',
'Newport County, RI': '005',
'Providence County, RI': '007',
'Washington County, RI': '009'},
'45': { 'Abbeville County, SC': '001',
'Aiken County, SC': '003',
'Allendale County, SC': '005',
'Anderson County, SC': '007',
'Bamberg County, SC': '009',
'Barnwell County, SC': '011',
'Beaufort County, SC': '013',
'Berkeley County, SC': '015',
'Calhoun County, SC': '017',
'Charleston County, SC': '019',
'Cherokee County, SC': '021',
'Chester County, SC': '023',
'Chesterfield County, SC': '025',
'Clarendon County, SC': '027',
'Colleton County, SC': '029',
'Darlington County, SC': '031',
'Dillon County, SC': '033',
'Dorchester County, SC': '035',
'Edgefield County, SC': '037',
'Fairfield County, SC': '039',
'Florence County, SC': '041',
'Georgetown County, SC': '043',
'Greenville County, SC': '045',
'Greenwood County, SC': '047',
'Hampton County, SC': '049',
'Horry County, SC': '051',
'Jasper County, SC': '053',
'Kershaw County, SC': '055',
'Lancaster County, SC': '057',
'Laurens County, SC': '059',
'Lee County, SC': '061',
'Lexington County, SC': '063',
'Marion County, SC': '067',
'Marlboro County, SC': '069',
'McCormick County, SC': '065',
'Newberry County, SC': '071',
'Oconee County, SC': '073',
'Orangeburg County, SC': '075',
'Pickens County, SC': '077',
'Richland County, SC': '079',
'Saluda County, SC': '081',
'Spartanburg County, SC': '083',
'Sumter County, SC': '085',
'Union County, SC': '087',
'Williamsburg County, SC': '089',
'York County, SC': '091'},
'46': { 'Aurora County, SD': '003',
'Beadle County, SD': '005',
'Bennett County, SD': '007',
'Bon Homme County, SD': '009',
'Brookings County, SD': '011',
'Brown County, SD': '013',
'Brule County, SD': '015',
'Buffalo County, SD': '017',
'Butte County, SD': '019',
'Campbell County, SD': '021',
'Charles Mix County, SD': '023',
'Clark County, SD': '025',
'Clay County, SD': '027',
'Codington County, SD': '029',
'Corson County, SD': '031',
'Custer County, SD': '033',
'Davison County, SD': '035',
'Day County, SD': '037',
'Deuel County, SD': '039',
'Dewey County, SD': '041',
'Douglas County, SD': '043',
'Edmunds County, SD': '045',
'Fall River County, SD': '047',
'Faulk County, SD': '049',
'Grant County, SD': '051',
'Gregory County, SD': '053',
'Haakon County, SD': '055',
'Hamlin County, SD': '057',
'Hand County, SD': '059',
'Hanson County, SD': '061',
'Harding County, SD': '063',
'Hughes County, SD': '065',
'Hutchinson County, SD': '067',
'Hyde County, SD': '069',
'Jackson County, SD': '071',
'Jerauld County, SD': '073',
'Jones County, SD': '075',
'Kingsbury County, SD': '077',
'Lake County, SD': '079',
'Lawrence County, SD': '081',
'Lincoln County, SD': '083',
'Lyman County, SD': '085',
'Marshall County, SD': '091',
'McCook County, SD': '087',
'McPherson County, SD': '089',
'Meade County, SD': '093',
'Mellette County, SD': '095',
'Miner County, SD': '097',
'Minnehaha County, SD': '099',
'Moody County, SD': '101',
'Pennington County, SD': '103',
'Perkins County, SD': '105',
'Potter County, SD': '107',
'Roberts County, SD': '109',
'Sanborn County, SD': '111',
'Shannon County, SD': '113',
'Spink County, SD': '115',
'Stanley County, SD': '117',
'Sully County, SD': '119',
'Todd County, SD': '121',
'Tripp County, SD': '123',
'Turner County, SD': '125',
'Union County, SD': '127',
'Walworth County, SD': '129',
'Yankton County, SD': '135',
'Ziebach County, SD': '137'},
'47': { 'Anderson County, TN': '001',
'Bedford County, TN': '003',
'Benton County, TN': '005',
'Bledsoe County, TN': '007',
'Blount County, TN': '009',
'Bradley County, TN': '011',
'Campbell County, TN': '013',
'Cannon County, TN': '015',
'Carroll County, TN': '017',
'Carter County, TN': '019',
'Cheatham County, TN': '021',
'Chester County, TN': '023',
'Claiborne County, TN': '025',
'Clay County, TN': '027',
'Cocke County, TN': '029',
'Coffee County, TN': '031',
'Crockett County, TN': '033',
'Cumberland County, TN': '035',
'Davidson County, TN': '037',
'DeKalb County, TN': '041',
'Decatur County, TN': '039',
'Dickson County, TN': '043',
'Dyer County, TN': '045',
'Fayette County, TN': '047',
'Fentress County, TN': '049',
'Franklin County, TN': '051',
'Gibson County, TN': '053',
'Giles County, TN': '055',
'Grainger County, TN': '057',
'Greene County, TN': '059',
'Grundy County, TN': '061',
'Hamblen County, TN': '063',
'Hamilton County, TN': '065',
'Hancock County, TN': '067',
'Hardeman County, TN': '069',
'Hardin County, TN': '071',
'Hawkins County, TN': '073',
'Haywood County, TN': '075',
'Henderson County, TN': '077',
'Henry County, TN': '079',
'Hickman County, TN': '081',
'Houston County, TN': '083',
'Humphreys County, TN': '085',
'Jackson County, TN': '087',
'Jefferson County, TN': '089',
'Johnson County, TN': '091',
'Knox County, TN': '093',
'Lake County, TN': '095',
'Lauderdale County, TN': '097',
'Lawrence County, TN': '099',
'Lewis County, TN': '101',
'Lincoln County, TN': '103',
'Loudon County, TN': '105',
'Macon County, TN': '111',
'Madison County, TN': '113',
'Marion County, TN': '115',
'Marshall County, TN': '117',
'Maury County, TN': '119',
'McMinn County, TN': '107',
'McNairy County, TN': '109',
'Meigs County, TN': '121',
'Monroe County, TN': '123',
'Montgomery County, TN': '125',
'Moore County, TN': '127',
'Morgan County, TN': '129',
'Obion County, TN': '131',
'Overton County, TN': '133',
'Perry County, TN': '135',
'Pickett County, TN': '137',
'Polk County, TN': '139',
'Putnam County, TN': '141',
'Rhea County, TN': '143',
'Roane County, TN': '145',
'Robertson County, TN': '147',
'Rutherford County, TN': '149',
'Scott County, TN': '151',
'Sequatchie County, TN': '153',
'Sevier County, TN': '155',
'Shelby County, TN': '157',
'Smith County, TN': '159',
'Stewart County, TN': '161',
'Sullivan County, TN': '163',
'Sumner County, TN': '165',
'Tipton County, TN': '167',
'Trousdale County, TN': '169',
'Unicoi County, TN': '171',
'Union County, TN': '173',
'Van Buren County, TN': '175',
'Warren County, TN': '177',
'Washington County, TN': '179',
'Wayne County, TN': '181',
'Weakley County, TN': '183',
'White County, TN': '185',
'Williamson County, TN': '187',
'Wilson County, TN': '189'},
'48': { 'Anderson County, TX': '001',
'Andrews County, TX': '003',
'Angelina County, TX': '005',
'Aransas County, TX': '007',
'Archer County, TX': '009',
'Armstrong County, TX': '011',
'Atascosa County, TX': '013',
'Austin County, TX': '015',
'Bailey County, TX': '017',
'Bandera County, TX': '019',
'Bastrop County, TX': '021',
'Baylor County, TX': '023',
'Bee County, TX': '025',
'Bell County, TX': '027',
'Bexar County, TX': '029',
'Blanco County, TX': '031',
'Borden County, TX': '033',
'Bosque County, TX': '035',
'Bowie County, TX': '037',
'Brazoria County, TX': '039',
'Brazos County, TX': '041',
'Brewster County, TX': '043',
'Briscoe County, TX': '045',
'Brooks County, TX': '047',
'Brown County, TX': '049',
'Burleson County, TX': '051',
'Burnet County, TX': '053',
'Caldwell County, TX': '055',
'Calhoun County, TX': '057',
'Callahan County, TX': '059',
'Cameron County, TX': '061',
'Camp County, TX': '063',
'Carson County, TX': '065',
'Cass County, TX': '067',
'Castro County, TX': '069',
'Chambers County, TX': '071',
'Cherokee County, TX': '073',
'Childress County, TX': '075',
'Clay County, TX': '077',
'Cochran County, TX': '079',
'Coke County, TX': '081',
'Coleman County, TX': '083',
'Collin County, TX': '085',
'Collingsworth County, TX': '087',
'Colorado County, TX': '089',
'Comal County, TX': '091',
'Comanche County, TX': '093',
'Concho County, TX': '095',
'Cooke County, TX': '097',
'Coryell County, TX': '099',
'Cottle County, TX': '101',
'Crane County, TX': '103',
'Crockett County, TX': '105',
'Crosby County, TX': '107',
'Culberson County, TX': '109',
'Dallam County, TX': '111',
'Dallas County, TX': '113',
'Dawson County, TX': '115',
'DeWitt County, TX': '123',
'Deaf Smith County, TX': '117',
'Delta County, TX': '119',
'Denton County, TX': '121',
'Dickens County, TX': '125',
'Dimmit County, TX': '127',
'Donley County, TX': '129',
'Duval County, TX': '131',
'Eastland County, TX': '133',
'Ector County, TX': '135',
'Edwards County, TX': '137',
'El Paso County, TX': '141',
'Ellis County, TX': '139',
'Erath County, TX': '143',
'Falls County, TX': '145',
'Fannin County, TX': '147',
'Fayette County, TX': '149',
'Fisher County, TX': '151',
'Floyd County, TX': '153',
'Foard County, TX': '155',
'Fort Bend County, TX': '157',
'Franklin County, TX': '159',
'Freestone County, TX': '161',
'Frio County, TX': '163',
'Gaines County, TX': '165',
'Galveston County, TX': '167',
'Garza County, TX': '169',
'Gillespie County, TX': '171',
'Glasscock County, TX': '173',
'Goliad County, TX': '175',
'Gonzales County, TX': '177',
'Gray County, TX': '179',
'Grayson County, TX': '181',
'Gregg County, TX': '183',
'Grimes County, TX': '185',
'Guadalupe County, TX': '187',
'Hale County, TX': '189',
'Hall County, TX': '191',
'Hamilton County, TX': '193',
'Hansford County, TX': '195',
'Hardeman County, TX': '197',
'Hardin County, TX': '199',
'Harris County, TX': '201',
'Harrison County, TX': '203',
'Hartley County, TX': '205',
'Haskell County, TX': '207',
'Hays County, TX': '209',
'Hemphill County, TX': '211',
'Henderson County, TX': '213',
'Hidalgo County, TX': '215',
'Hill County, TX': '217',
'Hockley County, TX': '219',
'Hood County, TX': '221',
'Hopkins County, TX': '223',
'Houston County, TX': '225',
'Howard County, TX': '227',
'Hudspeth County, TX': '229',
'Hunt County, TX': '231',
'Hutchinson County, TX': '233',
'Irion County, TX': '235',
'Jack County, TX': '237',
'Jackson County, TX': '239',
'Jasper County, TX': '241',
'Jeff Davis County, TX': '243',
'Jefferson County, TX': '245',
'Jim Hogg County, TX': '247',
'Jim Wells County, TX': '249',
'Johnson County, TX': '251',
'Jones County, TX': '253',
'Karnes County, TX': '255',
'Kaufman County, TX': '257',
'Kendall County, TX': '259',
'Kenedy County, TX': '261',
'Kent County, TX': '263',
'Kerr County, TX': '265',
'Kimble County, TX': '267',
'King County, TX': '269',
'Kinney County, TX': '271',
'Kleberg County, TX': '273',
'Knox County, TX': '275',
'La Salle County, TX': '283',
'Lamar County, TX': '277',
'Lamb County, TX': '279',
'Lampasas County, TX': '281',
'Lavaca County, TX': '285',
'Lee County, TX': '287',
'Leon County, TX': '289',
'Liberty County, TX': '291',
'Limestone County, TX': '293',
'Lipscomb County, TX': '295',
'Live Oak County, TX': '297',
'Llano County, TX': '299',
'Loving County, TX': '301',
'Lubbock County, TX': '303',
'Lynn County, TX': '305',
'Madison County, TX': '313',
'Marion County, TX': '315',
'Martin County, TX': '317',
'Mason County, TX': '319',
'Matagorda County, TX': '321',
'Maverick County, TX': '323',
'McCulloch County, TX': '307',
'McLennan County, TX': '309',
'McMullen County, TX': '311',
'Medina County, TX': '325',
'Menard County, TX': '327',
'Midland County, TX': '329',
'Milam County, TX': '331',
'Mills County, TX': '333',
'Mitchell County, TX': '335',
'Montague County, TX': '337',
'Montgomery County, TX': '339',
'Moore County, TX': '341',
'Morris County, TX': '343',
'Motley County, TX': '345',
'Nacogdoches County, TX': '347',
'Navarro County, TX': '349',
'Newton County, TX': '351',
'Nolan County, TX': '353',
'Nueces County, TX': '355',
'Ochiltree County, TX': '357',
'Oldham County, TX': '359',
'Orange County, TX': '361',
'Palo Pinto County, TX': '363',
'Panola County, TX': '365',
'Parker County, TX': '367',
'Parmer County, TX': '369',
'Pecos County, TX': '371',
'Polk County, TX': '373',
'Potter County, TX': '375',
'Presidio County, TX': '377',
'Rains County, TX': '379',
'Randall County, TX': '381',
'Reagan County, TX': '383',
'Real County, TX': '385',
'Red River County, TX': '387',
'Reeves County, TX': '389',
'Refugio County, TX': '391',
'Roberts County, TX': '393',
'Robertson County, TX': '395',
'Rockwall County, TX': '397',
'Runnels County, TX': '399',
'Rusk County, TX': '401',
'Sabine County, TX': '403',
'San Augustine County, TX': '405',
'San Jacinto County, TX': '407',
'San Patricio County, TX': '409',
'San Saba County, TX': '411',
'Schleicher County, TX': '413',
'Scurry County, TX': '415',
'Shackelford County, TX': '417',
'Shelby County, TX': '419',
'Sherman County, TX': '421',
'Smith County, TX': '423',
'Somervell County, TX': '425',
'Starr County, TX': '427',
'Stephens County, TX': '429',
'Sterling County, TX': '431',
'Stonewall County, TX': '433',
'Sutton County, TX': '435',
'Swisher County, TX': '437',
'Tarrant County, TX': '439',
'Taylor County, TX': '441',
'Terrell County, TX': '443',
'Terry County, TX': '445',
'Throckmorton County, TX': '447',
'Titus County, TX': '449',
'Tom Green County, TX': '451',
'Travis County, TX': '453',
'Trinity County, TX': '455',
'Tyler County, TX': '457',
'Upshur County, TX': '459',
'Upton County, TX': '461',
'Uvalde County, TX': '463',
'Val Verde County, TX': '465',
'Van Zandt County, TX': '467',
'Victoria County, TX': '469',
'Walker County, TX': '471',
'Waller County, TX': '473',
'Ward County, TX': '475',
'Washington County, TX': '477',
'Webb County, TX': '479',
'Wharton County, TX': '481',
'Wheeler County, TX': '483',
'Wichita County, TX': '485',
'Wilbarger County, TX': '487',
'Willacy County, TX': '489',
'Williamson County, TX': '491',
'Wilson County, TX': '493',
'Winkler County, TX': '495',
'Wise County, TX': '497',
'Wood County, TX': '499',
'Yoakum County, TX': '501',
'Young County, TX': '503',
'Zapata County, TX': '505',
'Zavala County, TX': '507'},
'49': { 'Beaver County, UT': '001',
'Box Elder County, UT': '003',
'Cache County, UT': '005',
'Carbon County, UT': '007',
'Daggett County, UT': '009',
'Davis County, UT': '011',
'Duchesne County, UT': '013',
'Emery County, UT': '015',
'Garfield County, UT': '017',
'Grand County, UT': '019',
'Iron County, UT': '021',
'Juab County, UT': '023',
'Kane County, UT': '025',
'Millard County, UT': '027',
'Morgan County, UT': '029',
'Piute County, UT': '031',
'Rich County, UT': '033',
'Salt Lake County, UT': '035',
'San Juan County, UT': '037',
'Sanpete County, UT': '039',
'Sevier County, UT': '041',
'Summit County, UT': '043',
'Tooele County, UT': '045',
'Uintah County, UT': '047',
'Utah County, UT': '049',
'Wasatch County, UT': '051',
'Washington County, UT': '053',
'Wayne County, UT': '055',
'Weber County, UT': '057'},
'50': { 'Addison County, VT': '001',
'Bennington County, VT': '003',
'Caledonia County, VT': '005',
'Chittenden County, VT': '007',
'Essex County, VT': '009',
'Franklin County, VT': '011',
'Grand Isle County, VT': '013',
'Lamoille County, VT': '015',
'Orange County, VT': '017',
'Orleans County, VT': '019',
'Rutland County, VT': '021',
'Washington County, VT': '023',
'Windham County, VT': '025',
'Windsor County, VT': '027'},
'51': { 'Accomack County, VA': '001',
'Albemarle County, VA': '003',
'Alexandria city, VA': '510',
'Alleghany County, VA': '005',
'Amelia County, VA': '007',
'Amherst County, VA': '009',
'Appomattox County, VA': '011',
'Arlington County, VA': '013',
'Augusta County, VA': '015',
'Bath County, VA': '017',
'Bedford County, VA': '019',
'Bedford city, VA': '515',
'Bland County, VA': '021',
'Botetourt County, VA': '023',
'Bristol city, VA': '520',
'Brunswick County, VA': '025',
'Buchanan County, VA': '027',
'Buckingham County, VA': '029',
'Buena Vista city, VA': '530',
'Campbell County, VA': '031',
'Caroline County, VA': '033',
'Carroll County, VA': '035',
'Charles City County, VA': '036',
'Charlotte County, VA': '037',
'Charlottesville city, VA': '540',
'Chesapeake city, VA': '550',
'Chesterfield County, VA': '041',
'Clarke County, VA': '043',
'Colonial Heights city, VA': '570',
'Covington city, VA': '580',
'Craig County, VA': '045',
'Culpeper County, VA': '047',
'Cumberland County, VA': '049',
'Danville city, VA': '590',
'Dickenson County, VA': '051',
'Dinwiddie County, VA': '053',
'Emporia city, VA': '595',
'Essex County, VA': '057',
'Fairfax County, VA': '059',
'Fairfax city, VA': '600',
'Falls Church city, VA': '610',
'Fauquier County, VA': '061',
'Floyd County, VA': '063',
'Fluvanna County, VA': '065',
'Franklin County, VA': '067',
'Franklin city, VA': '620',
'Frederick County, VA': '069',
'Fredericksburg city, VA': '630',
'Galax city, VA': '640',
'Giles County, VA': '071',
'Gloucester County, VA': '073',
'Goochland County, VA': '075',
'Grayson County, VA': '077',
'Greene County, VA': '079',
'Greensville County, VA': '081',
'Halifax County, VA': '083',
'Hampton city, VA': '650',
'Hanover County, VA': '085',
'Harrisonburg city, VA': '660',
'Henrico County, VA': '087',
'Henry County, VA': '089',
'Highland County, VA': '091',
'Hopewell city, VA': '670',
'Isle of Wight County, VA': '093',
'James City County, VA': '095',
'King George County, VA': '099',
'King William County, VA': '101',
'King and Queen County, VA': '097',
'Lancaster County, VA': '103',
'Lee County, VA': '105',
'Lexington city, VA': '678',
'Loudoun County, VA': '107',
'Louisa County, VA': '109',
'Lunenburg County, VA': '111',
'Lynchburg city, VA': '680',
'Madison County, VA': '113',
'Manassas Park city, VA': '685',
'Manassas city, VA': '683',
'Martinsville city, VA': '690',
'Mathews County, VA': '115',
'Mecklenburg County, VA': '117',
'Middlesex County, VA': '119',
'Montgomery County, VA': '121',
'Nelson County, VA': '125',
'New Kent County, VA': '127',
'Newport News city, VA': '700',
'Norfolk city, VA': '710',
'Northampton County, VA': '131',
'Northumberland County, VA': '133',
'Norton city, VA': '720',
'Nottoway County, VA': '135',
'Orange County, VA': '137',
'Page County, VA': '139',
'Patrick County, VA': '141',
'Petersburg city, VA': '730',
'Pittsylvania County, VA': '143',
'Poquoson city, VA': '735',
'Portsmouth city, VA': '740',
'Powhatan County, VA': '145',
'Prince Edward County, VA': '147',
'Prince George County, VA': '149',
'Prince William County, VA': '153',
'Pulaski County, VA': '155',
'Radford city, VA': '750',
'Rappahannock County, VA': '157',
'Richmond County, VA': '159',
'Richmond city, VA': '760',
'Roanoke County, VA': '161',
'Roanoke city, VA': '770',
'Rockbridge County, VA': '163',
'Rockingham County, VA': '165',
'Russell County, VA': '167',
'Salem city, VA': '775',
'Scott County, VA': '169',
'Shenandoah County, VA': '171',
'Smyth County, VA': '173',
'Southampton County, VA': '175',
'Spotsylvania County, VA': '177',
'Stafford County, VA': '179',
'Staunton city, VA': '790',
'Suffolk city, VA': '800',
'Surry County, VA': '181',
'Sussex County, VA': '183',
'Tazewell County, VA': '185',
'Virginia Beach city, VA': '810',
'Warren County, VA': '187',
'Washington County, VA': '191',
'Waynesboro city, VA': '820',
'Westmoreland County, VA': '193',
'Williamsburg city, VA': '830',
'Winchester city, VA': '840',
'Wise County, VA': '195',
'Wythe County, VA': '197',
'York County, VA': '199'},
'53': { 'Adams County, WA': '001',
'Asotin County, WA': '003',
'Benton County, WA': '005',
'Chelan County, WA': '007',
'Clallam County, WA': '009',
'Clark County, WA': '011',
'Columbia County, WA': '013',
'Cowlitz County, WA': '015',
'Douglas County, WA': '017',
'Ferry County, WA': '019',
'Franklin County, WA': '021',
'Garfield County, WA': '023',
'Grant County, WA': '025',
'Grays Harbor County, WA': '027',
'Island County, WA': '029',
'Jefferson County, WA': '031',
'King County, WA': '033',
'Kitsap County, WA': '035',
'Kittitas County, WA': '037',
'Klickitat County, WA': '039',
'Lewis County, WA': '041',
'Lincoln County, WA': '043',
'Mason County, WA': '045',
'Okanogan County, WA': '047',
'Pacific County, WA': '049',
'Pend Oreille County, WA': '051',
'Pierce County, WA': '053',
'San Juan County, WA': '055',
'Skagit County, WA': '057',
'Skamania County, WA': '059',
'Snohomish County, WA': '061',
'Spokane County, WA': '063',
'Stevens County, WA': '065',
'Thurston County, WA': '067',
'Wahkiakum County, WA': '069',
'Walla Walla County, WA': '071',
'Whatcom County, WA': '073',
'Whitman County, WA': '075',
'Yakima County, WA': '077'},
'54': { 'Barbour County, WV': '001',
'Berkeley County, WV': '003',
'Boone County, WV': '005',
'Braxton County, WV': '007',
'Brooke County, WV': '009',
'Cabell County, WV': '011',
'Calhoun County, WV': '013',
'Clay County, WV': '015',
'Doddridge County, WV': '017',
'Fayette County, WV': '019',
'Gilmer County, WV': '021',
'Grant County, WV': '023',
'Greenbrier County, WV': '025',
'Hampshire County, WV': '027',
'Hancock County, WV': '029',
'Hardy County, WV': '031',
'Harrison County, WV': '033',
'Jackson County, WV': '035',
'Jefferson County, WV': '037',
'Kanawha County, WV': '039',
'Lewis County, WV': '041',
'Lincoln County, WV': '043',
'Logan County, WV': '045',
'Marion County, WV': '049',
'Marshall County, WV': '051',
'Mason County, WV': '053',
'McDowell County, WV': '047',
'Mercer County, WV': '055',
'Mineral County, WV': '057',
'Mingo County, WV': '059',
'Monongalia County, WV': '061',
'Monroe County, WV': '063',
'Morgan County, WV': '065',
'Nicholas County, WV': '067',
'Ohio County, WV': '069',
'Pendleton County, WV': '071',
'Pleasants County, WV': '073',
'Pocahontas County, WV': '075',
'Preston County, WV': '077',
'Putnam County, WV': '079',
'Raleigh County, WV': '081',
'Randolph County, WV': '083',
'Ritchie County, WV': '085',
'Roane County, WV': '087',
'Summers County, WV': '089',
'Taylor County, WV': '091',
'Tucker County, WV': '093',
'Tyler County, WV': '095',
'Upshur County, WV': '097',
'Wayne County, WV': '099',
'Webster County, WV': '101',
'Wetzel County, WV': '103',
'Wirt County, WV': '105',
'Wood County, WV': '107',
'Wyoming County, WV': '109'},
'55': { 'Adams County, WI': '001',
'Ashland County, WI': '003',
'Barron County, WI': '005',
'Bayfield County, WI': '007',
'Brown County, WI': '009',
'Buffalo County, WI': '011',
'Burnett County, WI': '013',
'Calumet County, WI': '015',
'Chippewa County, WI': '017',
'Clark County, WI': '019',
'Columbia County, WI': '021',
'Crawford County, WI': '023',
'Dane County, WI': '025',
'Dodge County, WI': '027',
'Door County, WI': '029',
'Douglas County, WI': '031',
'Dunn County, WI': '033',
'Eau Claire County, WI': '035',
'Florence County, WI': '037',
'Fond du Lac County, WI': '039',
'Forest County, WI': '041',
'Grant County, WI': '043',
'Green County, WI': '045',
'Green Lake County, WI': '047',
'Iowa County, WI': '049',
'Iron County, WI': '051',
'Jackson County, WI': '053',
'Jefferson County, WI': '055',
'Juneau County, WI': '057',
'Kenosha County, WI': '059',
'Kewaunee County, WI': '061',
'La Crosse County, WI': '063',
'Lafayette County, WI': '065',
'Langlade County, WI': '067',
'Lincoln County, WI': '069',
'Manitowoc County, WI': '071',
'Marathon County, WI': '073',
'Marinette County, WI': '075',
'Marquette County, WI': '077',
'Menominee County, WI': '078',
'Milwaukee County, WI': '079',
'Monroe County, WI': '081',
'Oconto County, WI': '083',
'Oneida County, WI': '085',
'Outagamie County, WI': '087',
'Ozaukee County, WI': '089',
'Pepin County, WI': '091',
'Pierce County, WI': '093',
'Polk County, WI': '095',
'Portage County, WI': '097',
'Price County, WI': '099',
'Racine County, WI': '101',
'Richland County, WI': '103',
'Rock County, WI': '105',
'Rusk County, WI': '107',
'Sauk County, WI': '111',
'Sawyer County, WI': '113',
'Shawano County, WI': '115',
'Sheboygan County, WI': '117',
'St. Croix County, WI': '109',
'Taylor County, WI': '119',
'Trempealeau County, WI': '121',
'Vernon County, WI': '123',
'Vilas County, WI': '125',
'Walworth County, WI': '127',
'Washburn County, WI': '129',
'Washington County, WI': '131',
'Waukesha County, WI': '133',
'Waupaca County, WI': '135',
'Waushara County, WI': '137',
'Winnebago County, WI': '139',
'Wood County, WI': '141'},
'56': { 'Albany County, WY': '001',
'Big Horn County, WY': '003',
'Campbell County, WY': '005',
'Carbon County, WY': '007',
'Converse County, WY': '009',
'Crook County, WY': '011',
'Fremont County, WY': '013',
'Goshen County, WY': '015',
'Hot Springs County, WY': '017',
'Johnson County, WY': '019',
'Laramie County, WY': '021',
'Lincoln County, WY': '023',
'Natrona County, WY': '025',
'Niobrara County, WY': '027',
'Park County, WY': '029',
'Platte County, WY': '031',
'Sheridan County, WY': '033',
'Sublette County, WY': '035',
'Sweetwater County, WY': '037',
'Teton County, WY': '039',
'Uinta County, WY': '041',
'Washakie County, WY': '043',
'Weston County, WY': '045'},
'72': { 'Adjuntas Municipio, PR': '001',
'Aguada Municipio, PR': '003',
'Aguadilla Municipio, PR': '005',
'Aguas Buenas Municipio, PR': '007',
'Aibonito Municipio, PR': '009',
'Anasco Municipio, PR': '011',
'Arecibo Municipio, PR': '013',
'Arroyo Municipio, PR': '015',
'Barceloneta Municipio, PR': '017',
'Barranquitas Municipio, PR': '019',
'Bayamon Municipio, PR': '021',
'Cabo Rojo Municipio, PR': '023',
'Caguas Municipio, PR': '025',
'Camuy Municipio, PR': '027',
'Canovanas Municipio, PR': '029',
'Carolina Municipio, PR': '031',
'Catano Municipio, PR': '033',
'Cayey Municipio, PR': '035',
'Ceiba Municipio, PR': '037',
'Ciales Municipio, PR': '039',
'Cidra Municipio, PR': '041',
'Coamo Municipio, PR': '043',
'Comerio Municipio, PR': '045',
'Corozal Municipio, PR': '047',
'Culebra Municipio, PR': '049',
'Dorado Municipio, PR': '051',
'Fajardo Municipio, PR': '053',
'Florida Municipio, PR': '054',
'Guanica Municipio, PR': '055',
'Guayama Municipio, PR': '057',
'Guayanilla Municipio, PR': '059',
'Guaynabo Municipio, PR': '061',
'Gurabo Municipio, PR': '063',
'Hatillo Municipio, PR': '065',
'Hormigueros Municipio, PR': '067',
'Humacao Municipio, PR': '069',
'Isabela Municipio, PR': '071',
'Jayuya Municipio, PR': '073',
'Juana Diaz Municipio, PR': '075',
'Juncos Municipio, PR': '077',
'Lajas Municipio, PR': '079',
'Lares Municipio, PR': '081',
'Las Marias Municipio, PR': '083',
'Las Piedras Municipio, PR': '085',
'Loiza Municipio, PR': '087',
'Luquillo Municipio, PR': '089',
'Manati Municipio, PR': '091',
'Maricao Municipio, PR': '093',
'Maunabo Municipio, PR': '095',
'Mayaguez Municipio, PR': '097',
'Moca Municipio, PR': '099',
'Morovis Municipio, PR': '101',
'Naguabo Municipio, PR': '103',
'Naranjito Municipio, PR': '105',
'Orocovis Municipio, PR': '107',
'Patillas Municipio, PR': '109',
'Penuelas Municipio, PR': '111',
'Ponce Municipio, PR': '113',
'Quebradillas Municipio, PR': '115',
'Rincon Municipio, PR': '117',
'Rio Grande Municipio, PR': '119',
'Sabana Grande Municipio, PR': '121',
'Salinas Municipio, PR': '123',
'San German Municipio, PR': '125',
'San Juan Municipio, PR': '127',
'San Lorenzo Municipio, PR': '129',
'San Sebastian Municipio, PR': '131',
'Santa Isabel Municipio, PR': '133',
'Toa Alta Municipio, PR': '135',
'Toa Baja Municipio, PR': '137',
'Trujillo Alto Municipio, PR': '139',
'Utuado Municipio, PR': '141',
'Vega Alta Municipio, PR': '143',
'Vega Baja Municipio, PR': '145',
'Vieques Municipio, PR': '147',
'Villalba Municipio, PR': '149',
'Yabucoa Municipio, PR': '151',
'Yauco Municipio, PR': '153'},
"CA01": { '--All--': '%', },
"CA02": { '--All--': '%', },
"CA03": { '--All--': '%', },
"CA04": { '--All--': '%', },
"CA05": { '--All--': '%', },
"CA13": { '--All--': '%', },
"CA07": { '--All--': '%', },
"CA14": { '--All--': '%', },
"CA08": { '--All--': '%', },
"CA09": { '--All--': '%', },
"CA10": { '--All--': '%', },
"CA11": { '--All--': '%', },
"CA12": { '--All--': '%', },
}
if __name__ == "__main__":
    # Merge county rows from a FIPS CSV file (path given as argv[1]) into
    # the module-level FIPS_COUNTIES mapping and pretty-print the result.
    from sys import argv
    from pprint import PrettyPrinter
    import csv

    pp = PrettyPrinter(indent=2)
    # 'rb' is the csv module's documented file mode on Python 2.
    with open(argv[1], 'rb') as fips_file:
        fipsreader = csv.reader(fips_file, delimiter=',', quotechar='"')
        for row in fipsreader:
            # Row layout (from the loop body): row[1] = state FIPS code,
            # row[2] = county FIPS code, row[3] = county name.
            # setdefault replaces the original try/except KeyError pair
            # that duplicated the insert statement.
            state = FIPS_COUNTIES.setdefault(int(row[1]), {'--All--': '%'})
            state[row[3]] = row[2]
    pp.pprint(FIPS_COUNTIES)
| cl4u2/chirp | chirpui/fips.py | Python | gpl-3.0 | 250,263 | [
"COLUMBUS",
"Elk"
] | fdca78da308d097848b0e628aba8a394bf839a2b473ec4073e471b39428dd742 |
import numpy as np
import cudamat as cm
import time
import optparse
import matplotlib.pyplot as plt
import os
def save_plot(fname, x, train, cv, xlines=None, min_line=False, dpi=80):
    """Save a PNG plot of the train/cv error curves.

    Args:
        fname: output image path.
        x: x-axis values (epoch indices).
        train: per-epoch training error values.
        cv: per-epoch cross-validation error values (may be empty when no
            validation set was supplied).
        xlines: optional iterable of epoch indices to mark with vertical
            lines (e.g. epochs where the learning rate was corrected).
        min_line: when True, draw a dashed horizontal line at the cv minimum.
        dpi: figure resolution.
    """
    plt.figure(figsize=(18, 12), dpi=dpi)
    plt.plot(x, train, 'r', label='train')
    # Guard every cv-derived statistic: the original read the *global*
    # cv_error list instead of the cv parameter, and crashed on np.min([])
    # when training ran without a validation set.
    has_cv = len(cv) > 0
    if has_cv:
        plt.plot(x, cv, 'b', label='cv')
    if xlines is not None:
        # Separate loop variable: the original shadowed the ``x`` axis
        # parameter here.
        for xl in xlines:
            plt.axvline(xl, color='g')
            if has_cv:
                # Dashed line from the position of the cv minimum (so far)
                # up to the marked epoch, at the height of that minimum.
                plt.hlines(np.min(cv[:xl]), np.argmin(cv[:xl]), xl,
                           color='green', linestyle='dashed')
    if min_line and has_cv:
        plt.axhline(np.min(cv), color='gray', linestyle='dashed')
    plt.legend(loc='upper right')
    if has_cv:
        plt.title('NN training, cv_min: %0.8f' % np.min(cv))
    else:
        plt.title('NN training')
    plt.ylabel('Error')
    plt.xlabel('Epoche')
    plt.savefig(fname)
    plt.clf()
    plt.close()
def harr(dev_m):
    """Pull a cudamat device matrix back to the host.

    Triggers the device-to-host copy and hands back the host-side buffer
    (the ``numpy_array`` attribute that ``copy_to_host`` refreshes).
    """
    dev_m.copy_to_host()
    return dev_m.numpy_array
def leveldb_batch_generator(db, batch_size=100, mode='v'):
    """Yield lists of at most ``batch_size`` entries from a LevelDB handle.

    ``mode`` selects what each entry is: 'k' -> key only, 'v' -> value
    decoded with np.frombuffer (default), anything else -> (key, decoded
    value) pair.  A final, possibly shorter, batch is yielded for the
    remainder.
    """
    chunk = []
    for key, value in db.RangeIter():
        if mode == 'k':
            item = key
        elif mode == 'v':
            item = np.frombuffer(value)
        else:
            item = (key, np.frombuffer(value))
        chunk.append(item)
        if len(chunk) == batch_size:
            yield chunk
            chunk = []
    if chunk:
        yield chunk
if __name__ == "__main__":
    # Command-line driver: trains a (Gaussian-)Bernoulli RBM with CD-1 on
    # the GPU through cudamat.  Train/valid data are directories of .npz
    # "batch pack" files, streamed to the GPU in chunks sized to fit the
    # configured GPU memory limit.
    parser = optparse.OptionParser()
    parser.add_option('-o', dest="output", help='Output file name', default='rbm')
    parser.add_option('--eps', dest="epsilon", type=float, help='Learning rate', default=0.001)
    parser.add_option('--mom', dest="momentum", type=float, help='Momentum rate', default=0.9)
    parser.add_option('--ne', dest="num_epochs", type=int, help='Number of epoches', default=30)
    parser.add_option('--bs', dest="batch_size", type=int, help='Size of the batch', default=32)
    #parser.add_option('--tsr', dest="train_set_ram", type=int, help='Number of train examples loaded to RAM', default=20000)
    parser.add_option('--nv', dest="num_vis", type=int, help='Number of visible units', default=4608)
    parser.add_option('--nh', dest="num_hid", type=int, help='Number of hidden units', default=1024)
    parser.add_option('--gv', dest="gaussian_vis", action='store_true', help='Gaussian visible units', default=False)
    parser.add_option('--mc', dest="minimum_error_change", type=float, help='Minimum error change step', default=0)
    #parser.add_option('--cvr', dest="cv_ratio", type=float, help='Ratio of cross-validation '
    #                  'data sampled from input dataset', default=None)
    parser.add_option('--cvmd', dest="cv_stop_factor", type=float, help='Maximum allowed distance between global cv '
                      'error minimum and current value', default=0.1)
    parser.add_option('--do', dest="drop_out", type=float, help='Drop out probability', default=None)
    parser.add_option('--lam', dest='lmbd', type=float, help='Regularization rate', default=None)
    parser.add_option('--rt', dest="reg_type", help='Type of regularization: l1, l2 or en', default='en')
    parser.add_option('--en_lam', dest="en_lmbd", type=float, help='Elastic net lambda', default=0.5)
    parser.add_option('--aef', dest="ada_eps_factor", type=float, help='Adaptive learning rate', default=0.5)
    parser.add_option('--aefm', dest="ada_eps_min", type=float, help='Adaptive learning rate', default=0.00001)
    parser.add_option('--sb', dest="save_best", action='store_true', help='Save best rbm', default=False)
    parser.add_option('-l', dest="load_rbm", help='Load rbm weights', default=None)
    parser.add_option('--ml', dest="memory_limit", type=float, help='GPU memory limit (Mb)', default=2000)
    parser.add_option('-d', dest="debug", action='store_true', help='Debug mode', default=False)
    parser.add_option('--kt', dest="keep_train", action='store_true',
                      help='Do not delete train dataset from RAM (needed for small ds)', default=False)
    parser.add_option('--kv', dest="keep_valid", action='store_true',
                      help='Do not delete valid dataset from RAM (needed for small ds)', default=False)
    options, input_dirs = parser.parse_args()  # first path is to train set, next one is to validation set

    def dlog(s, do_log=options.debug):
        # Debug-only logger; do_log is bound to --debug at definition time.
        if not do_log:
            return
        print s

    # Validate and normalize the train/valid directory paths.
    if not os.path.isdir(input_dirs[0]):
        print 'ERROR: Cant find train dir %s' % input_dirs[0]
        exit(0)
    train_path = input_dirs[0] + ('' if input_dirs[0].endswith('/') else '/')
    test_path = None
    if len(input_dirs) > 1:
        if not os.path.isdir(input_dirs[1]):
            print 'ERROR: Cant find valid dir %s' % input_dirs[0]
            exit(0)
        test_path = input_dirs[1] + ('' if input_dirs[1].endswith('/') else '/')
    # Running tally (in bytes) of GPU memory allocated so far; used below to
    # work out how many batches fit in the remaining budget.
    base_mem_consumption = 0.0
    if options.gaussian_vis:
        print 'Gaussian-Bernoulli mode'
    else:
        print 'Bernoulli-Bernoulli mode'
    # initialize CUDA
    cm.cublas_init()
    base_mem_consumption += 80*1024*1024
    cm.CUDAMatrix.init_random(1)
    base_mem_consumption += 2*1024*1024
    # training parameters
    epsilon = options.epsilon
    momentum = options.momentum
    num_epochs = options.num_epochs
    batch_size = options.batch_size
    # model parameters
    num_vis = options.num_vis
    num_hid = options.num_hid
    # initialize weights (fresh random init, or resume from a saved .npz)
    if options.load_rbm is None:
        w_vh = cm.CUDAMatrix(0.1 * np.random.randn(num_vis, num_hid))
        base_mem_consumption += num_vis*num_hid*4
        w_v = cm.CUDAMatrix(np.zeros((num_vis, 1)))
        base_mem_consumption += num_vis*4
        w_h = cm.CUDAMatrix(-4.*np.ones((num_hid, 1)))
        base_mem_consumption += num_hid*4
    else:
        d = np.load(options.load_rbm)
        w_vh = cm.CUDAMatrix(d['w_vh'])
        base_mem_consumption += np.prod(d['w_vh'].shape)*4
        w_v = cm.CUDAMatrix(d['w_v'])
        base_mem_consumption += np.prod(d['w_v'].shape)*4
        w_h = cm.CUDAMatrix(d['w_h'])
        base_mem_consumption += np.prod(d['w_h'].shape)*4
        del(d)
        print 'INFO: params loaded from ' + options.load_rbm
    # Snapshot matrices for the adaptive-learning-rate scheme: weights are
    # rolled back to these when cv error diverges and epsilon is reduced.
    epoch_eps_corrected = None
    if options.ada_eps_factor is not None:
        w_vh_min = cm.CUDAMatrix(0.1 * np.random.randn(num_vis, num_hid))
        base_mem_consumption += num_vis*num_hid*4
        w_v_min = cm.CUDAMatrix(np.zeros((num_vis, 1)))
        base_mem_consumption += num_vis*4
        w_h_min = cm.CUDAMatrix(-4.*np.ones((num_hid, 1)))
        base_mem_consumption += num_hid*4
        epoch_eps_corrected = []
    # initialize weight updates (momentum accumulators)
    wu_vh = cm.CUDAMatrix(np.zeros((num_vis, num_hid)))
    base_mem_consumption += num_vis*num_hid*4
    wu_v = cm.CUDAMatrix(np.zeros((num_vis, 1)))
    base_mem_consumption += num_vis*4
    wu_h = cm.CUDAMatrix(np.zeros((num_hid, 1)))
    base_mem_consumption += num_hid*4
    # initialize temporary storage (visible/hidden activations and a
    # uniform-random buffer used for Bernoulli sampling)
    v = cm.empty((num_vis, batch_size))
    base_mem_consumption += num_vis*batch_size*4
    h = cm.empty((num_hid, batch_size))
    base_mem_consumption += num_hid*batch_size*4
    r = cm.empty((num_hid, batch_size))
    base_mem_consumption += num_hid*batch_size*4
    # dropout mask buffer (sampled per mini-batch)
    if options.drop_out is not None:
        do_h = cm.CUDAMatrix(np.zeros((num_hid, batch_size)))
        base_mem_consumption += num_hid*batch_size*4
    print 'Base memory usage: %0.2f mb' % (base_mem_consumption/(1024**2))
    free_memory = options.memory_limit*1024*1024 - base_mem_consumption
    print 'Free memory: %0.2f mb' % (free_memory/(1024**2))
    if free_memory <= 0:
        print 'ERROR: free memory is negative: %0.2f bytes' % free_memory
        cm.cublas_shutdown()
        exit(0)
    batch_mem_size = num_vis*batch_size*4.0
    print 'One batch memory size (mb): %0.2f' % (batch_mem_size/(1024**2))
    if batch_mem_size > free_memory:
        print 'ERROR: batch_mem_size > free_memory, %0.2f > %0.2f' % (batch_mem_size, free_memory)
        cm.cublas_shutdown()
        exit(0)
    # How many mini-batches can be uploaded to the GPU at once.
    batches_in_free_mem = int(np.floor(free_memory/batch_mem_size))
    print 'Batches in free mem: %i' % batches_in_free_mem
    # control parameters
    train_error = []
    cv_error = []
    dat_train = None
    dat_cv = None
    dlog('Start training')
    for epoch in range(num_epochs):
        start_time = time.time()
        print "Epoch " + str(epoch + 1)
        batch_error_train = []
        batch_error_cv = []
        # ---> train set processing
        for fname_tmp in os.listdir(train_path):
            if dat_train is None or not options.keep_train:
                # Each pack file holds an array of shape (examples, num_vis).
                dat_train = np.load(train_path + fname_tmp)['data']
                dlog(' pack lodaed from train: (%s)' % ', '.join(map(lambda i: str(i), dat_train.shape)))
            batch_packs_train = dat_train.shape[0]
            # shuffle data (rows = examples), then transpose to column-major
            # layout (num_vis, examples) expected by the GPU code below.
            np.random.shuffle(dat_train)
            dat_train = dat_train.T
            dlog(' Go through dat_train')
            for batch_pack_inx in range(batch_packs_train):
                dlog(' batch_pack_inx = %i' % batch_pack_inx)
                # Slice off the next GPU-sized chunk of mini-batches.
                dat_tmp = dat_train[:, (batch_pack_inx*batch_size*batches_in_free_mem):((batch_pack_inx + 1)*batch_size*batches_in_free_mem)]
                if dat_tmp.shape[1] == 0:
                    break
                try:
                    dev_dat_train = cm.CUDAMatrix(
                        cm.reformat(dat_tmp))
                except Exception as e:
                    print 'CUDAMAT ERROR: ' + e.message
                    cm.cublas_shutdown()
                    exit(0)
                dlog(' dev_dat_train.shape = [%s]' % ', '.join(map(lambda x: str(x), dev_dat_train.shape)))
                # Python 2 integer division: a trailing partial batch is dropped.
                num_batches_train = dev_dat_train.shape[1]/batch_size
                for batch in range(num_batches_train):
                    # sample dropout mask for this mini-batch
                    if options.drop_out is not None:
                        do_h.fill_with_rand()
                        do_h.less_than(options.drop_out)
                    # get current minibatch
                    v_true = dev_dat_train.slice(batch*batch_size, (batch + 1)*batch_size)
                    v.assign(v_true)
                    # apply momentum (decay previous update before accumulating)
                    wu_vh.mult(momentum)
                    wu_v.mult(momentum)
                    wu_h.mult(momentum)
                    # positive phase: h = sigmoid(W'v + b_h), accumulate <vh> stats
                    cm.dot(w_vh.T, v, target=h)
                    h.add_col_vec(w_h)
                    h.apply_sigmoid()
                    if options.drop_out is not None:
                        h.mult(do_h)
                    wu_vh.add_dot(v, h.T)
                    wu_v.add_sums(v, axis=1)
                    wu_h.add_sums(h, axis=1)
                    # sample hiddens (Bernoulli draw using the uniform buffer r)
                    r.fill_with_rand()
                    r.less_than(h, target=h)
                    # negative phase: reconstruct v, recompute h, subtract stats (CD-1)
                    cm.dot(w_vh, h, target=v)
                    if options.drop_out is not None:
                        v.mult(1/options.drop_out)
                    v.add_col_vec(w_v)
                    if not options.gaussian_vis:
                        # Gaussian visible units use the linear activation directly.
                        v.apply_sigmoid()
                    cm.dot(w_vh.T, v, target=h)
                    h.add_col_vec(w_h)
                    h.apply_sigmoid()
                    if options.drop_out is not None:
                        h.mult(do_h)
                    wu_vh.subtract_dot(v, h.T)
                    wu_v.add_sums(v, axis=1, mult=-1.)
                    wu_h.add_sums(h, axis=1, mult=-1.)
                    # update weights: regularization (l1 / l2 / elastic net on W only)
                    if options.lmbd is not None:
                        if options.reg_type == 'l1':
                            wu_vh.add_mult(w_vh.sign(), -options.lmbd)
                        elif options.reg_type == 'l2':
                            wu_vh.add_mult(w_vh, -options.lmbd)
                        elif options.reg_type == 'en':
                            wu_vh.add_mult(w_vh.sign(), -options.lmbd * options.en_lmbd)
                            wu_vh.add_mult(w_vh, -options.lmbd * (1 - options.en_lmbd))
                    # update weights: gradients
                    w_vh.add_mult(wu_vh, epsilon/batch_size)
                    w_v.add_mult(wu_v, epsilon/batch_size)
                    w_h.add_mult(wu_h, epsilon/batch_size)
                    # calculate train reconstruction error (per-unit MSE)
                    v.subtract(v_true)
                    batch_error_train.append(v.euclid_norm()**2/(num_vis*batch_size))
                # clear memory for the next chunk
                dev_dat_train.free_device_memory()
            if not options.keep_train:
                del(dat_train)
                dat_train = None
            else:
                # Transpose back so next epoch's shuffle works on example rows.
                dat_train = dat_train.T
        # <--- train set processing
        # ---> cv set processing
        # calculate cv reconstruction error (no weight updates, no dropout)
        if test_path is not None:
            for fname_tmp in os.listdir(test_path):
                if dat_cv is None or not options.keep_valid:
                    dat_cv = np.load(test_path + fname_tmp)['data']
                    dlog(' pack lodaed from valid: (%s)' % ', '.join(map(lambda i: str(i), dat_cv.shape)))
                batch_packs_cv = dat_cv.shape[0]
                # NOTE(review): unlike the train branch, dat_cv is never
                # transposed back when --kv is set -- looks like repeated
                # epochs would re-transpose it; confirm against small-ds use.
                dat_cv = dat_cv.T
                dlog(' Go through dat_cv')
                for batch_pack_inx in range(batch_packs_cv):
                    dlog(' batch_pack_inx = %i' % batch_pack_inx)
                    dat_tmp = dat_cv[:, (batch_pack_inx*batch_size*batches_in_free_mem):((batch_pack_inx + 1)*batch_size*batches_in_free_mem)]
                    if dat_tmp.shape[1] == 0:
                        break
                    try:
                        dev_dat_cv = cm.CUDAMatrix(
                            cm.reformat(dat_tmp))
                    except Exception as e:
                        print 'CUDAMAT ERROR: ' + e.message
                        cm.cublas_shutdown()
                        exit(0)
                    dlog(' dev_dat_cv.shape = [%s]' % ', '.join(map(lambda x: str(x), dev_dat_cv.shape)))
                    num_batches_cv = dev_dat_cv.shape[1]/batch_size
                    for batch in range(num_batches_cv):
                        # one up-down pass to measure reconstruction error
                        v_true = dev_dat_cv.slice(batch*batch_size, (batch + 1)*batch_size)
                        v.assign(v_true)
                        cm.dot(w_vh.T, v, target=h)
                        h.add_col_vec(w_h)
                        h.apply_sigmoid()
                        r.fill_with_rand()
                        r.less_than(h, target=h)
                        cm.dot(w_vh, h, target=v)
                        v.add_col_vec(w_v)
                        if not options.gaussian_vis:
                            v.apply_sigmoid()
                        v.subtract(v_true)
                        batch_error_cv.append(v.euclid_norm()**2/(num_vis*batch_size))
                    dev_dat_cv.free_device_memory()
                if not options.keep_valid:
                    del(dat_cv)
                    dat_cv = None
        # <--- cv set processing
        # reporting
        train_error.append(np.mean(batch_error_train))
        print " Train MSE: " + str(train_error[-1])
        if test_path is not None:
            cv_error.append(np.mean(batch_error_cv))
            print " CV MSE: " + str(cv_error[-1])
        print " Time: " + str(time.time() - start_time)
        # stop conditions
        if len(train_error) > 1:
            # Stop when training error plateaus.
            if np.abs(train_error[-2] - train_error[-1]) < options.minimum_error_change:
                print 'BREAK: minimum_error_change'
                break
            # cv error diverged from its minimum by more than cv_stop_factor:
            # either stop, or (adaptive mode) roll back to the best weights
            # and shrink the learning rate.
            if test_path is not None and np.min(cv_error) < cv_error[-1] and cv_error[-1]/np.min(cv_error) > options.cv_stop_factor + 1:
                if options.ada_eps_factor is None:
                    print 'BREAK: cv_stop_factor, min = %f, cur = %f' % (np.min(cv_error), cv_error[-1])
                    break
                elif epsilon < options.ada_eps_min:
                    print 'BREAK: ada_eps_min, min = %f, cur = %f' % (options.ada_eps_min, epsilon)
                    break
                else:
                    w_vh.assign(w_vh_min)
                    w_v.assign(w_v_min)
                    w_h.assign(w_h_min)
                    epsilon = epsilon * options.ada_eps_factor
                    epoch_eps_corrected.append(epoch)
                    print 'INFO: epsilon is corrected, original = %f, new = %f' % (options.epsilon, epsilon)
        # saving best state (cv error reached a new minimum this epoch)
        if len(cv_error) > 1 and np.min(cv_error[:-1]) > cv_error[-1]:
            if options.save_best:
                try:
                    np.savez(options.output + '_min.npz',
                             w_vh=w_vh.asarray(), w_v=w_v.asarray(), w_h=w_h.asarray(),
                             train_error=train_error, cv_error=cv_error)
                    print 'Saved: ' + options.output
                except:
                    print 'ERROR: can\'t save'
                save_plot(options.output + '_plot_min.png', range(epoch + 1), train_error, cv_error, epoch_eps_corrected)
            if options.ada_eps_factor is not None:
                # Remember the best weights for possible rollback later.
                w_vh_min.assign(w_vh)
                w_v_min.assign(w_v)
                w_h_min.assign(w_h)
    # Final save of whatever state training ended with.
    try:
        np.savez(options.output + '.npz',
                 w_vh=w_vh.asarray(), w_v=w_v.asarray(), w_h=w_h.asarray(),
                 train_error=train_error, cv_error=cv_error)
        print 'Saved: ' + options.output + '.npz'
    except:
        print 'ERROR: can\'t save'
    # NOTE(review): when no validation dir was given cv_error is empty here;
    # save_plot would then call np.min on an empty list -- verify.
    save_plot(options.output + '_plot.png', range(epoch + 1), train_error, cv_error, epoch_eps_corrected, min_line=True)
    cm.cublas_shutdown()
| mephistopheies/nn | rbm_cudamat/train_rbm.py | Python | gpl-2.0 | 17,818 | [
"Gaussian"
] | 28f3f48d7ae5b012a696f324c27b15ad1849e6a7bc7dd385faa08db510bbbb2f |
#
# Copyright (C) 2011 EADS France, Fabrice Desclaux <fabrice.desclaux@eads.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Expressions manipulation functions
import itertools
import collections
import random
import string
import miasm2.expression.expression as m2_expr
def parity(a):
    """Return the even-parity bit of the low byte of @a.

    Result is 1 when the number of set bits in (a & 0xFF) is even,
    0 when it is odd (x86 PF flag semantics).
    """
    # 0xFF instead of the Python-2-only 0xFFL literal keeps the mask
    # portable; bin().count('1') is the idiomatic popcount.
    return 1 - (bin(a & 0xFF).count('1') & 1)
def merge_sliceto_slice(args):
    """Simplify a list of compose components @args, each a tuple
    (expr, start, stop): contiguous integer chunks are merged into one
    integer, and contiguous slices of the same source expression are
    merged into one larger slice. Returns the simplified component
    list, sorted by start offset."""
    sources = {}      # sliced source expr -> list of its slice components
    non_slice = {}    # start offset -> component that is neither int nor slice
    sources_int = {}  # start offset -> integer component
    for a in args:
        if isinstance(a[0], m2_expr.ExprInt):
            # sources_int[a.start] = a
            # copy ExprInt because we will inplace modify arg just below
            # /!\ TODO XXX never ever modify inplace args...
            sources_int[a[1]] = (m2_expr.ExprInt_fromsize(a[2] - a[1],
                                                          a[0].arg.__class__(
                                                              a[0].arg)),
                                 a[1],
                                 a[2])
        elif isinstance(a[0], m2_expr.ExprSlice):
            if not a[0].arg in sources:
                sources[a[0].arg] = []
            sources[a[0].arg].append(a)
        else:
            non_slice[a[1]] = a

    # find max stop to determine size
    max_size = None
    for a in args:
        if max_size is None or max_size < a[2]:
            max_size = a[2]

    # first simplify all num slices
    final_sources = []
    sorted_s = []
    for x in sources_int.values():
        x = list(x)
        # mask int to the width of its [start, stop) interval
        v = x[0].arg & ((1 << (x[2] - x[1])) - 1)
        x[0] = m2_expr.ExprInt_from(x[0], v)
        x = tuple(x)
        sorted_s.append((x[1], x))
    sorted_s.sort()
    # Pop from the highest start downward, absorbing every integer chunk
    # whose stop touches the current chunk's start.
    while sorted_s:
        start, v = sorted_s.pop()
        out = [m2_expr.ExprInt(v[0].arg), v[1], v[2]]
        size = v[2] - v[1]
        while sorted_s:
            if sorted_s[-1][1][2] != start:
                break
            s_start, s_stop = sorted_s[-1][1][1], sorted_s[-1][1][2]
            size += s_stop - s_start
            # shift the already-merged value up and OR in the lower chunk
            a = m2_expr.mod_size2uint[size](
                (int(out[0].arg) << (out[1] - s_start)) +
                int(sorted_s[-1][1][0].arg))
            out[0] = m2_expr.ExprInt(a)
            sorted_s.pop()
            out[1] = s_start
        out[0] = m2_expr.ExprInt_fromsize(size, out[0].arg)
        final_sources.append((start, out))

    final_sources_int = final_sources

    # check if same sources have corresponding start/stop
    # is slice AND is sliceto
    simp_sources = []
    for args in sources.values():
        final_sources = []
        sorted_s = []
        for x in args:
            sorted_s.append((x[1], x))
        sorted_s.sort()
        while sorted_s:
            start, v = sorted_s.pop()
            ee = v[0].arg[v[0].start:v[0].stop]
            out = ee, v[1], v[2]
            # absorb lower neighbours that are both adjacent in the compose
            # (stop == start) and adjacent in the sliced source expression
            while sorted_s:
                if sorted_s[-1][1][2] != start:
                    break
                if sorted_s[-1][1][0].stop != out[0].start:
                    break

                start = sorted_s[-1][1][1]
                # out[0].start = sorted_s[-1][1][0].start
                o_e, _, o_stop = out
                o1, o2 = sorted_s[-1][1][0].start, o_e.stop
                o_e = o_e.arg[o1:o2]
                out = o_e, start, o_stop
                # update _size
                # out[0]._size = out[0].stop-out[0].start
                sorted_s.pop()
            out = out[0], start, out[2]

            final_sources.append((start, out))

        simp_sources += final_sources

    simp_sources += final_sources_int

    for i, v in non_slice.items():
        simp_sources.append((i, v))
    # sort by start offset and drop the sort key
    simp_sources.sort()
    simp_sources = [x[1] for x in simp_sources]
    return simp_sources
# Operators through which integer constants may be propagated by the
# expression simplifier.
op_propag_cst = ['+', '*', '^', '&', '|', '>>',
                 '<<', "a>>", ">>>", "<<<",
                 "/", "%", 'idiv', 'imod', 'umod', 'udiv']
def is_pure_int(e):
    """
    return True if expr is only composed with integers
    /!\ ExprCond returns True is src1 and src2 are integers
    """
    def modify_cond(e):
        # Replace a condition node by the OR of its two outputs, so the
        # condition expression itself is excluded from the scan below.
        if isinstance(e, m2_expr.ExprCond):
            return e.src1 | e.src2
        return e

    def find_int(e, s):
        # Record every non-integer leaf (identifier or memory access).
        if isinstance(e, m2_expr.ExprId) or isinstance(e, m2_expr.ExprMem):
            s.add(e)
        return e

    s = set()
    # drop conditions first, then collect non-integer leaves
    new_e = e.visit(modify_cond)
    new_e.visit(lambda x: find_int(x, s))
    # pure-int iff no identifier/memory leaf was found
    if s:
        return False
    return True
def is_int_or_cond_src_int(e):
    """Return True if @e is an ExprInt, or an ExprCond whose two possible
    outputs (src1 and src2) are both ExprInt."""
    if isinstance(e, m2_expr.ExprCond):
        return (isinstance(e.src1, m2_expr.ExprInt) and
                isinstance(e.src2, m2_expr.ExprInt))
    return isinstance(e, m2_expr.ExprInt)
def fast_unify(seq, idfun=None):
    """Deduplicate @seq, keeping only the first occurrence of each element
    and preserving order.

    @idfun: optional key function used to decide element equality
    """
    if idfun is None:
        idfun = lambda x: x
    seen = set()
    unique = []
    for item in seq:
        key = idfun(item)
        if key not in seen:
            seen.add(key)
            unique.append(item)
    return unique
def get_missing_interval(all_intervals, i_min=0, i_max=32):
    """Return a list of missing interval in all_interval
    @all_interval: list of (int, int)
    @i_min: int, minimal missing interval bound
    @i_max: int, maximal missing interval bound"""
    # sentinel (i_max, i_max) closes the range at the upper bound
    covered = sorted(all_intervals) + [(i_max, i_max)]
    missing = []
    cursor = i_min
    for start, stop in covered:
        if cursor != start:
            missing.append((cursor, start))
        cursor = stop
    return missing
class Variables_Identifier(object):
    """Identify variables in an expression.
    Returns:
    - variables with their corresponding values
    - original expression with variables translated
    """

    # Attribute used to distinguish created variables from original ones
    is_var_ident = "is_var_ident"

    def __init__(self, expr, var_prefix="v"):
        """Set the expression @expr to handle and launch variable identification
        process
        @expr: Expr instance
        @var_prefix: (optional) prefix of the variable name, default is 'v'"""
        # Init
        self.var_indice = itertools.count()   # numeric suffix generator
        self.var_asked = set()                # sub-expressions seen at least once
        self._vars = {}  # VarID -> Expr
        self.var_prefix = var_prefix

        # Launch recurrence
        self.find_variables_rec(expr)

        # Compute inter-variable dependencies: substitute variables into
        # each other's values until a fixpoint is reached.
        has_change = True
        while has_change:
            has_change = False
            for var_id, var_value in self._vars.iteritems():
                cur = var_value

                # Do not replace with itself
                to_replace = {v_val: v_id
                              for v_id, v_val in self._vars.iteritems()
                              if v_id != var_id}
                var_value = var_value.replace_expr(to_replace)

                if cur != var_value:
                    # Force @self._vars update
                    has_change = True
                    self._vars[var_id] = var_value
                    break

        # Replace in the original equation
        self._equation = expr.replace_expr({v_val: v_id for v_id, v_val
                                            in self._vars.iteritems()})

        # Compute variables dependencies (topological order)
        self._vars_ordered = collections.OrderedDict()
        todo = set(self._vars.iterkeys())
        needs = {}

        # Build initial needs
        for var_id, var_expr in self._vars.iteritems():
            # Handle corner cases while using Variable Identifier on an
            # already computed equation
            needs[var_id] = [var_name
                             for var_name in var_expr.get_r(mem_read=True)
                             if self.is_var_identifier(var_name) and \
                                 var_name in todo and \
                                 var_name != var_id]

        # Build order list: repeatedly emit variables whose dependencies
        # have all been emitted already.
        while todo:
            done = set()
            for var_id in todo:
                all_met = True
                for need in needs[var_id]:
                    if need not in self._vars_ordered:
                        # A dependency is not met
                        all_met = False
                        break
                if not all_met:
                    continue

                # All dependencies are already met, add current
                self._vars_ordered[var_id] = self._vars[var_id]
                done.add(var_id)

            # Update the todo list
            for element_done in done:
                todo.remove(element_done)

    @classmethod
    def is_var_identifier(cls, expr):
        "Return True iff @expr is a variable identifier"
        if not isinstance(expr, m2_expr.ExprId):
            return False
        return hasattr(expr, cls.is_var_ident) and \
            getattr(expr, cls.is_var_ident) == True

    def find_variables_rec(self, expr):
        """Recursive method called by find_variable to expand @expr.
        Set @var_names and @var_values.
        This implementation is faster than an expression visitor because
        we do not rebuild each expression.
        """
        if (expr in self.var_asked):
            # Expr has already been asked: it appears at least twice, so
            # it becomes a variable.
            if (expr not in self._vars.values()):
                # Create var
                identifier = m2_expr.ExprId("%s%s" % (self.var_prefix,
                                                      self.var_indice.next()),
                                            size = expr.size)
                setattr(identifier, self.__class__.is_var_ident, True)
                self._vars[identifier] = expr

            # Recursion stop case
            return
        else:
            # First time for @expr
            self.var_asked.add(expr)

        if isinstance(expr, m2_expr.ExprOp):
            for a in expr.args:
                self.find_variables_rec(a)
        elif isinstance(expr, m2_expr.ExprInt):
            pass
        elif isinstance(expr, m2_expr.ExprId):
            pass
        elif isinstance(expr, m2_expr.ExprMem):
            self.find_variables_rec(expr.arg)
        elif isinstance(expr, m2_expr.ExprCompose):
            for a in expr.args:
                self.find_variables_rec(list(a)[0])
        elif isinstance(expr, m2_expr.ExprSlice):
            self.find_variables_rec(expr.arg)
        elif isinstance(expr, m2_expr.ExprCond):
            self.find_variables_rec(expr.cond)
            self.find_variables_rec(expr.src1)
            self.find_variables_rec(expr.src2)
        else:
            raise NotImplementedError("Type not handled: %s" % expr)

    @property
    def vars(self):
        "Ordered mapping: variable identifier -> expression value"
        return self._vars_ordered

    @property
    def equation(self):
        "Original expression with identified variables substituted in"
        return self._equation

    def __str__(self):
        "Display variables and final equation"
        out = ""
        for var_id, var_expr in self.vars.iteritems():
            out += "%s = %s\n" % (var_id, var_expr)
        out += "Final: %s" % self.equation
        return out
class ExprRandom(object):
    """Return an expression randomly generated"""

    # Identifiers length
    identifier_len = 5
    # Identifiers' name charset (string.letters is Python-2 only)
    identifier_charset = string.letters
    # Number max value
    number_max = 0xFFFFFFFF
    # Available operations, keyed by arity ("2+" means variadic, >= 2 args)
    operations_by_args_number = {1: ["-"],
                                 2: ["<<", "<<<", ">>", ">>>"],
                                 "2+": ["+", "*", "&", "|", "^"],
                                 }
    # Maximum number of argument for operations
    operations_max_args_number = 5
    # If set, output expression is a perfect tree
    perfect_tree = True
    # Max argument size in slice, relative to slice size
    slice_add_size = 10
    # Maximum number of layer in compose
    compose_max_layer = 5
    # Maximum size of memory address in bits
    memory_max_address_size = 32
    # Re-use already generated elements to mimic a more realistic behavior
    reuse_element = True
    generated_elements = {}  # (depth, size) -> [Expr]

    @classmethod
    def identifier(cls, size=32):
        """Return a random identifier
        @size: (optional) identifier size
        """
        return m2_expr.ExprId("".join([random.choice(cls.identifier_charset)
                                       for _ in xrange(cls.identifier_len)]),
                              size=size)

    @classmethod
    def number(cls, size=32):
        """Return a random number
        @size: (optional) number max bits
        """
        num = random.randint(0, cls.number_max % (2**size))
        return m2_expr.ExprInt_fromsize(size, num)

    @classmethod
    def atomic(cls, size=32):
        """Return an atomic Expression
        @size: (optional) Expr size
        """
        available_funcs = [cls.identifier, cls.number]
        return random.choice(available_funcs)(size=size)

    @classmethod
    def operation(cls, size=32, depth=1):
        """Return an ExprOp
        @size: (optional) Operation size
        @depth: (optional) Expression depth
        """
        operand_type = random.choice(cls.operations_by_args_number.keys())
        if isinstance(operand_type, str) and "+" in operand_type:
            # variadic operator: pick an argument count up to the limit
            number_args = random.randint(int(operand_type[:-1]),
                                         cls.operations_max_args_number)
        else:
            number_args = operand_type

        args = [cls._gen(size=size, depth=depth - 1)
                for _ in xrange(number_args)]
        operand = random.choice(cls.operations_by_args_number[operand_type])
        return m2_expr.ExprOp(operand,
                              *args)

    @classmethod
    def slice(cls, size=32, depth=1):
        """Return an ExprSlice
        @size: (optional) Operation size
        @depth: (optional) Expression depth
        """
        # generate an argument wider than the slice, then take @size bits
        start = random.randint(0, size)
        stop = start + size
        return cls._gen(size=random.randint(stop, stop + cls.slice_add_size),
                        depth=depth - 1)[start:stop]

    @classmethod
    def compose(cls, size=32, depth=1):
        """Return an ExprCompose
        @size: (optional) Operation size
        @depth: (optional) Expression depth
        """
        # First layer
        upper_bound = random.randint(1, size)
        args = [(cls._gen(size=upper_bound, depth=depth - 1), 0, upper_bound)]

        # Next layers
        while (upper_bound < size):
            if len(args) == (cls.compose_max_layer - 1):
                # We reach the maximum size
                upper_bound = size
            else:
                upper_bound = random.randint(args[-1][-1] + 1, size)
            # NOTE(review): unlike the first layer, this call omits
            # depth=depth - 1, so later layers use the default depth=1 —
            # looks like an oversight; confirm intended behavior.
            args.append((cls._gen(size=upper_bound - args[-1][-1]),
                         args[-1][-1],
                         upper_bound))
        return m2_expr.ExprCompose(args)

    @classmethod
    def memory(cls, size=32, depth=1):
        """Return an ExprMem
        @size: (optional) Operation size
        @depth: (optional) Expression depth
        """
        address_size = random.randint(1, cls.memory_max_address_size)
        return m2_expr.ExprMem(cls._gen(size=address_size,
                                        depth=depth - 1),
                               size=size)

    @classmethod
    def _gen(cls, size=32, depth=1):
        """Internal function for generating sub-expression according to options
        @size: (optional) Operation size
        @depth: (optional) Expression depth
        /!\ @generated_elements is left modified
        """
        # Perfect tree handling
        if not cls.perfect_tree:
            depth = random.randint(max(0, depth - 2), depth)

        # Element re-use
        if cls.reuse_element and random.choice([True, False]) and \
                (depth, size) in cls.generated_elements:
            return random.choice(cls.generated_elements[(depth, size)])

        # Recursion stop
        if depth == 0:
            return cls.atomic(size=size)

        # Build a more complex expression
        available_funcs = [cls.operation, cls.slice, cls.compose, cls.memory]
        gen = random.choice(available_funcs)(size=size, depth=depth)

        # Save it
        new_value = cls.generated_elements.get((depth, size), []) + [gen]
        cls.generated_elements[(depth, size)] = new_value

        return gen

    @classmethod
    def get(cls, size=32, depth=1, clean=True):
        """Return a randomly generated expression
        @size: (optional) Operation size
        @depth: (optional) Expression depth
        @clean: (optional) Clean expression cache between two calls
        """
        # Init state
        if clean:
            cls.generated_elements = {}

        # Get an element
        got = cls._gen(size=size, depth=depth)

        # Clear state
        if clean:
            cls.generated_elements = {}

        return got
| p-l-/miasm | miasm2/expression/expression_helper.py | Python | gpl-2.0 | 17,609 | [
"VisIt"
] | 6b5c69e06307825b7c6c5af69f4728e2dc306117a77d3b2d413d04b1ee0dc6a3 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Run this test like so:
# vtkpython TestStackedPlot.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Charts/
import os
import vtk
import vtk.test.Testing
import math
month_labels = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
book = [5675, 5902, 6388, 5990, 5575, 7393, 9878, 8082, 6417, 5946, 5526, 5166]
new_popular = [701, 687, 736, 696, 750, 814, 923, 860, 786, 735, 680, 741]
periodical = [184, 176, 166, 131, 171, 191, 231, 166, 197, 162, 152, 143]
audiobook = [903, 1038, 987, 1073, 1144, 1203, 1173, 1196, 1213, 1076, 926, 874]
video = [1524, 1565, 1627, 1445, 1179, 1816, 2293, 1811, 1588, 1561, 1542, 1563]
class TestStackedPlot(vtk.test.Testing.vtkTest):
    """Regression test: build a stacked plot from the monthly circulation
    data and compare the rendering against the stored baseline image."""

    def testStackedPlot(self):
        "Test if stacked plots can be built with python"

        # Set up a 2D scene, add an XY chart to it
        view = vtk.vtkContextView()
        view.GetRenderer().SetBackground(1.0,1.0,1.0)
        view.GetRenderWindow().SetSize(400,300)
        chart = vtk.vtkChartXY()
        view.GetScene().AddItem(chart)

        # Create a table with some data in it
        table = vtk.vtkTable()

        arrMonthLabels = vtk.vtkStringArray()
        arrMonthPositions = vtk.vtkDoubleArray()

        arrMonth = vtk.vtkIntArray()
        arrMonth.SetName("Month")

        arrBooks = vtk.vtkIntArray()
        arrBooks.SetName("Books")

        arrNew = vtk.vtkIntArray()
        arrNew.SetName("New / Popular")

        arrPeriodical = vtk.vtkIntArray()
        arrPeriodical.SetName("Periodical")

        arrAudiobook = vtk.vtkIntArray()
        arrAudiobook.SetName("Audiobook")

        arrVideo = vtk.vtkIntArray()
        arrVideo.SetName("Video")

        numMonths = 12

        # fill one row per month from the module-level data lists
        for i in range(0,numMonths):
            arrMonthLabels.InsertNextValue(month_labels[i])
            arrMonthPositions.InsertNextValue(float(i))

            arrMonth.InsertNextValue(i)
            arrBooks.InsertNextValue(book[i])
            arrNew.InsertNextValue(new_popular[i])
            arrPeriodical.InsertNextValue(periodical[i])
            arrAudiobook.InsertNextValue(audiobook[i])
            arrVideo.InsertNextValue(video[i])

        table.AddColumn(arrMonth)
        table.AddColumn(arrBooks)
        table.AddColumn(arrNew)
        table.AddColumn(arrPeriodical)
        table.AddColumn(arrAudiobook)
        table.AddColumn(arrVideo)

        # Set up the X Labels
        chart.GetAxis(1).SetTickLabels(arrMonthLabels)
        chart.GetAxis(1).SetTickPositions(arrMonthPositions)
        chart.GetAxis(1).SetMaximum(11)

        # Create the stacked plot
        # (plot type 3 — presumably vtkChart::STACKED; confirm against
        # the vtkChart plot-type enum)
        stack = chart.AddPlot(3)
        stack.SetUseIndexForXSeries(True)
        stack.SetInputData(table)
        stack.SetInputArray(1,"Books")
        stack.SetInputArray(2,"New / Popular")
        stack.SetInputArray(3,"Periodical")
        stack.SetInputArray(4,"Audiobook")
        stack.SetInputArray(5,"Video")

        # Set up a nice color series
        colorSeries = vtk.vtkColorSeries()
        colorSeries.SetColorScheme(2)
        stack.SetColorSeries(colorSeries)

        view.GetRenderWindow().SetMultiSamples(0)
        view.GetRenderWindow().Render()

        # compare against the baseline image, then allow interaction
        img_file = "TestStackedPlot.png"
        vtk.test.Testing.compareImage(view.GetRenderWindow(),
                                      vtk.test.Testing.getAbsImagePath(img_file),
                                      threshold=25)
        vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestStackedPlot, 'test')])
| cjh1/VTK | Charts/Core/Testing/Python/TestStackedPlot.py | Python | bsd-3-clause | 3,643 | [
"VTK"
] | 5eca9fc347269924c294407b827a7e40a8351a82345af52edef1146fda4e0efb |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from chigger import RenderWindow
from chigger.annotations import ImageAnnotation
# Logos
moose = ImageAnnotation(filename='moose.png', position=[0, 0],
horizontal_alignment='left', vertical_alignment='bottom')
marmot = ImageAnnotation(filename='marmot_green.png', position=[0, 1],
horizontal_alignment='left', vertical_alignment='top')
pika = ImageAnnotation(filename='pika_white.png', position=[1, 1],
horizontal_alignment='right', vertical_alignment='top')
chigger = ImageAnnotation(filename='chigger_white.png', position=[1., 0.],
horizontal_alignment='right', vertical_alignment='bottom')
inl = ImageAnnotation(filename='inl.png')
# Create the window
window = RenderWindow(moose, marmot, pika, chigger, inl, test=True)
window.write('logo_annotation.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/annotations/logo_annotation.py | Python | lgpl-2.1 | 1,241 | [
"MOOSE"
] | 6481ed5a121a23e8e443272a2d2ce66479183d8c2ae8fc4292782812589ba990 |
from threading import Thread
def auto_builder_thread():
    """Background script: every WORK_INTERVAL seconds it collects the daily
    bonus if available and, for each planet, queues the next building
    according to a fixed economic strategy; the imperium page is refreshed
    every IMPERIUM_REFRESH_INTERVAL seconds. Runs until the world object's
    script_command attribute is set to 'stop'."""
    import time
    from enum import IntEnum

    from ui.xnova.xn_data import XNPlanet, XNPlanetBuildingItem
    from ui.xnova.xn_world import XNovaWorld_instance, XNovaWorld
    from ui.xnova.xn_techtree import XNTechTree_instance
    from ui.xnova import xn_logger

    class BGid(IntEnum):
        # In-game building identifiers (gids)
        METAL_FACTORY = 1
        CRYSTAL_FACTORY = 2
        DEIT_FACTORY = 3
        SOLAR_STATION = 4
        FACTORY = 14
        NANITES = 15
        SHIPYARD = 21
        METAL_SILO = 22
        CRYSTAL_SILO = 23
        DEIT_SILO = 24
        LAB = 31
        ROCKET_SILO = 44

    logger = xn_logger.get('auto_builder', debug=True)

    world = XNovaWorld_instance()
    # advertises the script state; deleted on exit
    world.script_command = 'running'

    WORK_INTERVAL = 145  # seconds
    IMPERIUM_REFRESH_INTERVAL = 300  # seconds

    def check_bonus(world: XNovaWorld):
        # Collect the daily bonus when the world reports one is available.
        bonus_url = world.get_bonus_url()
        if bonus_url is not None:
            logger.info('Detected that bonus is available, get it!')
            world.signal(world.SIGNAL_GET_URL, url=bonus_url, referer='?set=overview')
            time.sleep(10)
            world.clear_bonus_url()
        time.sleep(2)

    def energy_need_for_gid(gid: int, level: int) -> int:
        # Energy consumed by a resource building of @gid at @level.
        if (gid == 1) or (gid == 2) or (gid == 12):
            e = (10 * level) * (1.1 ** level)
            return round(e)
        if gid == 3:
            e = (30 * level) * (1.1 ** level)
            return round(e)
        # error! incorrect gid supplied?
        tt = XNTechTree_instance()
        item = tt.find_item_by_gid(gid)
        s = 'Don\'t know how to calculate energy need for gid={0} "{1}" ({2})'.format(
            gid, item.name, item.category)
        logger.error(s)
        raise RuntimeError(s)

    def calc_planet_next_building(planet: XNPlanet) -> XNPlanetBuildingItem:
        # Decide which building to queue next on @planet, or None for
        # moons/bases. Priority: energy, factory (<10), shipyard, nanites,
        # rocket silo, then mines limited by solar-station level, silos.
        if planet.is_moon or planet.is_base:
            return None
        met_level = 0
        cry_level = 0
        deit_level = 0
        ss_level = 0
        #
        met_bitem = planet.find_bitem_by_gid(int(BGid.METAL_FACTORY))
        if met_bitem is not None:
            met_level = met_bitem.level
        cry_bitem = planet.find_bitem_by_gid(int(BGid.CRYSTAL_FACTORY))
        if cry_bitem is not None:
            cry_level = cry_bitem.level
        deit_bitem = planet.find_bitem_by_gid(int(BGid.DEIT_FACTORY))
        if deit_bitem is not None:
            deit_level = deit_bitem.level
        ss_bitem = planet.find_bitem_by_gid(int(BGid.SOLAR_STATION))
        if ss_bitem is not None:
            ss_level = ss_bitem.level
        free_energy = planet.energy.energy_left
        #
        # first, check energy
        if free_energy <= 1:
            logger.info('Planet [{0}] has too low energy ({1}), must '
                        'build solar station!'.format(planet.name, free_energy))
            return ss_bitem
        # second, check robotics factory, if it is below level 10
        factory_level = 0
        factory_bitem = planet.find_bitem_by_gid(int(BGid.FACTORY))
        if factory_bitem is not None:
            factory_level = factory_bitem.level
            if factory_bitem.level < 10:
                # check resources, this will build factory before any
                # any other building only if enough resources NOW, do not wait
                if (planet.res_current.met >= factory_bitem.cost_met) and \
                        (planet.res_current.cry >= factory_bitem.cost_cry) and \
                        (planet.res_current.deit >= factory_bitem.cost_deit):
                    logger.info('Planet [{0}] Factory level < 10 and have res for it,'
                                ' build Factory!'.format(planet.name))
                    return factory_bitem
        # maybe build shipyard? :)
        shipyard_bitem = planet.find_bitem_by_gid(int(BGid.SHIPYARD))
        if shipyard_bitem is not None:
            if shipyard_bitem.level < factory_level:
                if (planet.res_current.met >= shipyard_bitem.cost_met) and \
                        (planet.res_current.cry >= shipyard_bitem.cost_cry) and \
                        (planet.res_current.deit >= shipyard_bitem.cost_deit):
                    logger.info('Planet [{0}] Shipyard level < {1} and have res for it,'
                                ' build Factory!'.format(planet.name, factory_level))
                    return shipyard_bitem
        # maybe build nanites factory? :)
        if factory_level >= 10:
            nanites_bitem = planet.find_bitem_by_gid(int(BGid.NANITES))
            if nanites_bitem is not None:
                if (planet.res_current.met >= nanites_bitem.cost_met) and \
                        (planet.res_current.cry >= nanites_bitem.cost_cry) and \
                        (planet.res_current.deit >= nanites_bitem.cost_deit):
                    logger.info('Planet [{0}] can build NANITES!'.format(planet.name))
                    return nanites_bitem
        # maybe build rocket silo?
        rs_bitem = planet.find_bitem_by_gid(int(BGid.ROCKET_SILO))
        if rs_bitem is not None:
            if rs_bitem.level < 2:
                if (planet.res_current.met >= rs_bitem.cost_met) and \
                        (planet.res_current.cry >= rs_bitem.cost_cry) and \
                        (planet.res_current.deit >= rs_bitem.cost_deit):
                    logger.info('Planet [{0}] can build rocket silo lv {1}'.format(
                        planet.name, rs_bitem.level+1))
                    return rs_bitem
        #
        # other resources buildings
        logger.info('Planet [{0}] m/c/d/e levels: {1}/{2}/{3}/{4} free_en: {5}'.format(
            planet.name, met_level, cry_level, deit_level, ss_level, free_energy))
        if ss_level < met_level:
            return ss_bitem
        #
        # calc energy needs (marginal cost of the next mine level)
        met_eneed = energy_need_for_gid(int(BGid.METAL_FACTORY), met_level+1) \
            - energy_need_for_gid(int(BGid.METAL_FACTORY), met_level)
        cry_eneed = energy_need_for_gid(int(BGid.CRYSTAL_FACTORY), cry_level+1) \
            - energy_need_for_gid(int(BGid.CRYSTAL_FACTORY), cry_level)
        deit_eneed = energy_need_for_gid(int(BGid.DEIT_FACTORY), deit_level+1) \
            - energy_need_for_gid(int(BGid.DEIT_FACTORY), deit_level)
        logger.info('Planet [{0}] needed en: {1}/{2}/{3}'.format(
            planet.name, met_eneed, cry_eneed, deit_eneed))
        # try to fit in energy some buildings
        if (met_level < ss_level) and (met_eneed <= free_energy):
            return met_bitem
        if (cry_level < (ss_level-2)) and (cry_eneed <= free_energy):
            return cry_bitem
        if (deit_level < (ss_level-4)) and (deit_eneed <= free_energy):
            return deit_bitem
        #
        # check resources storage capacity (build a silo at >= 70% full)
        if planet.res_max_silos.met > 0:
            if planet.res_current.met / planet.res_max_silos.met >= 0.7:
                silo_bitem = planet.find_bitem_by_gid(int(BGid.METAL_SILO))
                logger.info('Planet [{0}] needs metal silo!'.format(planet.name))
                return silo_bitem
        if planet.res_max_silos.cry > 0:
            if planet.res_current.cry / planet.res_max_silos.cry >= 0.7:
                silo_bitem = planet.find_bitem_by_gid(int(BGid.CRYSTAL_SILO))
                logger.info('Planet [{0}] needs crystal silo!'.format(planet.name))
                return silo_bitem
        if planet.res_max_silos.deit > 0:
            if planet.res_current.deit / planet.res_max_silos.deit >= 0.7:
                silo_bitem = planet.find_bitem_by_gid(int(BGid.DEIT_SILO))
                logger.info('Planet [{0}] needs deit silo!'.format(planet.name))
                return silo_bitem
        #
        # default - build solar station
        logger.warn('Planet [{0}] for some reason cannot decide what to build, '
                    'will build solar station by default'.format(planet.name))
        return ss_bitem

    def check_planet_buildings(world: XNovaWorld, planet: XNPlanet):
        # is there any building in progress on planet now?
        build_in_progress = False
        bitem = XNPlanetBuildingItem()
        for bitem_ in planet.buildings_items:
            if bitem_.is_in_progress():
                build_in_progress = True
                bitem = bitem_
                break
        if build_in_progress:
            logger.info('Planet [{0}] has still build in progress {1} lv {2}'.format(
                planet.name, bitem.name, bitem.level+1))
            return
        # no builds in progress, we can continue
        bitem = calc_planet_next_building(planet)
        if bitem is None:
            logger.error('Planet [{0}]: for some reason could not calculate '
                         'next building, some internal error? Try to relogin and '
                         'refresh all world.'.format(planet.name))
            return
        logger.info('Planet [{0}] Next building will be: {1} lv {2}'.format(
            planet.name, bitem.name, bitem.level+1))
        logger.info('Planet [{0}] Its price: {1}m {2}c {3}d'.format(
            planet.name, bitem.cost_met, bitem.cost_cry, bitem.cost_deit))
        logger.info('Planet [{0}] We have: {1}m {2}c {3}d'.format(
            planet.name, int(planet.res_current.met), int(planet.res_current.cry),
            int(planet.res_current.deit)))
        # do we have enough resources to build it?
        if (planet.res_current.met >= bitem.cost_met) and \
                (planet.res_current.cry >= bitem.cost_cry) and \
                (planet.res_current.deit >= bitem.cost_deit):
            logger.info('Planet [{0}] We have enough resources to build it, trigger!'.format(
                planet.name))
            world.signal(world.SIGNAL_BUILD_ITEM,
                         planet_id=planet.planet_id,
                         bitem=bitem,
                         quantity=0)
            logger.info('Planet [{0}] Signal to build this item has been sent to world thread, wait 10s...'.format(
                planet.name))
            time.sleep(10)  # actually wait
        else:
            logger.warn('Planet [{0}] We DO NOT have enough resources to build [{1} lv {2}]...'.format(
                planet.name, bitem.name, bitem.level+1))

    # start the first work cycle immediately
    last_work_time = time.time() - WORK_INTERVAL
    last_imperium_refresh_time = time.time()
    logger.info('Started.')
    while True:
        time.sleep(1)
        if world.script_command == 'stop':
            break
        cur_time = time.time()
        if cur_time - last_work_time >= WORK_INTERVAL:
            last_work_time = cur_time
            # logger.debug('{0} seconds have passed, working...'.format(WORK_INTERVAL))
            check_bonus(world)
            planets = world.get_planets()
            if len(planets) < 1:
                continue
            for planet in planets:
                check_planet_buildings(world, planet)
                time.sleep(1)
                if world.script_command == 'stop':
                    break
        # if we didn't sleep long enough for a work_interval
        # refresh imperium from time to time
        if cur_time - last_imperium_refresh_time >= IMPERIUM_REFRESH_INTERVAL:
            logger.info('Time to refresh imperium...')
            last_imperium_refresh_time = cur_time
            world.signal(world.SIGNAL_RELOAD_PAGE, page_name='imperium')
    # while True
    del world.script_command
    logger.info('Stopped.')
# start script as a parallel thread; non-daemon so the process waits for
# the builder loop to stop cleanly before exiting
thr = Thread(target=auto_builder_thread, name='auto_builder_thread')
thr.daemon = False
thr.start()
| minlexx/xnovacmd | scripts/auto_builder.py | Python | gpl-2.0 | 11,689 | [
"CRYSTAL"
] | 31e2e337e35384a489fc7bfb43dea65d535edd80c3ac1472782c077ff869e89a |
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from customeditor import CustomEditor
class ColorEditor(CustomEditor):
    """Editor widget for a color field: shows a small button filled with the
    current color; clicking it (when editable) opens a QColorDialog.

    The edited value is an (r, g, b, a) tuple of ints, or None when no
    color is set."""

    def __init__(self, parent=None, editable=True, field_name='color', **kwargs):
        CustomEditor.__init__(self, parent)
        self.setObjectName( field_name )
        layout = QtGui.QVBoxLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins( 0, 0, 0, 0)
        self.color_button = QtGui.QPushButton(parent)
        self.color_button.setMaximumSize(QtCore.QSize(20, 20))
        layout.addWidget(self.color_button)
        if editable:
            self.color_button.clicked.connect(self.buttonClicked)
        self.setLayout(layout)
        self._color = None

    def get_value(self):
        # Return the current color as an (r, g, b, a) tuple, or None when
        # no color is set (base-class value takes precedence).
        color = self.getColor()
        if color:
            value = (color.red(), color.green(), color.blue(), color.alpha())
        else:
            value = None
        return CustomEditor.get_value(self) or value

    def set_value(self, value):
        # @value: (r, g, b, a) tuple of ints, or None to clear the color.
        value = CustomEditor.set_value(self, value)
        if value:
            color = QtGui.QColor()
            color.setRgb(*value)
            self.setColor(color)
        else:
            self.setColor(value)

    def getColor(self):
        # Current QColor, or None when unset.
        return self._color

    def set_enabled(self, editable=True):
        # Enable/disable the color button (and hence the dialog).
        self.color_button.setEnabled(editable)

    def setColor(self, color):
        # Repaint the button icon with @color (transparent when None)
        # and remember the color.
        pixmap = QtGui.QPixmap(16, 16)
        if color:
            pixmap.fill(color)
        else:
            pixmap.fill(Qt.transparent)
        self.color_button.setIcon(QtGui.QIcon(pixmap))
        self._color = color

    def buttonClicked(self, raised):
        # Open a color dialog preloaded with the current color; emit
        # editingFinished only when the user picked a different valid color.
        if self._color:
            color = QtGui.QColorDialog.getColor(self._color)
        else:
            color = QtGui.QColorDialog.getColor()
        if color.isValid() and color!=self._color:
            self.setColor(color)
            self.editingFinished.emit()
| jeroendierckx/Camelot | camelot/view/controls/editors/coloreditor.py | Python | gpl-2.0 | 3,083 | [
"VisIt"
] | a4b07eb898d56a4d695b15a641f5f4791eb9ca802e171d1f50781ef6c75cdac6 |
# [the original paper](https://arxiv.org/abs/1404.3606)
import itertools
from chainer.cuda import to_gpu, to_cpu
from chainer.functions import convolution_2d
import numpy as np
from sklearn.decomposition import IncrementalPCA
from utils import gpu_enabled
if gpu_enabled():
try:
import cupy as xp
except ImportError:
import numpy as xp
else:
import numpy as xp
def steps(image_shape, filter_shape, step_shape):
    """
    Generates feature map coordinates that filters visit

    Parameters
    ----------
    image_shape: tuple of ints
        Image height / width
    filter_shape: tuple of ints
        Filter height / width
    step_shape: tuple of ints
        Step height / width

    Returns
    -------
    ys: Map coordinates along y axis
    xs: Map coordinates along x axis
    """
    image_h, image_w = image_shape
    filter_h, filter_w = filter_shape
    step_h, step_w = step_shape
    # last valid top-left corner is (image - filter), inclusive
    ys = range(0, image_h - filter_h + 1, step_h)
    xs = range(0, image_w - filter_w + 1, step_w)
    return ys, xs
def components_to_filters(components, n_channels, filter_shape):
    """
    | In PCANet, components of PCA are used as filter weights.
    | This function reshapes PCA components so that
    they can be used as network filters of shape
    (n_filters, n_channels, filter_height, filter_width).
    """
    target_shape = (components.shape[0], n_channels) + tuple(filter_shape)
    return components.reshape(target_shape)
def output_shape(ys, xs):
    # Feature-map (height, width): one output position per visited coordinate.
    return len(ys), len(xs)
class Patches(object):
    """Extract fixed-size patches from a single 2D image, scanning with the
    given filter and step shapes."""

    def __init__(self, image, filter_shape, step_shape):
        assert(image.ndim == 2)
        # remember the concrete array type (numpy.ndarray or cupy.ndarray)
        # so patch buffers live on the same device as the input
        self.ndarray = type(image)
        self.image = image
        self.filter_shape = filter_shape
        self.ys, self.xs = steps(image.shape[0:2], filter_shape, step_shape)

    @property
    def patches(self):
        """
        Return image patches of shape
        (n_patches, filter_height, filter_width)
        """
        fh, fw = self.filter_shape
        corners = [(y, x) for y in self.ys for x in self.xs]
        out = self.ndarray((len(corners), fh, fw), dtype=self.image.dtype)
        for index, (top, left) in enumerate(corners):
            out[index] = self.image[top:top+fh, left:left+fw]
        return out

    @property
    def output_shape(self):
        """Feature-map (height, width) produced by this scan."""
        return output_shape(self.ys, self.xs)
def atleast_4d(images):
    """Regard gray-scale images as 1-channel images by appending a
    trailing channel axis: (n, h, w) -> (n, h, w, 1)."""
    assert(np.ndim(images) == 3)
    return images.reshape(images.shape + (1,))
def to_channels_first(images):
    """
    Change image channel order from
    :code:`(n_images, y, x, n_channels)` to
    :code:`(n_images, n_channels, y, x)`
    """
    # moving axis 3 to position 1 yields the axis order (0, 3, 1, 2),
    # identical to the double-swapaxes formulation
    return np.moveaxis(images, 3, 1)
def image_to_patch_vectors(image, filter_shape, step_shape):
    """
    Extract patches from @image, flatten each one and subtract its mean.

    Parameters
    ----------
    image: np.ndarray
        Image to extract patch vectors
    filter_shape: tuple of ints
        The shape of a filter
    step_shape: tuple of ints
        Step height/width of a filter

    Returns
    -------
    X: np.ndarray
        A set of normalized and flattened patches
        (\\overline{X}_i in the original paper)
    """
    patches = Patches(image, filter_shape, step_shape).patches
    flattened = patches.reshape(patches.shape[0], -1)
    return flattened - flattened.mean(axis=1, keepdims=True)
def binarize(X):
    """
    Binarize each element of :code:`X` in place: strictly positive
    elements become 1, all others become 0. Returns :code:`X`.
    """
    positive = X > 0
    X[positive] = 1
    X[~positive] = 0
    return X
def binary_to_decimal(X):
    """
    Collapse the second axis of :code:`X` (shape ``(n_images, L2, y, x)``)
    by treating it as binary digits, most significant map first:

    .. code-block:: none

        a[0] * map_k[0] + a[1] * map_k[1] + ... + a[L2-1] * map_k[L2-1]

    where :math:`a = [2^{L2-1}, 2^{L2-2}, ..., 2^{0}]`.
    The output shape is ``(n_images, y, x)``.

    Parameters
    ----------
    X: xp.ndarray
        Feature maps
    """
    # weights [2^(L2-1), ..., 2^0]: first map is the most significant bit
    weights = xp.power(2, xp.arange(X.shape[1])[::-1])
    return xp.tensordot(X, weights, axes=([1], [0]))
def to_tuple_if_int(value):
    """
    If int is given, duplicate it and return as a 2 element tuple.
    Any other value is passed through unchanged.
    """
    return (value, value) if isinstance(value, int) else value
class PCANet(object):
    """PCANet feature extractor: two PCA convolution layers followed by
    binarization and block-wise histogram pooling."""

    def __init__(self, image_shape,
                 filter_shape_l1, step_shape_l1, n_l1_output,
                 filter_shape_l2, step_shape_l2, n_l2_output,
                 filter_shape_pooling, step_shape_pooling):
        """
        Parameters
        ----------
        image_shape: int or sequence of ints
            Input image shape.
        filter_shape_l1: int or sequence of ints
            The shape of the kernel in the first convolution layer.
            If the value is int, a filter of the square shape is applied.
            If you want to apply a filter of a different aspect ratio, just
            pass a tuple of shape (height, width).
        step_shape_l1: int or sequence of ints
            The shape of kernel step in the first convolution layer.
            If the value is int, a step of the square shape is applied.
            If you want to apply a step of a different aspect ratio, just
            pass a tuple of shape (height, width).
        n_l1_output:
            L1 in the original paper. The number of outputs obtained
            from a set of input images.
        filter_shape_l2: int or sequence of ints
            The shape of the kernel in the second convolution layer.
            If the value is int, a filter of the square shape is applied.
            If you want to apply a filter of a different aspect ratio, just
            pass a tuple of shape (height, width).
        step_shape_l2: int or sequence of ints
            The shape of kernel step in the second convolution layer.
            If the value is int, a step of the square shape is applied.
            If you want to apply a step of a different aspect ratio, just
            pass a tuple of shape (height, width).
        n_l2_output:
            L2 in the original paper. The number of outputs obtained
            from each L1 output.
        filter_shape_pooling: int or sequence of ints
            The shape of the filter in the pooling layer.
        step_shape_pooling: int or sequence of ints
            The shape of the filter step in the pooling layer.
        """
        self.image_shape = to_tuple_if_int(image_shape)

        self.filter_shape_l1 = to_tuple_if_int(filter_shape_l1)
        self.step_shape_l1 = to_tuple_if_int(step_shape_l1)
        self.n_l1_output = n_l1_output

        self.filter_shape_l2 = to_tuple_if_int(filter_shape_l2)
        self.step_shape_l2 = to_tuple_if_int(step_shape_l2)
        self.n_l2_output = n_l2_output

        self.filter_shape_pooling = to_tuple_if_int(filter_shape_pooling)
        self.step_shape_pooling = to_tuple_if_int(step_shape_pooling)
        self.n_bins = None  # TODO make n_bins specifiable

        self.pca_l1 = IncrementalPCA(n_l1_output)
        self.pca_l2 = IncrementalPCA(n_l2_output)

    def histogram(self, binary_images):
        """
        Separate a given image into blocks and calculate a histogram
        in each block.

        Supporse data in a block is in range [0, 3] and the acutual
        values are

        ::

            [0 0 1]
            [2 2 2]
            [2 3 3]

        | If default bins ``[-0.5 0.5 1.5 2.5 3.5]`` applied,
          the histogram will be ``[2 1 4 2]``.
        | If ``n_bins`` is specified, the range of data divided equally.
        | For example, if the data is in range ``[0, 3]`` and ``n_bins = 2``,
        | bins will be ``[-0.5 1.5 3.5]`` and the histogram will be ``[3 6]``.
        """
        k = pow(2, self.n_l2_output)
        if self.n_bins is None:
            # NOTE: n_bins is lazily initialized on first call (mutates self)
            self.n_bins = k + 1
        bins = xp.linspace(-0.5, k - 0.5, self.n_bins)

        def bhist(image):
            # calculate Bhist(T) in the original paper
            ps = Patches(
                image,
                self.filter_shape_pooling,
                self.step_shape_pooling).patches
            H = [xp.histogram(p.flatten(), bins)[0] for p in ps]
            return xp.concatenate(H)

        return xp.vstack([bhist(image) for image in binary_images])

    def process_input(self, images):
        """Validate input images and convert them to channels-first 4-D."""
        assert(np.ndim(images) >= 3)
        assert(images.shape[1:3] == self.image_shape)

        if np.ndim(images) == 3:
            # forcibly convert to multi-channel images
            images = atleast_4d(images)
        images = to_channels_first(images)
        return images

    def fit(self, images):
        """
        Train PCANet

        Parameters
        ----------
        images: np.ndarray
            | Color / grayscale images of shape
            | (n_images, height, width, n_channels) or
            | (n_images, height, width)
        """
        images = self.process_input(images)
        # images.shape == (n_images, n_channels, y, x)

        # First layer: incrementally fit PCA on per-image patch vectors
        for image in images:
            X = []
            for channel in image:
                patches = image_to_patch_vectors(
                    channel,
                    self.filter_shape_l1,
                    self.step_shape_l1
                )
                X.append(patches)
            patches = np.hstack(X)
            # patches.shape == (n_patches, n_channels * vector length)
            self.pca_l1.partial_fit(patches)

        filters_l1 = components_to_filters(
            self.pca_l1.components_,
            n_channels=images.shape[1],
            filter_shape=self.filter_shape_l1,
        )

        if gpu_enabled():
            images = to_gpu(images)
            filters_l1 = to_gpu(filters_l1)

        images = convolution_2d(
            images,
            filters_l1,
            stride=self.step_shape_l1
        ).data

        if gpu_enabled():
            images = to_cpu(images)
            filters_l1 = to_cpu(filters_l1)

        # images.shape == (n_images, L1, y, x)
        # Second layer: treat every L1 map as an independent 1-channel image
        images = images.reshape(-1, *images.shape[2:4])

        for image in images:
            patches = image_to_patch_vectors(
                image,
                self.filter_shape_l2,
                self.step_shape_l2
            )
            self.pca_l2.partial_fit(patches)
        return self

    def transform(self, images):
        """
        Parameters
        ----------
        images: np.ndarray
            | Color / grayscale images of shape
            | (n_images, height, width, n_channels) or
            | (n_images, height, width)

        Returns
        -------
        X: np.ndarray
            A set of feature vectors of shape (n_images, n_features)
            where :code:`n_features` is determined by the hyperparameters
        """
        images = self.process_input(images)
        # images.shape == (n_images, n_channels, y, x)

        filters_l1 = components_to_filters(
            self.pca_l1.components_,
            n_channels=images.shape[1],
            filter_shape=self.filter_shape_l1,
        )
        filters_l2 = components_to_filters(
            self.pca_l2.components_,
            n_channels=1,
            filter_shape=self.filter_shape_l2
        )

        if gpu_enabled():
            images = to_gpu(images)
            filters_l1 = to_gpu(filters_l1)
            filters_l2 = to_gpu(filters_l2)

        images = convolution_2d(
            images,
            filters_l1,
            stride=self.step_shape_l1
        ).data

        images = xp.swapaxes(images, 0, 1)
        # L1.shape == (L1, n_images, y, x)
        # iterate over each L1 output
        X = []
        for maps in images:
            n_images, h, w = maps.shape
            maps = convolution_2d(
                maps.reshape(n_images, 1, h, w),  # 1 channel images
                filters_l2,
                stride=self.step_shape_l2
            ).data
            # maps.shape == (n_images, L2, y, x) right here
            maps = binarize(maps)
            maps = binary_to_decimal(maps)
            # maps.shape == (n_images, y, x)
            x = self.histogram(maps)
            # x is a set of feature vectors.
            # The shape of x is (n_images, vector length)
            X.append(x)

        # concatenate over L1
        X = xp.hstack(X)
        if gpu_enabled():
            X = to_cpu(X)
        X = X.astype(np.float64)
        # The shape of X is (n_images, L1 * vector length)
        return X

    def validate_structure(self):
        """
        Check that the filter visits all pixels of input images without
        dropping any information.

        Raises
        ------
        ValueError:
            if the network structure does not satisfy the above constraint.
        """
        def is_valid_(input_shape, filter_shape, step_shape):
            # Each axis is fully covered iff the last filter position plus
            # the filter size lands exactly on the input boundary.
            ys, xs = steps(input_shape, filter_shape, step_shape)
            fh, fw = filter_shape
            h, w = input_shape
            if ys[-1] + fh != h or xs[-1] + fw != w:
                raise ValueError("Invalid network structure.")
            return output_shape(ys, xs)

        output_shape_l1 = is_valid_(self.image_shape,
                                    self.filter_shape_l1,
                                    self.step_shape_l1)

        output_shape_l2 = is_valid_(output_shape_l1,
                                    self.filter_shape_l2,
                                    self.step_shape_l2)

        # BUG FIX: the pooling layer must be validated with its *step* shape.
        # Previously filter_shape_pooling was passed twice, so the pooling
        # stride actually used in `histogram` was never checked.
        is_valid_(
            output_shape_l2,
            self.filter_shape_pooling,
            self.step_shape_pooling
        )
| IshitaTakeshi/PCANet | pcanet.py | Python | mit | 13,966 | [
"VisIt"
] | 2aff4c0af19ee86fd645f1d602f3b29eb1d690d1f40064b325db01a94881f401 |
import numpy as np
import ray
from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch
from ray.tune.suggest import ConcurrencyLimiter
import unittest
def loss(config, reporter):
    """Report the square of ``config["x"]`` as the loss — a trivially
    convex objective for the optimizer to minimize."""
    reporter(loss=config.get("x") ** 2)  # A simple function to optimize
class ConvergenceTest(unittest.TestCase):
    """Test convergence in gaussian process."""

    def test_convergence_gaussian_process(self):
        np.random.seed(0)
        ray.init(local_mode=True, num_cpus=1, num_gpus=1)

        # Parameter space explored by the Bayesian optimizer.
        search_space = {"x": (0, 20)}

        # Bayesian optimization search algorithm, one trial at a time.
        searcher = BayesOptSearch(
            search_space, metric="loss", mode="min", random_search_steps=10)
        searcher.repeat_float_precision = 5
        limited_searcher = ConcurrencyLimiter(searcher, 1)

        # Execution of the BO.
        analysis = tune.run(
            loss,
            # stop=EarlyStopping("loss", mode="min", patience=5),
            search_alg=limited_searcher,
            config={},
            num_samples=100,  # Number of iterations
            resources_per_trial={"cpu": 1, "gpu": 0},
            raise_on_failed_trial=False,
            fail_fast=True,
            verbose=1)

        assert len(analysis.trials) == 41
        ray.shutdown()
| richardliaw/ray | python/ray/tune/tests/test_convergence_gaussian_process.py | Python | apache-2.0 | 1,336 | [
"Gaussian"
] | 1ea0caf4b8d810fdad3336572c5df0359892577e9608e20a651532c485d25e7a |
import warnings
import operator
import sys
import functools as ft
from functools import reduce
import numpy as np
import xarray as xr
import pandas as pd
import dask.array as dsar
from dask import delayed
import scipy.signal as sps
import scipy.linalg as spl
from .detrend import detrend as _detrend
__all__ = [
"fft",
"ifft",
"dft",
"idft",
"power_spectrum",
"cross_spectrum",
"cross_phase",
"isotropize",
"isotropic_power_spectrum",
"isotropic_cross_spectrum",
"isotropic_powerspectrum",
"isotropic_crossspectrum",
"fit_loglog",
]
def _fft_module(da):
if da.chunks:
return dsar.fft
else:
return np.fft
def _apply_window(da, dims, window_type="hann"):
"""Creating windows in dimensions dims."""
if window_type == True:
window_type = "hann"
warnings.warn(
"Please provide the name of window adhering to scipy.signal.windows. The boolean option will be deprecated in future releases.",
FutureWarning,
)
elif window_type not in [
"hann",
"hamming",
"kaiser",
"tukey",
"parzen",
"taylor",
"boxcar",
"barthann",
"bartlett",
"blackman",
"blackmanharris",
"bohman",
"chebwin",
"cosine",
"dpss",
"exponential",
"flattop",
"gaussian",
"general_cosine",
"general_gaussian",
"general_hamming",
"triang",
"nuttall",
]:
raise NotImplementedError(
"Window type {window_type} not supported. Please adhere to scipy.signal.windows for naming convention."
)
if dims is None:
dims = list(da.dims)
else:
if isinstance(dims, str):
dims = [dims]
scipy_win_func = getattr(sps.windows, window_type)
if da.chunks:
def dask_win_func(n, sym=False):
return dsar.from_delayed(
delayed(scipy_win_func, pure=True)(n, sym=sym), (n,), float
)
win_func = dask_win_func
else:
win_func = scipy_win_func
windows = [
xr.DataArray(
win_func(len(da[d]), sym=False), dims=da[d].dims, coords=da[d].coords
)
for d in dims
]
return reduce(operator.mul, windows[::-1]), da * reduce(operator.mul, windows[::-1])
def _stack_chunks(da, dim, suffix="_segment"):
    """Reshape a DataArray so there is only one chunk along dimension `dim`

    Each dimension ``d`` in ``dim`` of length ``n`` with chunks of length
    ``c`` is split into two dimensions ``(d + suffix, d)`` of shape
    ``(n // c, c)``, so every chunk becomes an independent "segment".
    """
    data = da.data
    attr = da.attrs
    newdims = []
    newcoords = {}
    newshape = []
    for d in da.dims:
        if d in dim:
            axis_num = da.get_axis_num(d)
            # all chunks along the transformed axis must have equal length,
            # since each chunk becomes one segment of identical size
            if np.diff(da.chunks[axis_num]).sum() != 0:
                raise ValueError("Chunk lengths need to be the same.")
            n = len(da[d])
            chunklen = da.chunks[axis_num][0]
            coord_rs = da[d].data.reshape((int(n / chunklen), int(chunklen)))
            newdims.append(d + suffix)
            newdims.append(d)
            newshape.append(int(n / chunklen))
            newshape.append(int(chunklen))
            newcoords[d + suffix] = range(int(n / chunklen))
            # NOTE(review): the first segment's coordinate is reused for all
            # segments along d — presumably segments share a relative axis;
            # confirm against callers
            newcoords[d] = coord_rs[0]
        else:
            newdims.append(d)
            newshape.append(len(da[d]))
            newcoords[d] = da[d].data
    # rebuild with the (segment, within-segment) axis pair in place of d
    da = xr.DataArray(
        data.reshape(newshape), dims=newdims, coords=newcoords, attrs=attr
    )
    return da
def _freq(N, delta_x, real, shift):
# calculate frequencies from coordinates
# coordinates are always loaded eagerly, so we use numpy
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
# Discard negative frequencies from transform along last axis to be
# consistent with np.fft.rfftn
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(np.fft.rfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _ifreq(N, delta_x, real, shift):
# calculate frequencies from coordinates
# coordinates are always loaded eagerly, so we use numpy
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
irfftfreq = lambda Nx, dx: np.fft.fftfreq(
2 * (Nx - 1), dx
) # Not in standard numpy !
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(irfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _new_dims_and_coords(da, dim, wavenm, prefix):
    """Build transformed coordinates and the old-name -> new-name mapping.

    Forward transforms gain the prefix (``x`` -> ``freq_x``); already
    prefixed dims have it stripped (``freq_x`` -> ``x``).
    """
    swap_dims = dict()
    new_coords = dict()

    for d, k in zip(dim, wavenm):
        if d[: len(prefix)] == prefix:
            new_name = d[len(prefix):]
        else:
            new_name = prefix + d
        new_dim = xr.DataArray(k, dims=new_name, coords={new_name: k}, name=new_name)
        # record the (uniform) spacing of the transformed coordinate
        new_dim.attrs.update({"spacing": k[1] - k[0]})
        new_coords[new_name] = new_dim
        swap_dims[d] = new_name

    return new_coords, swap_dims
def _diff_coord(coord):
"""Returns the difference as a xarray.DataArray."""
v0 = coord.values[0]
calendar = getattr(v0, "calendar", None)
if calendar:
import cftime
ref_units = "seconds since 1800-01-01 00:00:00"
decoded_time = cftime.date2num(coord, ref_units, calendar)
coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)
return np.diff(coord)
elif pd.api.types.is_datetime64_dtype(v0):
return np.diff(coord).astype("timedelta64[s]").astype("f8")
else:
return np.diff(coord)
def _lag_coord(coord):
    """Returns the coordinate lag

    The lag is the middle element of the (ascending-ordered) coordinate,
    i.e. the offset of the grid centre from zero.
    """
    v0 = coord.values[0]
    calendar = getattr(v0, "calendar", None)
    if coord[-1] > coord[0]:
        coord_data = coord.data
    else:
        # decreasing coordinate: flip so the middle element is well defined
        coord_data = np.flip(coord.data, axis=-1)
    lag = coord_data[len(coord.data) // 2]
    if calendar:
        import cftime

        # cftime date: convert to seconds relative to a fixed epoch
        ref_units = "seconds since 1800-01-01 00:00:00"
        decoded_time = cftime.date2num(lag, ref_units, calendar)
        return decoded_time
    elif pd.api.types.is_datetime64_dtype(v0):
        return lag.astype("timedelta64[s]").astype("f8").data
    else:
        # NOTE(review): for a numpy scalar, `.data` is a memoryview, not the
        # value; callers appear to unwrap it via `.obj` (see fft's
        # "direct_lag" attribute) — confirm this round-trip is intended
        return lag.data
def dft(
    da, dim=None, true_phase=False, true_amplitude=False, **kwargs
):  # pragma: no cover
    """Deprecated alias of :func:`fft`; see its documentation."""
    deprecation_msg = (
        "This function has been renamed and will disappear in the future."
        + " Please use `fft` instead"
    )
    warnings.warn(deprecation_msg, FutureWarning)
    kwargs.update(dim=dim, true_phase=true_phase, true_amplitude=true_amplitude)
    return fft(da, **kwargs)
def idft(
    daft, dim=None, true_phase=False, true_amplitude=False, **kwargs
):  # pragma: no cover
    """Deprecated alias of :func:`ifft`; see its documentation."""
    deprecation_msg = (
        "This function has been renamed and will disappear in the future."
        + " Please use `ifft` instead"
    )
    warnings.warn(deprecation_msg, FutureWarning)
    kwargs.update(dim=dim, true_phase=true_phase, true_amplitude=true_amplitude)
    return ifft(daft, **kwargs)
def fft(
    da,
    spacing_tol=1e-3,
    dim=None,
    real_dim=None,
    shift=True,
    detrend=None,
    window=None,
    true_phase=False,
    true_amplitude=False,
    chunks_to_segments=False,
    prefix="freq_",
    **kwargs,
):
    """
    Perform discrete Fourier transform of xarray data-array `da` along the
    specified dimensions.

    .. math::
        daft = \mathbb{F}(da - \overline{da})

    Parameters
    ----------
    da : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float, optional
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed. If the inputs are dask arrays, the
        arrays must not be chunked along these dimensions.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    shift : bool, default
        Whether to shift the fft output. Default is `True`, unless `real_dim is not None`,
        in which case shift will be set to False always.
    detrend : {None, 'constant', 'linear'}
        If `constant`, the mean across the transform dimensions will be
        subtracted before calculating the Fourier transform (FT).
        If `linear`, the linear least-square fit will be subtracted before
        the FT. For `linear`, only dims of length 1 and 2 are supported.
    window : str, optional
        Whether to apply a window to the data before the Fourier
        transform is taken. A window will be applied to all the dimensions in
        dim. Please follow `scipy.signal.windows`' naming convention.
    true_phase : bool, optional
        If set to False, standard fft algorithm is applied on signal without consideration of coordinates.
        If set to True, coordinates location are correctly taken into account to evaluate Fourier Tranforrm phase and
        fftshift is applied on input signal prior to fft (fft algorithm intrinsically considers that input signal is on fftshifted grid).
    true_amplitude : bool, optional
        If set to True, output is multiplied by the spacing of the transformed variables to match theoretical FT amplitude.
        If set to False, amplitude regularisation by spacing is not applied (as in numpy.fft)
    chunks_to_segments : bool, optional
        Whether the data is chunked along the axis to take FFT.
    prefix : str
        The prefix for the new transformed dimensions.

    Returns
    -------
    daft : `xarray.DataArray`
        The output of the Fourier transformation, with appropriate dimensions.
    """
    if not true_phase and not true_amplitude:
        msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.dft to preserve the theoretical phasing and amplitude of Fourier Transform. Consider using xrft.fft to ensure future compatibility with numpy.fft like behavior and to deactivate this warning."
        warnings.warn(msg, FutureWarning)

    if dim is None:
        dim = list(da.dims)
    else:
        if isinstance(dim, str):
            dim = [dim]

    # backward compatibility: accept the deprecated `real` keyword
    if "real" in kwargs:
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.dft and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)

    if real_dim is not None:
        if real_dim not in da.dims:
            raise ValueError(
                "The dimension along which real FT is taken must be one of the existing dimensions."
            )
        else:
            dim = [d for d in dim if d != real_dim] + [
                real_dim
            ]  # real dim has to be moved or added at the end !

    if chunks_to_segments:
        da = _stack_chunks(da, dim)

    rawdims = da.dims  # take care of segmented dimesions, if any

    if real_dim is not None:
        da = da.transpose(
            *[d for d in da.dims if d not in [real_dim]] + [real_dim]
        )  # dimension for real transformed is moved at the end

    fftm = _fft_module(da)

    if real_dim is None:
        fft_fn = fftm.fftn
    else:
        # rfftn requires the real axis last and returns a half-spectrum,
        # so fftshift of the output is disabled
        shift = False
        fft_fn = fftm.rfftn

    # the axes along which to take ffts
    axis_num = [
        da.get_axis_num(d) for d in dim
    ]  # if there is a real dim , it has to be the last one

    N = [da.shape[n] for n in axis_num]

    # raise error if there are multiple coordinates attached to the dimension(s) over which the FFT is taken
    for d in dim:
        bad_coords = [
            cname for cname in da.coords if cname != d and d in da[cname].dims
        ]
        if bad_coords:
            raise ValueError(
                f"The input array contains coordinate variable(s) ({bad_coords}) whose dims include the transform dimension(s) `{d}`. "
                f"Please drop these coordinates (`.drop({bad_coords}`) before invoking xrft."
            )

    # verify even spacing of input coordinates
    delta_x = []
    lag_x = []
    for d in dim:
        diff = _diff_coord(da[d])
        delta = np.abs(diff[0])
        lag = _lag_coord(da[d])
        if not np.allclose(diff, diff[0], rtol=spacing_tol):
            raise ValueError(
                "Can't take Fourier transform because "
                "coodinate %s is not evenly spaced" % d
            )
        if delta == 0.0:
            raise ValueError(
                "Can't take Fourier transform because spacing in coordinate %s is zero"
                % d
            )
        delta_x.append(delta)
        lag_x.append(lag)

    if detrend is not None:
        if detrend == "linear":
            # linear detrend reorders dims internally; restore original order
            orig_dims = da.dims
            da = _detrend(da, dim, detrend_type=detrend).transpose(*orig_dims)
        else:
            da = _detrend(da, dim, detrend_type=detrend)

    if window is not None:
        _, da = _apply_window(da, dim, window_type=window)

    if true_phase:
        reversed_axis = [
            da.get_axis_num(d) for d in dim if da[d][-1] < da[d][0]
        ]  # handling decreasing coordinates
        f = fft_fn(
            fftm.ifftshift(np.flip(da, axis=reversed_axis), axes=axis_num),
            axes=axis_num,
        )
    else:
        f = fft_fn(da.data, axes=axis_num)

    if shift:
        f = fftm.fftshift(f, axes=axis_num)

    k = _freq(N, delta_x, real_dim, shift)

    # attach the frequency coordinates and swap dim names (x -> freq_x)
    newcoords, swap_dims = _new_dims_and_coords(da, dim, k, prefix)
    daft = xr.DataArray(
        f, dims=da.dims, coords=dict([c for c in da.coords.items() if c[0] not in dim])
    )
    daft = daft.swap_dims(swap_dims).assign_coords(newcoords)
    daft = daft.drop([d for d in dim if d in daft.coords])

    updated_dims = [
        daft.dims[i] for i in da.get_axis_num(dim)
    ]  # List of transformed dimensions

    if true_phase:
        # apply the phase shift that encodes the original coordinate offset
        for up_dim, lag in zip(updated_dims, lag_x):
            daft = daft * xr.DataArray(
                np.exp(-1j * 2.0 * np.pi * newcoords[up_dim] * lag),
                dims=up_dim,
                coords={up_dim: newcoords[up_dim]},
            )  # taking advantage of xarray broadcasting and ordered coordinates
            # NOTE(review): lag comes from _lag_coord and appears to be a
            # memoryview here; `.obj` recovers the underlying scalar — confirm
            daft[up_dim].attrs.update({"direct_lag": lag.obj})

    if true_amplitude:
        daft = daft * np.prod(delta_x)

    return daft.transpose(
        *[swap_dims.get(d, d) for d in rawdims]
    )  # Do nothing if da was not transposed
def ifft(
    daft,
    spacing_tol=1e-3,
    dim=None,
    real_dim=None,
    shift=True,
    true_phase=False,
    true_amplitude=False,
    chunks_to_segments=False,
    prefix="freq_",
    lag=None,
    **kwargs,
):
    """
    Perform inverse discrete Fourier transform of xarray data-array `daft` along the
    specified dimensions.

    .. math::
        da = \mathbb{F}(daft - \overline{daft})

    Parameters
    ----------
    daft : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float, optional
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    shift : bool, default
        Whether to shift the fft output. Default is `True`.
    chunks_to_segments : bool, optional
        Whether the data is chunked along the axis to take FFT.
    prefix : str
        The prefix for the new transformed dimensions.
    true_phase : bool, optional
        If set to False, standard ifft algorithm is applied on signal without consideration of coordinates order.
        If set to True, coordinates are correctly taken into account to evaluate Inverse Fourier Tranforrm phase and
        fftshift is applied on input signal prior to ifft (ifft algorithm intrinsically considers that input signal is on fftshifted grid).
    true_amplitude : bool, optional
        If set to True, output is divided by the spacing of the transformed variables to match theoretical IFT amplitude.
        If set to False, amplitude regularisation by spacing is not applied (as in numpy.ifft)
    lag : None, float or sequence of float and/or None, optional
        Output coordinates of transformed dimensions will be shifted by corresponding lag values and correct signal phasing will be preserved if true_phase is set to True.
        If lag is None (default), 'direct_lag' attributes of each dimension is used (or set to zero if not found).
        If defined, lag must have same length as dim.
        If lag is a sequence, a None element means that 'direct_lag' attribute will be used for the corresponding dimension
        Manually set lag to zero to get output coordinates centered on zero.

    Returns
    -------
    da : `xarray.DataArray`
        The output of the Inverse Fourier transformation, with appropriate dimensions.
    """
    if not true_phase and not true_amplitude:
        msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.idft to preserve the theoretical phasing and amplitude of Inverse Fourier Transform. Consider using xrft.ifft to ensure future compatibility with numpy.ifft like behavior and to deactivate this warning."
        warnings.warn(msg, FutureWarning)

    if dim is None:
        dim = list(daft.dims)
    else:
        if isinstance(dim, str):
            dim = [dim]

    # backward compatibility: accept the deprecated `real` keyword
    if "real" in kwargs:
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.idft and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)

    if real_dim is not None:
        if real_dim not in daft.dims:
            raise ValueError(
                "The dimension along which real IFT is taken must be one of the existing dimensions."
            )
        else:
            dim = [d for d in dim if d != real_dim] + [
                real_dim
            ]  # real dim has to be moved or added at the end !

    if lag is None:
        lag = [daft[d].attrs.get("direct_lag", 0.0) for d in dim]
        msg = "Default idft's behaviour (lag=None) changed! Default value of lag was zero (centered output coordinates) and is now set to transformed coordinate's attribute: 'direct_lag'."
        warnings.warn(msg, FutureWarning)
    else:
        if isinstance(lag, float) or isinstance(lag, int):
            lag = [lag]
        if len(dim) != len(lag):
            raise ValueError("dim and lag must have the same length.")
        if not true_phase:
            msg = "Setting lag with true_phase=False does not guarantee accurate idft."
            warnings.warn(msg, Warning)
        lag = [
            daft[d].attrs.get("direct_lag") if l is None else l
            for d, l in zip(dim, lag)
        ]  # enable lag of the form [3.2, None, 7]

    if true_phase:
        # undo the phase shift applied by the forward transform
        for d, l in zip(dim, lag):
            daft = daft * np.exp(1j * 2.0 * np.pi * daft[d] * l)

    if chunks_to_segments:
        daft = _stack_chunks(daft, dim)

    rawdims = daft.dims  # take care of segmented dimensions, if any

    if real_dim is not None:
        daft = daft.transpose(
            *[d for d in daft.dims if d not in [real_dim]] + [real_dim]
        )  # dimension for real transformed is moved at the end

    fftm = _fft_module(daft)

    if real_dim is None:
        fft_fn = fftm.ifftn
    else:
        fft_fn = fftm.irfftn

    # the axes along which to take ffts
    axis_num = [daft.get_axis_num(d) for d in dim]

    N = [daft.shape[n] for n in axis_num]

    # verify even spacing of input coordinates (It handle fftshifted grids)
    delta_x = []
    for d in dim:
        diff = _diff_coord(daft[d])
        delta = np.abs(diff[0])
        # NOTE(review): `d is not real_dim` compares strings by identity;
        # works when both names come from the same list, but `!=` would be safer
        l = _lag_coord(daft[d]) if d is not real_dim else daft[d][0].data
        if not np.allclose(
            diff, delta, rtol=spacing_tol
        ):  # means that input is not on regular increasing grid
            reordered_coord = daft[d].copy()
            reordered_coord = reordered_coord.sortby(d)
            diff = _diff_coord(reordered_coord)
            l = _lag_coord(reordered_coord)
            if np.allclose(
                diff, diff[0], rtol=spacing_tol
            ):  # means that input is on fftshifted grid
                daft = daft.sortby(d)  # reordering the input
            else:
                raise ValueError(
                    "Can't take Fourier transform because "
                    "coodinate %s is not evenly spaced" % d
                )
        if np.abs(l) > spacing_tol:
            raise ValueError(
                "Inverse Fourier Transform can not be computed because coordinate %s is not centered on zero frequency"
                % d
            )
        if delta == 0.0:
            raise ValueError(
                "Can't take Inverse Fourier transform because spacing in coordinate %s is zero"
                % d
            )
        delta_x.append(delta)

    axis_shift = [
        daft.get_axis_num(d) for d in dim if d is not real_dim
    ]  # remove real dim of the list

    f = fftm.ifftshift(
        daft.data, axes=axis_shift
    )  # Force to be on fftshift grid before Fourier Transform
    f = fft_fn(f, axes=axis_num)

    if not true_phase:
        f = fftm.ifftshift(f, axes=axis_num)

    if shift:
        f = fftm.fftshift(f, axes=axis_num)

    k = _ifreq(N, delta_x, real_dim, shift)

    # attach the direct-space coordinates and swap dim names (freq_x -> x)
    newcoords, swap_dims = _new_dims_and_coords(daft, dim, k, prefix)
    da = xr.DataArray(
        f,
        dims=daft.dims,
        coords=dict([c for c in daft.coords.items() if c[0] not in dim]),
    )
    da = da.swap_dims(swap_dims).assign_coords(newcoords)
    da = da.drop([d for d in dim if d in da.coords])

    with xr.set_options(
        keep_attrs=True
    ):  # This line ensures keeping spacing attribute in output coordinates
        for d, l in zip(dim, lag):
            tfd = swap_dims[d]
            # shift output coordinates by the requested lag
            da = da.assign_coords({tfd: da[tfd] + l})

    if true_amplitude:
        da = da / np.prod([float(da[up_dim].spacing) for up_dim in swap_dims.values()])

    return da.transpose(
        *[swap_dims.get(d, d) for d in rawdims]
    )  # Do nothing if daft was not transposed
def power_spectrum(
    da, dim=None, real_dim=None, scaling="density", window_correction=False, **kwargs
):
    """
    Calculates the power spectrum of da.

    .. math::
        da' = da - \overline{da}
    .. math::
        ps = \mathbb{F}(da') {\mathbb{F}(da')}^*

    Parameters
    ----------
    da : `xarray.DataArray`
        The data to be transformed
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    scaling : str, optional
        If 'density', it will normalize the output to power spectral density
        If 'spectrum', it will normalize the output to power spectrum
    window_correction : boolean
        If True, it will correct for the energy reduction resulting from applying a non-uniform window.
        This is the default behaviour of many tools for computing power spectrum (e.g scipy.signal.welch and scipy.signal.periodogram).
        If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.
        If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (ie that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.
        If False, the spectrum gives a representation of the power in the windowed signal.
        Note that when True, Parseval's theorem may only be approximately satisfied.
    kwargs : dict : see xrft.dft for argument list
    """
    # backward compatibility: accept the deprecated `density` keyword
    if "density" in kwargs:
        density = kwargs.pop("density")
        msg = (
            "density flag will be deprecated in future version of xrft.power_spectrum and replaced by scaling flag. "
            + 'density=True should be replaced by scaling="density" and '
            + "density=False will not be maintained.\nscaling flag is ignored !"
        )
        warnings.warn(msg, FutureWarning)
        scaling = "density" if density else "false_density"

    # backward compatibility: accept the deprecated `real` keyword
    if "real" in kwargs:
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.power_spectrum and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)

    kwargs.update(
        {"true_amplitude": True, "true_phase": False}
    )  # true_phase do not matter in power_spectrum

    daft = fft(da, dim=dim, real_dim=real_dim, **kwargs)
    updated_dims = [
        d for d in daft.dims if (d not in da.dims and "segment" not in d)
    ]  # Transformed dimensions
    ps = np.abs(daft) ** 2

    if real_dim is not None:
        # double the strictly-positive frequencies so the one-sided spectrum
        # carries the energy of the discarded negative half; the zero (and,
        # for even N, Nyquist) bins have no mirror and are left untouched
        real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
            0
        ]  # find transformed real dimension
        f = np.full(ps.sizes[real], 2.0)
        if len(da[real_dim]) % 2 == 0:
            f[0], f[-1] = 1.0, 1.0
        else:
            f[0] = 1.0
        ps = ps * xr.DataArray(f, dims=real, coords=ps[real].coords)

    if scaling == "density":
        if window_correction:
            if kwargs.get("window") == None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            else:
                # energy correction: divide by the window's mean square
                windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
                ps = ps / (windows ** 2).mean()
        fs = np.prod([float(ps[d].spacing) for d in updated_dims])
        ps *= fs
    elif scaling == "spectrum":
        if window_correction:
            if kwargs.get("window") == None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            else:
                # amplitude correction: divide by the squared window mean
                windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
                ps = ps / windows.mean() ** 2
        fs = np.prod([float(ps[d].spacing) for d in updated_dims])
        ps *= fs ** 2
    elif scaling == "false_density":  # Corresponds to density=False
        pass
    else:
        raise ValueError("Unknown {} scaling flag".format(scaling))

    return ps
def cross_spectrum(
    da1,
    da2,
    dim=None,
    real_dim=None,
    scaling="density",
    window_correction=False,
    true_phase=False,
    **kwargs,
):
    r"""
    Calculates the cross spectra of da1 and da2.

    .. math::
        da1' = da1 - \overline{da1};\ \ da2' = da2 - \overline{da2}
    .. math::
        cs = \mathbb{F}(da1') {\mathbb{F}(da2')}^*

    Parameters
    ----------
    da1 : `xarray.DataArray`
        The data to be transformed
    da2 : `xarray.DataArray`
        The data to be transformed
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    scaling : str, optional
        If 'density', it will normalize the output to power spectral density
        If 'spectrum', it will normalize the output to power spectrum
    window_correction : boolean
        If True, it will correct for the energy reduction resulting from applying a non-uniform window.
        This is the default behaviour of many tools for computing power spectrum (e.g scipy.signal.welch and scipy.signal.periodogram).
        If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.
        If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (ie that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.
        If False, the spectrum gives a representation of the power in the windowed signal.
        Note that when True, Parseval's theorem may only be approximately satisfied.
    true_phase : boolean
        Passed through to :func:`fft`; keeps phase information consistent
        with the input coordinates when True.
    kwargs : dict : see xrft.dft for argument list
    """
    if not true_phase:
        msg = (
            "true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_spectrum output. "
            + "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
            + "with numpy-like behavior where the coordinates are disregarded."
        )
        warnings.warn(msg, FutureWarning)
    if "real" in kwargs:
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.cross_spectrum and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)
    if "density" in kwargs:
        # Backwards compatibility with the deprecated boolean ``density``.
        density = kwargs.pop("density")
        msg = (
            "density flag will be deprecated in future version of xrft.cross_spectrum and replaced by scaling flag. "
            + 'density=True should be replaced by scaling="density" and '
            + "density=False will not be maintained.\nscaling flag is ignored !"
        )
        warnings.warn(msg, FutureWarning)
        scaling = "density" if density else "false_density"
    kwargs.update({"true_amplitude": True})

    daft1 = fft(da1, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
    daft2 = fft(da2, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
    if daft1.dims != daft2.dims:
        raise ValueError("The two datasets have different dimensions")
    updated_dims = [
        d for d in daft1.dims if (d not in da1.dims and "segment" not in d)
    ]  # Transformed dimensions
    cs = daft1 * np.conj(daft2)

    if real_dim is not None:
        # Fold the redundant half of the real transform into the kept half by
        # doubling all interior frequencies (endpoints are not duplicated).
        real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
            0
        ]  # find transformed real dimension
        f = np.full(cs.sizes[real], 2.0)
        if len(da1[real_dim]) % 2 == 0:
            f[0], f[-1] = 1.0, 1.0
        else:
            f[0] = 1.0
        cs = cs * xr.DataArray(f, dims=real, coords=cs[real].coords)

    if scaling == "density":
        if window_correction:
            if kwargs.get("window") is None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            # BUG FIX: the original referenced an undefined name ``da`` here
            # (NameError). The window only depends on the dims, which da1 and
            # da2 share (checked above), so it is computed from ``da1``.
            windows, _ = _apply_window(da1, dim, window_type=kwargs.get("window"))
            cs = cs / (windows ** 2).mean()
        fs = np.prod([float(cs[d].spacing) for d in updated_dims])
        cs *= fs
    elif scaling == "spectrum":
        if window_correction:
            if kwargs.get("window") is None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            # BUG FIX: same undefined-name fix as in the 'density' branch.
            windows, _ = _apply_window(da1, dim, window_type=kwargs.get("window"))
            cs = cs / windows.mean() ** 2
        fs = np.prod([float(cs[d].spacing) for d in updated_dims])
        cs *= fs ** 2
    elif scaling == "false_density":  # Corresponds to density=False
        pass
    else:
        raise ValueError("Unknown {} scaling flag".format(scaling))
    return cs
def cross_phase(da1, da2, dim=None, true_phase=False, **kwargs):
    r"""
    Calculates the cross-phase between da1 and da2.
    Returned values are in [-pi, pi].

    .. math::
        da1' = da1 - \overline{da1};\ \ da2' = da2 - \overline{da2}
    .. math::
        cp = \text{Arg} [\mathbb{F}(da1')^*, \mathbb{F}(da2')]

    Parameters
    ----------
    da1 : `xarray.DataArray`
        The data to be transformed
    da2 : `xarray.DataArray`
        The data to be transformed
    kwargs : dict : see xrft.dft for argument list
    """
    if not true_phase:
        warnings.warn(
            "true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_phase output. "
            + "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
            + "with numpy-like behavior where the coordinates are disregarded.",
            FutureWarning,
        )
    # The phase is just the complex argument of the cross spectrum.
    spectrum = cross_spectrum(da1, da2, dim=dim, true_phase=true_phase, **kwargs)
    cp = xr.ufuncs.angle(spectrum)
    if da1.name and da2.name:
        cp.name = "{}_{}_phase".format(da1.name, da2.name)
    return cp
def _binned_agg(
    array: np.ndarray,
    indices: np.ndarray,
    num_bins: int,
    *,
    func,
    fill_value,
    dtype,
) -> np.ndarray:
    """NumPy helper function for aggregating over bins.

    Parameters
    ----------
    array : np.ndarray
        Values to aggregate; its trailing ``indices.ndim`` axes are grouped.
    indices : np.ndarray
        Bin index per element; NaN entries are excluded from the aggregation.
    num_bins : int
        Number of output bins.
    func, fill_value, dtype
        Passed through to ``numpy_groupies.aggregate``.

    Raises
    ------
    ImportError
        If the optional ``numpy_groupies`` dependency is missing.
    """
    try:
        import numpy_groupies
    except ImportError:
        raise ImportError(
            "This function requires the `numpy_groupies` package to be installed. Please install it with pip or conda."
        )
    # Keep only elements with a valid (non-NaN) bin assignment.
    mask = ~np.isnan(indices)
    int_indices = indices[mask].astype(int)
    # NOTE: the original also computed an unused ``shape`` local; removed.
    result = numpy_groupies.aggregate(
        int_indices,
        array[..., mask],
        func=func,
        size=num_bins,
        fill_value=fill_value,
        dtype=dtype,
        axis=-1,
    )
    return result
def _groupby_bins_agg(
    array: xr.DataArray,
    group: xr.DataArray,
    bins,
    func="sum",
    fill_value=0,
    dtype=None,
    **cut_kwargs,
) -> xr.DataArray:
    """Faster equivalent of Xarray's groupby_bins(...).sum()."""
    # https://github.com/pydata/xarray/issues/4473
    binned = pd.cut(np.ravel(group), bins, **cut_kwargs)
    n_bins = binned.categories.size
    out_dim = group.name + "_bins"
    # Per-element bin code, reshaped back onto the grouping variable's grid.
    bin_indices = group.copy(data=binned.codes.reshape(group.shape))
    aggregated = xr.apply_ufunc(
        _binned_agg,
        array,
        bin_indices,
        input_core_dims=[bin_indices.dims, bin_indices.dims],
        output_core_dims=[[out_dim]],
        output_dtypes=[array.dtype],
        dask_gufunc_kwargs=dict(
            allow_rechunk=True,
            output_sizes={out_dim: n_bins},
        ),
        kwargs={
            "num_bins": n_bins,
            "func": func,
            "fill_value": fill_value,
            "dtype": dtype,
        },
        dask="parallelized",
    )
    aggregated.coords[out_dim] = binned.categories
    return aggregated
def isotropize(ps, fftdim, nfactor=4, truncate=False):
    r"""
    Isotropize a 2D power spectrum or cross spectrum
    by taking an azimuthal average.

    .. math::
        \text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2

    where :math:`N` is the number of azimuthal bins.

    Parameters
    ----------
    ps : `xarray.DataArray`
        The power spectrum or cross spectrum to be isotropized.
    fftdim : list
        The fft dimensions overwhich the isotropization must be performed.
    nfactor : int, optional
        Ratio of number of bins to take the azimuthal averaging with the
        data size. Default is 4.
    truncate : bool, optional
        If True, the spectrum will be truncated for wavenumbers larger than
        the Nyquist wavenumber.
    """
    # Radial wavenumber grid and the number of azimuthal bins.
    k = ps[fftdim[1]]
    l = ps[fftdim[0]]
    nbins = int(min(k.size, l.size) / nfactor)
    freq_r = np.sqrt(k ** 2 + l ** 2).rename("freq_r")
    kr = _groupby_bins_agg(freq_r, freq_r, bins=nbins, func="mean")
    if truncate:
        # Drop radial wavenumbers beyond the smaller axis' maximum.
        kmax = l.max() if k.max() > l.max() else k.max()
        kr = kr.where(kr <= kmax)
    else:
        msg = (
            "The flag `truncate` will be set to True by default in future version "
            + "in order to truncate the isotropic wavenumber larger than the "
            + "Nyquist wavenumber."
        )
        warnings.warn(msg, FutureWarning)
    iso_ps = (
        _groupby_bins_agg(ps, freq_r, bins=nbins, func="mean")
        .rename({"freq_r_bins": "freq_r"})
        .drop_vars("freq_r")
    )
    iso_ps.coords["freq_r"] = kr.data
    weighted = iso_ps * iso_ps.freq_r
    if truncate:
        return weighted.dropna("freq_r")
    return weighted
def isotropic_powerspectrum(*args, **kwargs):  # pragma: no cover
    """
    Deprecated alias of :func:`isotropic_power_spectrum`.
    """
    warnings.warn(
        "This function has been renamed and will disappear in the future."
        + " Please use isotropic_power_spectrum instead",
        Warning,
    )
    return isotropic_power_spectrum(*args, **kwargs)
def isotropic_power_spectrum(
    da,
    spacing_tol=1e-3,
    dim=None,
    shift=True,
    detrend=None,
    scaling="density",
    window=None,
    window_correction=False,
    nfactor=4,
    truncate=False,
    **kwargs,
):
    r"""
    Calculates the isotropic spectrum from the
    two-dimensional power spectrum by taking the
    azimuthal average.

    .. math::
        \text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2

    where :math:`N` is the number of azimuthal bins.

    Note: the method is not lazy does trigger computations.

    Parameters
    ----------
    da : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float, optional
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : list, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    shift : bool, optional
        Whether to shift the fft output.
    detrend : str, optional
        If `constant`, the mean across the transform dimensions will be
        subtracted before calculating the Fourier transform (FT).
        If `linear`, the linear least-square fit will be subtracted before
        the FT.
    scaling : str, optional
        If 'density', it will normalize the output to power spectral density.
        If 'spectrum', it will normalize the output to power spectrum.
        Replaces the deprecated boolean ``density`` keyword.
    window : str, optional
        Whether to apply a window to the data before the Fourier
        transform is taken. Please adhere to scipy.signal.windows for naming convention.
    window_correction : boolean, optional
        If True, correct for the energy (density) or amplitude (spectrum)
        reduction from applying a non-uniform window; see ``power_spectrum``.
    nfactor : int, optional
        Ratio of number of bins to take the azimuthal averaging with the
        data size. Default is 4.
    truncate : bool, optional
        If True, the spectrum will be truncated for wavenumbers larger than
        the Nyquist wavenumber.

    Returns
    -------
    iso_ps : `xarray.DataArray`
        Isotropic power spectrum
    """
    if "density" in kwargs:
        # Backwards compatibility with the deprecated boolean ``density``.
        density = kwargs.pop("density")
        scaling = "density" if density else "false_density"

    if dim is None:
        dim = da.dims
    if len(dim) != 2:
        raise ValueError("The Fourier transform should be two dimensional")

    ps = power_spectrum(
        da,
        spacing_tol=spacing_tol,
        dim=dim,
        shift=shift,
        detrend=detrend,
        scaling=scaling,
        window_correction=window_correction,
        window=window,
        **kwargs,
    )

    fftdim = ["freq_" + d for d in dim]
    return isotropize(ps, fftdim, nfactor=nfactor, truncate=truncate)
def isotropic_crossspectrum(*args, **kwargs):  # pragma: no cover
    """
    Deprecated alias of :func:`isotropic_cross_spectrum`.
    """
    warnings.warn(
        "This function has been renamed and will disappear in the future."
        + " Please use isotropic_cross_spectrum instead",
        Warning,
    )
    return isotropic_cross_spectrum(*args, **kwargs)
def isotropic_cross_spectrum(
    da1,
    da2,
    spacing_tol=1e-3,
    dim=None,
    shift=True,
    detrend=None,
    scaling="density",
    window=None,
    window_correction=False,
    nfactor=4,
    truncate=False,
    **kwargs,
):
    r"""
    Calculates the isotropic spectrum from the
    two-dimensional power spectrum by taking the
    azimuthal average.

    .. math::
        \text{iso}_{cs} = k_r N^{-1} \sum_{N} (\mathbb{F}(da1') {\mathbb{F}(da2')}^*)

    where :math:`N` is the number of azimuthal bins.

    Note: the method is not lazy does trigger computations.

    Parameters
    ----------
    da1 : `xarray.DataArray`
        The data to be transformed
    da2 : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float (default)
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : list (optional)
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    shift : bool (optional)
        Whether to shift the fft output.
    detrend : str (optional)
        If `constant`, the mean across the transform dimensions will be
        subtracted before calculating the Fourier transform (FT).
        If `linear`, the linear least-square fit will be subtracted before
        the FT.
    scaling : str (optional)
        If 'density', it will normalize the output to spectral density.
        If 'spectrum', it will normalize the output to spectrum.
        Replaces the deprecated boolean ``density`` keyword.
    window : str (optional)
        Whether to apply a window to the data before the Fourier
        transform is taken. Please adhere to scipy.signal.windows for naming convention.
    window_correction : boolean (optional)
        If True, correct for the energy (density) or amplitude (spectrum)
        reduction from applying a non-uniform window; see ``cross_spectrum``.
    nfactor : int (optional)
        Ratio of number of bins to take the azimuthal averaging with the
        data size. Default is 4.
    truncate : bool, optional
        If True, the spectrum will be truncated for wavenumbers larger than
        the Nyquist wavenumber.

    Returns
    -------
    iso_cs : `xarray.DataArray`
        Isotropic cross spectrum
    """
    if "density" in kwargs:
        # Backwards compatibility with the deprecated boolean ``density``.
        density = kwargs.pop("density")
        scaling = "density" if density else "false_density"

    if dim is None:
        dim = da1.dims
        dim2 = da2.dims
        if dim != dim2:
            raise ValueError("The two datasets have different dimensions")
    if len(dim) != 2:
        raise ValueError("The Fourier transform should be two dimensional")

    cs = cross_spectrum(
        da1,
        da2,
        spacing_tol=spacing_tol,
        dim=dim,
        shift=shift,
        detrend=detrend,
        scaling=scaling,
        window_correction=window_correction,
        window=window,
        **kwargs,
    )

    fftdim = ["freq_" + d for d in dim]
    return isotropize(cs, fftdim, nfactor=nfactor, truncate=truncate)
def fit_loglog(x, y):
    """
    Fit a line to isotropic spectra in log-log space

    Parameters
    ----------
    x : `numpy.array`
        Coordinate of the data
    y : `numpy.array`
        data

    Returns
    -------
    y_fit : `numpy.array`
        The linear fit
    a : float64
        Slope of the fit
    b : float64
        Intercept of the fit
    """
    # Least-squares line in base-2 log space, then map back.
    log_x = np.log2(x)
    slope, intercept = np.polyfit(log_x, np.log2(y), 1)
    y_fit = 2 ** (log_x * slope + intercept)
    return y_fit, slope, intercept
| rabernat/xrft | xrft/xrft.py | Python | mit | 44,136 | [
"Gaussian"
] | d69c2d5e722c9ed5cc888875702e3f6a3fbe43a5507840be497e1902bf07860b |
#!/usr/bin/env python
from __future__ import division
import imp
import os
import threading
import time
import numpy as np
import cv2
from cv_bridge import CvBridge, CvBridgeError
import rospy
import dynamic_reconfigure.server
from sensor_msgs.msg import Image
from std_msgs.msg import Float32, Header, String
from multi_tracker.msg import Contourinfo, Contourlist
from multi_tracker.msg import Trackedobject, Trackedobjectlist
from multi_tracker.srv import resetBackgroundService
from multi_tracker.srv import addImageToBackgroundService
# for basler ace cameras, use camera_aravis
# https://github.com/ssafarik/camera_aravis
# rosrun camera_aravis camnode
# default image: camera/image_raw
# For firefley cameras, camera1394 does not provide timestamps but otherwise
# works. Use point grey drivers.
# http://wiki.ros.org/pointgrey_camera_driver
# rosrun pointgrey_camera_driver camera_node
# default image: camera/image_mono
# The main tracking class, a ROS node
class Tracker:
    """ROS node that segments moving objects from a camera stream.

    Subscribes to an image topic, keeps a background model, and hands each
    frame to a pluggable ``process_image`` routine which publishes contours
    on ``multi_tracker/contours_<pipeline>``. ``process_image`` is attached
    to this class at runtime (see the ``__main__`` block of this script).
    """
    def __init__(self):
        '''
        Default image_topic for:
            Basler ace cameras with camera_aravis driver: camera/image_raw
            Pt Grey Firefly cameras with pt grey driver : camera/image_mono
        '''
        rospy.init_node('multi_tracker')
        rospy.sleep(1)

        # TODO load from a defaults.yaml?
        # default parameters (parameter server overides them)
        self.params = {
            'image_topic'           : 'camera/image_mono',
            'threshold'             : 20,
            'backgroundupdate'      : 0.001,
            'medianbgupdateinterval': 30,
            # fireflies are bgr8, basler gige cams are mono8
            'camera_encoding'       : 'mono8',
            'erode'                 : 1,
            'dilate'                : 2,
            'morph_open_kernel_size': 3,
            'max_change_in_frame'   : 0.2,
            'min_size'              : 5,
            'max_size'              : 200,
            'max_expected_area'     : 500,
            'denoise'               : True,
            # TODO what does this do? remove?
            'liveview'              : False,
            # see notes in delta_video_simplebuffer
            'roi_l'                 : 0,
            'roi_r'                 : -1,
            'roi_b'                 : 0,
            'roi_t'                 : -1,
            '~circular_mask_x'      : None,
            '~circular_mask_y'      : None,
            '~circular_mask_r'      : None,
            # TODO implement
            '~roi_points'           : None,
            # use moments for x,y,area instead of fitted ellipse
            'use_moments'           : True,
        }

        # TODO break this code out into a utility function
        # FIX: iterate over a snapshot -- the loop body deletes and re-adds
        # keys, which raises RuntimeError on Python 3 when iterating the
        # live dict view.
        for parameter, default_value in list(self.params.items()):
            use_default = False
            try:
                if parameter[0] == '~':
                    value = rospy.get_param(parameter)
                else:
                    p = 'multi_tracker/tracker/' + parameter
                    value = rospy.get_param(p)

                # for maintaining backwards compatibility w/ Floris' config
                # files that would use 'none' to signal default should be used.
                # may break some future use cases.
                if self.params[parameter] is None:
                    if isinstance(value, str):
                        use_default = True

            except KeyError:
                use_default = True

            if use_default:
                rospy.loginfo(rospy.get_name() + ' using default parameter: ' +
                    parameter + ' = ' + str(default_value))
                value = default_value

            if parameter[0] == '~':
                del self.params[parameter]
                parameter = parameter[1:]
            self.params[parameter] = value

        # If we are tracking an experiment that is being played back
        # ("retracking"), we don't want to further restrict roi, and we will
        # always use the same camera topic.
        if rospy.get_param('/use_sim_time', False):
            self.params['image_topic'] = 'camera/image_raw'
            '''
            self.params['roi_l'] = 0
            self.params['roi_r'] = -1
            self.params['roi_b'] = 0
            self.params['roi_t'] = -1
            '''

        if self.params['min_size'] < 5:
            rospy.logfatal('only contours that can be fit with ellipses are ' +
                'supported now. contours must have at least 5 pixels to be ' +
                'fit. please increase min_size parameter.')

        # TODO maybe just reduce to debug flag and not save data in that case?
        self.save_data = rospy.get_param('multi_tracker/tracker/save_data', True)
        self.debug = rospy.get_param('multi_tracker/tracker/debug', False)

        # A trailing integer in the node name selects the pipeline number and
        # triggers per-pipeline topic suffixes.
        node_name = rospy.get_name()
        last_name_component = node_name.split('_')[-1]
        # TODO see discussion in this portion in save_bag.py
        try:
            self.pipeline_num = int(last_name_component)
            remap_topics = True
        except ValueError:
            # warn if?
            self.pipeline_num = 1
            remap_topics = False

        # TODO should the experiment_basename have the pipeline # in it? ->
        # should each fly's data be saved to a separate directory (probably
        # not?)?
        self.experiment_basename = \
            rospy.get_param('multi_tracker/experiment_basename', None)
        if self.experiment_basename is None:
            rospy.logwarn('Basenames output by different nodes in this ' +
                'tracker run may differ!')
            self.experiment_basename = time.strftime('%Y%m%d_%H%M%S',
                time.localtime())
        # used by image_processing code that is spliced in with imp
        self.explicit_directories = \
            rospy.get_param('multi_tracker/explicit_directories', False)

        # initialize the node
        self.time_start = rospy.Time.now().to_sec()

        # background reset service
        self.reset_background_flag = False
        self.add_image_to_background_flag = False
        # TODO is this taking the right reset_background?
        # another function of similar name is loaded in here
        # with imp...
        self.reset_background_service = rospy.Service(
            'multi_tracker/tracker/reset_background',
            resetBackgroundService,
            self.reset_background
        )
        self.add_image_to_background_service = rospy.Service(
            'multi_tracker/tracker/add_image_to_background',
            addImageToBackgroundService,
            self.add_image_to_background
        )

        self.cvbridge = CvBridge()
        self.imgScaled = None
        self.backgroundImage = None
        # Serializes access to the frame buffer between the subscriber
        # callback thread and the main processing loop.
        self.lockBuffer = threading.Lock()
        self.image_buffer = []
        self.framestamp = None

        tracked_object_topic = 'multi_tracker/tracked_objects'
        if remap_topics:
            suffix = '_' + str(self.pipeline_num)
            tracked_object_topic = tracked_object_topic + suffix
        else:
            suffix = ''

        self.pubContours = rospy.Publisher('multi_tracker/contours_{}'.format(
            self.pipeline_num), Contourlist, queue_size=300)

        self.image_mask = None
        # TODO define dynamically
        # Size of header + data.
        sizeImage = 128+1024*1024*3
        # TODO have delta_video also publish a cropped version for this
        # pipeline?
        self.subImage = rospy.Subscriber(self.params['image_topic'],
            Image, self.image_callback, queue_size=60,
            buff_size=2*sizeImage, tcp_nodelay=True)

        # TODO launch from within a python script so i can actually
        # conditionally open their viewers (based on debug settings)?
        if self.debug:
            self.pub_mask = rospy.Publisher('multi_tracker/0_mask' + suffix,
                Image,
                queue_size=5)
            self.pub_threshed = rospy.Publisher(
                'multi_tracker/1_thresholded' + suffix, Image, queue_size=5)
            self.pub_denoised = rospy.Publisher(
                'multi_tracker/2_denoised' + suffix, Image, queue_size=5)
            self.pub_dilated = rospy.Publisher(
                'multi_tracker/3_dilated' + suffix, Image, queue_size=5)
            self.pub_eroded = rospy.Publisher('multi_tracker/4_eroded' + suffix,
                Image,
                queue_size=5)

        self.pub_processed = rospy.Publisher(
            'multi_tracker/processed_image' + suffix, Image, queue_size=5)

    def image_callback(self, rosimg):
        """Subscriber callback: enqueue the frame for the main loop."""
        with self.lockBuffer:
            self.image_buffer.append(rosimg)

    # TODO get rid of argument if not using them? or necessary
    # for rospy service callback? (return too?)
    def reset_background(self, service_call):
        """Service callback: reset the background model on the next frame."""
        self.reset_background_flag = True
        return 1

    # same thing about arg?
    def add_image_to_background(self, service_call):
        """Service callback: fold the next frame into the background model."""
        self.add_image_to_background_flag = True
        return 1

    def process_image_buffer(self, rosimg):
        """Convert one buffered ROS image, crop to the configured ROI and run
        the pluggable ``process_image`` routine on it."""
        # TODO check for problems this dtCamera stuff might cause
        if self.framestamp is not None:
            self.dtCamera = (rosimg.header.stamp - self.framestamp).to_sec()
        else:
            self.dtCamera = 0.03

        self.framenumber = rosimg.header.seq
        self.framestamp = rosimg.header.stamp

        # Convert the image.
        try:
            # might need to change to bgr for color cameras
            img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough')
        # FIX: 'except E, e' is Python-2-only syntax (SyntaxError on
        # Python 3); 'as' works on Python 2.6+ and 3.
        except CvBridgeError as e:
            rospy.logwarn('Exception converting background image from ROS to' +
                ' OpenCV: %s' % e)
            # TODO define dynamically. is this even consistent w/ above?
            img = np.zeros((320,240))

        # TODO is this unreachable now?
        if img is None:
            return

        self.imgScaled = img[self.params['roi_b']:self.params['roi_t'],
                             self.params['roi_l']:self.params['roi_r']]
        # (height, width)
        self.shapeImage = self.imgScaled.shape

        if self.backgroundImage is not None:
            if self.backgroundImage.shape != self.imgScaled.shape:
                # TODO why would this happen in one run?
                # i might be in favor of an error here?
                self.backgroundImage = None
                self.reset_background_flag = True

        ########### Call to image processing function ##########################
        # must be defined seperately - see "main" code at the bottom of this
        # script
        self.process_image()
        ########################################################################

    def main(self):
        """Main loop: drain the frame buffer until the node is shut down."""
        while (not rospy.is_shutdown()):
            with self.lockBuffer:
                time_then = rospy.Time.now()
                if len(self.image_buffer) > 0:
                    self.process_image_buffer(self.image_buffer.pop(0))

                if len(self.image_buffer) > 9:
                    pt = (rospy.Time.now() - time_then).to_sec()
                    rospy.logwarn('Tracking processing time exceeds ' +
                        'acquisition rate. Processing time: %f, Buffer: %d',
                        pt, len(self.image_buffer))

        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Resolve the user-selected image-processing routine from the parameter
    # server and splice it onto Tracker as its ``process_image`` method.
    tracker_node_basename = 'multi_tracker/tracker'
    image_processing_function = \
        rospy.get_param(tracker_node_basename + '/image_processor')
    image_processing_module = \
        rospy.get_param(tracker_node_basename + '/image_processing_module')

    if image_processing_module == 'default':
        # Fall back to the image_processing.py shipped next to this script.
        catkin_node_directory = os.path.dirname(os.path.realpath(__file__))
        image_processing_module = \
            os.path.join(catkin_node_directory, 'image_processing.py')

    # put behind debug flags
    print('looking for image_processing module: ' + image_processing_module)
    print('trying to load: ' + image_processing_function)

    image_processing = \
        imp.load_source('image_processing', image_processing_module)
    Tracker.process_image = getattr(image_processing,
                                    image_processing_function)

    tracker = Tracker()
    tracker.main()
| tom-f-oconnell/multi_tracker | nodes/tracker_simplebuffer.py | Python | mit | 12,727 | [
"Firefly"
] | f0b09d445ee38861b69ad530faa8029e3f69f54ac388157aa8fd8b4ccdcd7274 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tcoffee(MakefilePackage):
    """T-Coffee is a multiple sequence alignment program."""

    homepage = "http://www.tcoffee.org/"
    git = "https://github.com/cbcrg/tcoffee.git"

    # Pinned to an upstream commit (no tagged release referenced here).
    version('2017-08-17', commit='f389b558e91d0f82e7db934d9a79ce285f853a71')

    # Perl is needed both to build and to run the bundled scripts.
    depends_on('perl', type=('build', 'run'))
    # External alignment/structure tools that T-Coffee can drive at runtime.
    depends_on('blast-plus')
    depends_on('dialign-tx')
    depends_on('viennarna')
    depends_on('clustalw')
    depends_on('tmalign')
    depends_on('muscle')
    depends_on('mafft')
    depends_on('pcma')
    depends_on('poamsa')
    depends_on('probconsrna')

    # The makefile lives in the 'compile' subdirectory of the source tree.
    build_directory = 'compile'

    def build(self, spec, prefix):
        # Build only the 't_coffee' make target from the compile directory.
        with working_dir(self.build_directory):
            make('t_coffee')

    def install(self, spec, prefix):
        # Install just the resulting binary into <prefix>/bin.
        mkdirp(prefix.bin)
        with working_dir(self.build_directory):
            install('t_coffee', prefix.bin)
| LLNL/spack | var/spack/repos/builtin/packages/tcoffee/package.py | Python | lgpl-2.1 | 1,103 | [
"BLAST"
] | 33e03c9037adb3856f251e4d399286dfb1a59f16d2959d7384c1c550925f00e6 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
import dask
from dask.diagnostics import ProgressBar
from itertools import product
from packaging.version import Version
from hyperspy.signal import BaseSignal
from hyperspy.defaults_parser import preferences
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.array_tools import (_requires_linear_rebin,
get_signal_chunk_slice)
from hyperspy.misc.hist_tools import histogram_dask
from hyperspy.misc.machine_learning import import_sklearn
from hyperspy.misc.utils import (multiply, dummy_context_manager, isiterable,
process_function_blockwise, guess_output_signal_size,)
# Module-level logger for this file.
_logger = logging.getLogger(__name__)

# Shared exception raised by methods that are unavailable for lazy signals.
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
    """Accepts BaseSignal, dask or numpy arrays and always produces either
    numpy or dask array.

    Parameters
    ----------
    thing : {BaseSignal, dask.array.Array, numpy.ndarray, None}
        the thing to be converted. ``None`` is passed through unchanged.
    chunks : {None, tuple of tuples}
        If None, the returned value is a numpy array. Otherwise returns dask
        array with the chunks as specified.

    Returns
    -------
    res : {numpy.ndarray, dask.array.Array, None}

    Raises
    ------
    ValueError
        If ``thing`` cannot be converted to the requested array type.
    """
    if thing is None:
        return None
    if isinstance(thing, BaseSignal):
        thing = thing.data
    if chunks is None:
        if isinstance(thing, da.Array):
            thing = thing.compute()
        if isinstance(thing, np.ndarray):
            return thing
        else:
            # Improvement: raise with a message (was a bare ValueError).
            raise ValueError(
                "Could not convert input of type %s to a numpy array" %
                type(thing))
    else:
        if isinstance(thing, np.ndarray):
            thing = da.from_array(thing, chunks=chunks)
        if isinstance(thing, da.Array):
            if thing.chunks != chunks:
                thing = thing.rechunk(chunks)
            return thing
        else:
            # Improvement: raise with a message (was a bare ValueError).
            raise ValueError(
                "Could not convert input of type %s to a dask array" %
                type(thing))
def _get_navigation_dimension_chunk_slice(navigation_indices, chunks):
    """Get the slice necessary to get the dask data chunk containing the
    navigation indices.

    Parameters
    ----------
    navigation_indices : iterable
        One integer index per navigation dimension.
    chunks : iterable
        Dask chunk sizes for the navigation dimensions.

    Returns
    -------
    chunk_slice : tuple of slices, or False
        The slice (one entry per navigation dimension) of the chunk which
        contains ``navigation_indices``, or ``False`` when no chunk does.

    Examples
    --------
    >>> import dask.array as da
    >>> from hyperspy._signals.lazy import _get_navigation_dimension_chunk_slice
    >>> data = da.random.random((128, 128, 256, 256), chunks=(32, 32, 32, 32))
    >>> s = hs.signals.Signal2D(data).as_lazy()
    >>> sig_dim = s.axes_manager.signal_dimension
    >>> nav_chunks = s.data.chunks[:-sig_dim]
    >>> navigation_indices = s.axes_manager._getitem_tuple[:-sig_dim]
    >>> _get_navigation_dimension_chunk_slice(navigation_indices, nav_chunks)
    (slice(0, 32, None), slice(0, 32, None))
    """
    for candidate in da.core.slices_from_chunks(chunks):
        # The chunk matches when every navigation index falls inside the
        # corresponding slice of the candidate.
        if all(
            sl.start <= index < sl.stop
            for sl, index in zip(candidate, navigation_indices)
        ):
            return candidate
    return False
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# The _cache_dask_chunk and _cache_dask_chunk_slice attributes are
# used to temporarily cache data contained in one chunk, when
# self.__call__ is used. Typically done when using plot or fitting.
# _cache_dask_chunk has the NumPy array itself, while
# _cache_dask_chunk_slice has the navigation dimension chunk which
# the NumPy array originates from.
self._cache_dask_chunk = None
self._cache_dask_chunk_slice = None
if not self._clear_cache_dask_data in self.events.data_changed.connected:
self.events.data_changed.connect(self._clear_cache_dask_data)
def compute(self, close_file=False, show_progressbar=None, **kwargs):
"""Attempt to store the full signal in memory.
Parameters
----------
close_file : bool, default False
If True, attemp to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
%s
Returns
-------
None
"""
if "progressbar" in kwargs:
warnings.warn(
"The `progressbar` keyword is deprecated and will be removed "
"in HyperSpy 2.0. Use `show_progressbar` instead.",
VisibleDeprecationWarning,
)
show_progressbar = kwargs["progressbar"]
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
cm = ProgressBar if show_progressbar else dummy_context_manager
with cm():
da = self.data
data = da.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
compute.__doc__ %= SHOW_PROGRESSBAR_ARG
def rechunk(self,
nav_chunks="auto",
sig_chunks=-1,
inplace=True,
**kwargs):
"""Rechunks the data using the same rechunking formula from Dask
expect that the navigation and signal chunks are defined seperately.
Note, for most functions sig_chunks should remain ``None`` so that it
spans the entire signal axes.
Parameters
----------
nav_chunks : {tuple, int, "auto", None}
The navigation block dimensions to create.
-1 indicates the full size of the corresponding dimension.
Default is “auto” which automatically determines chunk sizes.
sig_chunks : {tuple, int, "auto", None}
The signal block dimensions to create.
-1 indicates the full size of the corresponding dimension.
Default is -1 which automatically spans the full signal dimension
**kwargs : dict
Any other keyword arguments for :py:func:`dask.array.rechunk`.
"""
if not isinstance(sig_chunks, tuple):
sig_chunks = (sig_chunks,)*len(self.axes_manager.signal_shape)
if not isinstance(nav_chunks, tuple):
nav_chunks = (nav_chunks,)*len(self.axes_manager.navigation_shape)
new_chunks = nav_chunks + sig_chunks
if inplace:
self.data = self.data.rechunk(new_chunks,
**kwargs)
else:
return self._deepcopy_with_new_data(self.data.rechunk(new_chunks,
**kwargs)
)
def close_file(self):
    """Close the file backing the data, if there is one.

    Only files backing a dask array created from an h5py DataSet (the
    default HyperSpy hdf5 reader) are currently supported; a warning
    is logged when no closable handle can be obtained.
    """
    try:
        handle = self._get_file_handle()
        handle.close()
    except AttributeError:
        # _get_file_handle() returned None (no hdf5-backed array found),
        # so `.close()` raised.
        _logger.warning("Failed to close lazy signal file")
def _get_file_handle(self, warn=True):
"""Return file handle when possible; currently only hdf5 file are
supported.
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
return self.data.dask[arrkey].file
except (AttributeError, ValueError):
if warn:
_logger.warning("Failed to retrieve file handle, either "
"the file is already closed or it is not "
"an hdf5 file.")
def _clear_cache_dask_data(self, obj=None):
self._cache_dask_chunk = None
self._cache_dask_chunk_slice = None
def _get_dask_chunks(self, axis=None, dtype=None):
    """Returns dask chunks.

    Aims:
        - Have at least one signal (or specified axis) in a single chunk,
          or as many as fit in memory.

    Parameters
    ----------
    axis : {int, string, None, axis, tuple}
        If axis is None (default), returns chunks for current data shape so
        that at least one signal is in the chunk. If an axis is specified,
        only that particular axis is guaranteed to be "not sliced".
    dtype : {string, np.dtype}
        The dtype of target chunks.

    Returns
    -------
    Tuple of tuples, dask chunks
    """
    dc = self.data
    dcshape = dc.shape
    # Keep the axes_manager sizes in sync with the actual data shape.
    for _axis in self.axes_manager._axes:
        if _axis.index_in_array < len(dcshape):
            _axis.size = int(dcshape[_axis.index_in_array])

    # `need_axes` are the axes that must not be split across chunks
    # (the signal axes by default).
    if axis is not None:
        need_axes = self.axes_manager[axis]
        if not np.iterable(need_axes):
            need_axes = [need_axes, ]
    else:
        need_axes = self.axes_manager.signal_axes

    # Use the larger of the target and current itemsize when estimating
    # the in-memory footprint of a single "signal".
    if dtype is None:
        dtype = dc.dtype
    elif not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)
    typesize = max(dtype.itemsize, dc.dtype.itemsize)
    want_to_keep = multiply([ax.size for ax in need_axes]) * typesize

    # @mrocklin reccomends to have around 100MB chunks, so we do that:
    num_that_fit = int(100. * 2.**20 / want_to_keep)

    # want to have at least one "signal" per chunk
    if num_that_fit < 2:
        # Fewer than two signals fit in the budget: chunk every other
        # axis to size 1 and keep each `need_axes` axis whole.
        chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
        for ax in need_axes:
            chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
        return tuple(chunks)

    sizes = [
        ax.size for ax in self.axes_manager._axes if ax not in need_axes
    ]
    indices = [
        ax.index_in_array for ax in self.axes_manager._axes
        if ax not in need_axes
    ]

    # Halve the largest remaining axis until the number of signals per
    # chunk fits within the ~100 MB budget.
    while True:
        if multiply(sizes) <= num_that_fit:
            break

        i = np.argmax(sizes)
        sizes[i] = np.floor(sizes[i] / 2)
    chunks = []
    ndim = len(dc.shape)
    for i in range(ndim):
        if i in indices:
            size = float(dc.shape[i])
            # Split the axis into roughly equal pieces no larger than
            # the budgeted size for that axis.
            split_array = np.array_split(
                np.arange(size), np.ceil(size / sizes[indices.index(i)]))
            chunks.append(tuple(len(sp) for sp in split_array))
        else:
            # Axis that must stay whole: a single chunk spanning it.
            chunks.append((dc.shape[i], ))
    return tuple(chunks)
def _get_navigation_chunk_size(self):
nav_axes = self.axes_manager.navigation_indices_in_array
nav_chunks = tuple([self.data.chunks[i] for i in sorted(nav_axes)])
return nav_chunks
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
    # Replace ``self.data`` with its dask representation, rechunked so
    # that ``axis`` (the signal axes by default) is not split across
    # chunks when ``rechunk`` is enabled. See ``_lazy_data``.
    self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
    from hyperspy.misc import rgb_tools
    # To be consistent with the rechunk argument of other method, we use
    # 'dask_auto' in favour of a chunking which doesn't split signal space.
    if rechunk:
        rechunk = 'dask_auto'
    # rgb dtypes pass through unchanged; everything else is normalised
    # to a numpy dtype before delegating to the non-lazy implementation.
    if not isinstance(dtype, np.dtype) and dtype not in rgb_tools.rgb_dtypes:
        dtype = np.dtype(dtype)
    super().change_dtype(dtype)
    self._make_lazy(rechunk=rechunk, dtype=dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
def _lazy_data(self, axis=None, rechunk=True, dtype=None):
    """Return the data as a dask array, rechunked if necessary.

    Parameters
    ----------
    axis: None, DataAxis or tuple of data axes
        The data axis that must not be broken into chunks when `rechunk`
        is `True`. If None, it defaults to the current signal axes.
    rechunk: bool, "dask_auto"
        If `True`, rechunk the data if necessary, making sure that the
        axes in ``axis`` are not split into chunks. If `False`, do not
        rechunk unless the data is not a dask array yet, in which case
        it is chunked as if rechunk was `True`. If "dask_auto", rechunk
        if necessary using dask's automatic chunk guessing.
    """
    if rechunk == "dask_auto":
        target_chunks = "auto"
    else:
        target_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
    if not isinstance(self.data, da.Array):
        # Wrap plain (possibly masked) numpy data in a dask array,
        # replacing masked values with NaN first.
        arr = self.data
        if isinstance(arr, np.ma.masked_array):
            arr = np.where(arr.mask, np.nan, arr)
        res = da.from_array(arr, chunks=target_chunks)
    else:
        res = self.data
        if rechunk and self.data.chunks != target_chunks:
            _logger.info(
                "Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
            res = self.data.rechunk(target_chunks)
            _logger.info(
                "Final chunks: %s " % str(res.chunks))
    assert isinstance(res, da.Array)
    return res
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, rechunk=True):
def get_dask_function(numpy_name):
# Translate from the default numpy to dask functions
translations = {'amax': 'max', 'amin': 'min'}
if numpy_name in translations:
numpy_name = translations[numpy_name]
return getattr(da, numpy_name)
function = get_dask_function(function.__name__)
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes, )
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
# For reduce operations the actual signal and navigation
# axes configuration does not matter. Hence we leave
# dask guess the chunks
if rechunk is True:
rechunk = "dask_auto"
current_data = self._lazy_data(rechunk=rechunk)
# Apply reducing function
new_data = function(current_data, axis=ar_axes)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s = self._deepcopy_with_new_data(new_data)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def _get_cache_dask_chunk(self, indices):
    """Index into the data via an in-memory cache of the current chunk.

    Accessing a single navigation position in a chunked HDF5 file
    forces the whole chunk to be loaded from disk. To make repeated
    access within the same chunk fast (e.g. while plotting or fitting),
    the most recently used chunk is kept in memory in
    ``self._cache_dask_chunk`` together with the slice used to extract
    it, in ``self._cache_dask_chunk_slice``. Asking for an index in a
    different chunk discards the cache and loads the new chunk.

    This only works for functions using ``self.__call__``, for example
    plot and fitting functions; it does not work with the region of
    interest functionality. Use ``s._clear_cache_dask_data()`` to drop
    the cached chunk and slice.

    Parameters
    ----------
    indices : tuple
        Must be the same length as navigation dimensions in self.

    Returns
    -------
    value : NumPy array
        Same shape as the signal shape of self.

    Examples
    --------
    >>> import dask.array as da
    >>> s = hs.signals.Signal2D(da.ones((5, 10, 20, 30, 40))).as_lazy()
    >>> value = s._get_cache_dask_chunk((3, 6, 2))
    """
    sig_dim = self.axes_manager.signal_dimension
    nav_chunks = self._get_navigation_chunk_size()
    chunk_slice = _get_navigation_dimension_chunk_slice(
        indices[:-sig_dim], nav_chunks)

    if (self._cache_dask_chunk is None
            or chunk_slice != self._cache_dask_chunk_slice):
        # Cache miss: materialize the whole chunk once, so subsequent
        # accesses inside it are served from memory.
        self._cache_dask_chunk = np.asarray(self.data[chunk_slice])
        self._cache_dask_chunk_slice = chunk_slice

    # Shift the leading indices so they are relative to the cached chunk.
    local_indices = list(indices)
    for dim, dim_slice in enumerate(chunk_slice):
        local_indices[dim] -= dim_slice.start
    return self._cache_dask_chunk[tuple(local_indices)]
def rebin(self, new_shape=None, scale=None,
          crop=False, dtype=None, out=None, rechunk=True):
    factors = self._validate_rebin_args_and_get_factors(
        new_shape=new_shape, scale=scale)
    # Lazy rebin only supports integer-divisor (non-linear) rebinning.
    if _requires_linear_rebin(arr=self.data, scale=factors):
        if new_shape:
            raise NotImplementedError(
                "Lazy rebin requires that the new shape is a divisor "
                "of the original signal shape e.g. if original shape "
                "(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
        raise NotImplementedError(
            "Lazy rebin requires scale to be integer and divisor of the "
            "original signal shape")
    # Keep the axis with the largest rebin factor unsplit while
    # rechunking, then delegate to the non-lazy implementation.
    axes_by_index = {ax.index_in_array: ax
                     for ax in self.axes_manager._axes}
    self._make_lazy(axis=axes_by_index[factors.argmax()], rechunk=rechunk)
    return super().rebin(new_shape=new_shape, scale=scale, crop=crop,
                         dtype=dtype, out=out)
rebin.__doc__ = BaseSignal.rebin.__doc__
def __array__(self, dtype=None):
    # Delegate to the underlying data's ``__array__`` so that
    # ``np.asarray(signal)`` works; for dask-backed data this
    # materializes the array.
    return self.data.__array__(dtype=dtype)
def _make_sure_data_is_contiguous(self):
    # For lazy signals "contiguity" is provided by rechunking so that
    # the signal axes are not split across chunks.
    self._make_lazy(rechunk=True)
def diff(self, axis, order=1, out=None, rechunk=True):
    if not self.axes_manager[axis].is_uniform:
        raise NotImplementedError(
            "Performing a numerical difference on a non-uniform axis "
            "is not implemented. Consider using `derivative` instead."
        )
    arr_axis = self.axes_manager[axis].index_in_array

    def _diff_lazy(arr, n, axis):
        # n-th order forward difference along `axis`, built from lazy
        # slice subtractions (assumes arr is a dask array already).
        n = int(n)
        if n == 0:
            return arr
        if n < 0:
            raise ValueError("order must be positive")
        upper = [slice(None)] * arr.ndim
        lower = [slice(None)] * arr.ndim
        upper[axis] = slice(1, None)
        lower[axis] = slice(None, -1)
        first_diff = arr[tuple(upper)] - arr[tuple(lower)]
        if n == 1:
            return first_diff
        return _diff_lazy(first_diff, n - 1, axis=axis)

    new_data = _diff_lazy(self._lazy_data(axis=axis, rechunk=rechunk),
                          order, arr_axis)
    if not new_data.ndim:
        new_data = new_data.reshape((1,))
    s = out or self._deepcopy_with_new_data(new_data)
    if out:
        if out.data.shape != new_data.shape:
            raise ValueError(
                "The output shape %s does not match the shape of "
                "`out` %s" % (new_data.shape, out.data.shape))
        out.data = new_data
    # Shift the axis offset by order * scale / 2 to account for the
    # forward difference.
    diff_axis = s.axes_manager[axis]
    diff_axis.offset = (self.axes_manager[axis].offset +
                        order * diff_axis.scale / 2)
    s.get_dimensions_from_data()
    if out is None:
        return s
    out.events.data_changed.trigger(obj=out)
diff.__doc__ = BaseSignal.diff.__doc__
def integrate_simpson(self, axis, out=None):
axis = self.axes_manager[axis]
from scipy import integrate
axis = self.axes_manager[axis]
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
def get_histogram(self, bins='fd', out=None, rechunk=True, **kwargs):
    if 'range_bins' in kwargs:
        _logger.warning("'range_bins' argument not supported for lazy "
                        "signals")
        del kwargs['range_bins']
    from hyperspy.signals import Signal1D
    flat_data = self._lazy_data(rechunk=rechunk).flatten()
    hist, bin_edges = histogram_dask(flat_data, bins=bins, **kwargs)
    if out is None:
        hist_spec = Signal1D(hist)
        hist_spec._lazy = True
        hist_spec._assign_subclass()
    else:
        hist_spec = out
        # we always overwrite the data because the computation is lazy ->
        # the result signal is lazy. Assume that the `out` is already lazy
        hist_spec.data = hist
    # Calibrate the value axis from the bin edges.
    value_axis = hist_spec.axes_manager[0]
    value_axis.scale = bin_edges[1] - bin_edges[0]
    value_axis.offset = bin_edges[0]
    value_axis.size = hist.shape[-1]
    value_axis.name = 'value'
    value_axis.is_binned = True
    hist_spec.metadata.General.title = (
        self.metadata.General.title + " histogram")
    if out is None:
        return hist_spec
    out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
                                        correlation_factor):
    # Linear noise model: var = (counts * gain + offset) * correlation.
    raw_variance = (dc * gain_factor + gain_offset) * correlation_factor
    # The lower bound of the variance is the gaussian noise.
    return da.clip(raw_variance, gain_offset * correlation_factor, np.inf)
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
    if rechunk is True:
        # Use dask auto rechunk instead of HyperSpy's one, what should be
        # better for these operations
        rechunk = "dask_auto"
    data = self._lazy_data(rechunk=rechunk)
    flat = data.ravel()
    # Evaluate all seven statistics in a single dask computation:
    # (mean, std, min, q1, median, q3, max).
    return da.compute(
        da.nanmean(data),
        da.nanstd(data),
        da.nanmin(data),
        da.percentile(flat, [25, ]),
        da.percentile(flat, [50, ]),
        da.percentile(flat, [75, ]),
        da.nanmax(data),
    )
def _map_all(self, function, inplace=True, **kwargs):
    # Apply `function` to the whole dataset as one delayed task.
    delayed_result = dd(function)(self.data, **kwargs)
    if not inplace:
        # NOTE: the delayed (not yet wrapped) result is handed over,
        # matching the historic behavior of this method.
        return self._deepcopy_with_new_data(delayed_result)
    self.data = da.from_delayed(delayed_result, shape=self.data.shape,
                                dtype=self.data.dtype)
    return None
def _map_iterate(self,
                 function,
                 iterating_kwargs=None,
                 show_progressbar=None,
                 parallel=None,
                 max_workers=None,
                 ragged=False,
                 inplace=True,
                 output_signal_size=None,
                 output_dtype=None,
                 **kwargs):
    """Lazily apply `function` at every navigation position using
    :py:func:`dask.array.map_blocks`.

    Signals passed via ``iterating_kwargs`` are rechunked to match the
    navigation chunking of this signal so that matching navigation
    positions end up in the same block. When ``output_signal_size`` or
    ``output_dtype`` is None, they are guessed by calling `function`
    once on the first navigation position.

    Note: ``show_progressbar``, ``parallel`` and ``max_workers`` are
    accepted for signature compatibility; they are not used in the
    visible body (dask controls the scheduling).
    """
    # unpacking keyword arguments
    if iterating_kwargs is None:
        iterating_kwargs = {}
    elif isinstance(iterating_kwargs, (tuple, list)):
        iterating_kwargs = dict((k, v) for k, v in iterating_kwargs)

    nav_indexes = self.axes_manager.navigation_indices_in_array
    if ragged and inplace:
        raise ValueError("Ragged and inplace are not compatible with a lazy signal")
    # The signal dimensions of every chunk must span the full signal
    # shape; rechunk (out of place) when they do not.
    chunk_span = np.equal(self.data.chunksize, self.data.shape)
    chunk_span = [chunk_span[i] for i in self.axes_manager.signal_indices_in_array]
    if not all(chunk_span):
        _logger.info("The chunk size needs to span the full signal size, rechunking...")
        old_sig = self.rechunk(inplace=False)
    else:
        old_sig = self
    autodetermine = (output_signal_size is None or output_dtype is None) # try to guess output dtype and sig size?
    nav_chunks = old_sig._get_navigation_chunk_size()
    args = ()
    arg_keys = ()
    # Normalise every iterating kwarg to a lazy signal whose navigation
    # chunking matches this signal, then collect its dask array.
    for key in iterating_kwargs:
        if not isinstance(iterating_kwargs[key], BaseSignal):
            iterating_kwargs[key] = BaseSignal(iterating_kwargs[key].T).T
            warnings.warn(
                "Passing arrays as keyword arguments can be ambigous. "
                "This is deprecated and will be removed in HyperSpy 2.0. "
                "Pass signal instances instead.",
                VisibleDeprecationWarning)
        if iterating_kwargs[key]._lazy:
            if iterating_kwargs[key]._get_navigation_chunk_size() != nav_chunks:
                iterating_kwargs[key].rechunk(nav_chunks=nav_chunks)
        else:
            iterating_kwargs[key] = iterating_kwargs[key].as_lazy()
            iterating_kwargs[key].rechunk(nav_chunks=nav_chunks)
        # Pad missing signal dimensions with length-1 axes so the
        # argument broadcasts against the signal data.
        extra_dims = (len(old_sig.axes_manager.signal_shape) -
                      len(iterating_kwargs[key].axes_manager.signal_shape))
        if extra_dims > 0:
            old_shape = iterating_kwargs[key].data.shape
            new_shape = old_shape + (1,)*extra_dims
            args += (iterating_kwargs[key].data.reshape(new_shape), )
        else:
            args += (iterating_kwargs[key].data, )
        arg_keys += (key,)
    if autodetermine: #trying to guess the output d-type and size from one signal
        testing_kwargs = {}
        for key in iterating_kwargs:
            test_ind = (0,) * len(old_sig.axes_manager.navigation_axes)
            testing_kwargs[key] = np.squeeze(iterating_kwargs[key].inav[test_ind].data).compute()
        testing_kwargs = {**kwargs, **testing_kwargs}
        test_data = np.array(old_sig.inav[(0,) * len(old_sig.axes_manager.navigation_shape)].data.compute())
        output_signal_size, output_dtype = guess_output_signal_size(test_signal=test_data,
                                                                    function=function,
                                                                    ragged=ragged,
                                                                    **testing_kwargs)
    # Dropping/Adding Axes
    if output_signal_size == old_sig.axes_manager.signal_shape:
        drop_axis = None
        new_axis = None
        axes_changed = False
    else:
        axes_changed = True
        if len(output_signal_size) != len(old_sig.axes_manager.signal_shape):
            # Signal dimensionality changes: drop all signal axes and
            # append the new ones after the navigation axes.
            drop_axis = old_sig.axes_manager.signal_indices_in_array
            new_axis = tuple(range(len(nav_indexes), len(nav_indexes) + len(output_signal_size)))
        else:
            # Same dimensionality but different sizes: replace only the
            # axes whose size changed.
            drop_axis = [it for (o, i, it) in zip(output_signal_size,
                                                  old_sig.axes_manager.signal_shape,
                                                  old_sig.axes_manager.signal_indices_in_array)
                         if o != i]
            new_axis = drop_axis
    chunks = tuple([old_sig.data.chunks[i] for i in sorted(nav_indexes)]) + output_signal_size
    mapped = da.map_blocks(process_function_blockwise,
                           old_sig.data,
                           *args,
                           function=function,
                           nav_indexes=nav_indexes,
                           drop_axis=drop_axis,
                           new_axis=new_axis,
                           output_signal_size=output_signal_size,
                           dtype=output_dtype,
                           chunks=chunks,
                           arg_keys=arg_keys,
                           **kwargs)
    if inplace:
        self.data = mapped
        sig = self
    else:
        sig = self._deepcopy_with_new_data(mapped)
    if ragged:
        # Ragged output: keep only the navigation axes.
        axes_dicts = self.axes_manager._get_axes_dicts(
            self.axes_manager.navigation_axes
        )
        sig.axes_manager.__init__(axes_dicts)
        sig.axes_manager._ragged = True
        sig._assign_subclass()
        return sig

    # remove if too many axes
    if axes_changed:
        sig.axes_manager.remove(sig.axes_manager.signal_axes[len(output_signal_size):])
        # add additional required axes
        for ind in range(
                len(output_signal_size) - sig.axes_manager.signal_dimension, 0, -1):
            sig.axes_manager._append_axis(size=output_signal_size[-ind],
                                          navigate=False)
    if not ragged:
        sig.get_dimensions_from_data()
    return sig
def _block_iterator(self,
                    flat_signal=True,
                    get=dask.threaded.get,
                    navigation_mask=None,
                    signal_mask=None):
    """A function that allows iterating lazy signal data by blocks,
    defining the dask.Array.

    Parameters
    ----------
    flat_signal: bool
        returns each block flattened, such that the shape (for the
        particular block) is (navigation_size, signal_size), with
        optionally masked elements missing. If false, returns
        the equivalent of s.inav[{blocks}].data, where masked elements are
        set to np.nan or 0.
    get : dask scheduler
        the dask scheduler to use for computations;
        default `dask.threaded.get`
    navigation_mask : {BaseSignal, numpy array, dask array}
        The navigation locations marked as True are not returned (flat) or
        set to NaN or 0.
    signal_mask : {BaseSignal, numpy array, dask array}
        The signal locations marked as True are not returned (flat) or set
        to NaN or 0.

    Yields
    ------
    numpy array
        One array per navigation chunk.
    """
    self._make_lazy()
    data = self._data_aligned_with_axes
    nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
    # One index tuple per navigation chunk of the dask graph.
    indices = product(*[range(len(c)) for c in nav_chunks])
    signalsize = self.axes_manager.signal_size
    sig_reshape = (signalsize,) if signalsize else ()
    # Flatten signal space so each block is (nav..., signal_size).
    data = data.reshape((self.axes_manager.navigation_shape[::-1] +
                         sig_reshape))

    if signal_mask is None:
        signal_mask = slice(None) if flat_signal else \
            np.zeros(self.axes_manager.signal_size, dtype='bool')
    else:
        try:
            signal_mask = to_array(signal_mask).ravel()
        except ValueError:
            # re-raise with a message
            raise ValueError("signal_mask has to be a signal, numpy or"
                             " dask array, but "
                             "{} was given".format(type(signal_mask)))
        if flat_signal:
            # Invert: the mask selects the elements to KEEP when
            # indexing the flattened signal below.
            signal_mask = ~signal_mask

    if navigation_mask is None:
        nav_mask = da.zeros(
            self.axes_manager.navigation_shape[::-1],
            chunks=nav_chunks,
            dtype='bool')
    else:
        try:
            nav_mask = to_array(navigation_mask, chunks=nav_chunks)
        except ValueError:
            # re-raise with a message
            raise ValueError("navigation_mask has to be a signal, numpy or"
                             " dask array, but "
                             "{} was given".format(type(navigation_mask)))
    if flat_signal:
        nav_mask = ~nav_mask

    for ind in indices:
        # Pull the raw chunk (and matching mask chunk) straight from
        # the dask graph with the provided scheduler.
        chunk = get(data.dask,
                    (data.name, ) + ind + (0,) * bool(signalsize))
        n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
        if flat_signal:
            yield chunk[n_mask, ...][..., signal_mask]
        else:
            chunk = chunk.copy()
            # Use NaN for float-castable dtypes, 0 otherwise.
            value = np.nan if np.can_cast('float', chunk.dtype) else 0
            chunk[n_mask, ...] = value
            chunk[..., signal_mask] = value
            yield chunk.reshape(chunk.shape[:-1] +
                                self.axes_manager.signal_shape[::-1])
def decomposition(
    self,
    normalize_poissonian_noise=False,
    algorithm="SVD",
    output_dimension=None,
    signal_mask=None,
    navigation_mask=None,
    get=dask.threaded.get,
    num_chunks=None,
    reproject=True,
    print_info=True,
    **kwargs
):
    """Perform Incremental (Batch) decomposition on the data.

    The results are stored in ``self.learning_results``.

    Read more in the :ref:`User Guide <big_data.decomposition>`.

    Parameters
    ----------
    normalize_poissonian_noise : bool, default False
        If True, scale the signal to normalize Poissonian noise using
        the approach described in [KeenanKotula2004]_.
    algorithm : {'SVD', 'PCA', 'ORPCA', 'ORNMF'}, default 'SVD'
        The decomposition algorithm to use.
    output_dimension : int or None, default None
        Number of components to keep/calculate. If None, keep all
        (only valid for 'SVD' algorithm)
    get : dask scheduler
        the dask scheduler to use for computations;
        default `dask.threaded.get`
    num_chunks : int or None, default None
        the number of dask chunks to pass to the decomposition model.
        More chunks require more memory, but should run faster. Will be
        increased to contain at least ``output_dimension`` signals.
    navigation_mask : {BaseSignal, numpy array, dask array}
        The navigation locations marked as True are not used in the
        decomposition. Not implemented for the 'SVD' algorithm.
    signal_mask : {BaseSignal, numpy array, dask array}
        The signal locations marked as True are not used in the
        decomposition. Not implemented for the 'SVD' algorithm.
    reproject : bool, default True
        Reproject data on the learnt components (factors) after learning.
    print_info : bool, default True
        If True, print information about the decomposition being performed.
        In the case of sklearn.decomposition objects, this includes the
        values of all arguments of the chosen sklearn algorithm.
    **kwargs
        passed to the partial_fit/fit functions.

    References
    ----------
    .. [KeenanKotula2004] M. Keenan and P. Kotula, "Accounting for Poisson noise
       in the multivariate analysis of ToF-SIMS spectrum images", Surf.
       Interface Anal 36(3) (2004): 203-212.

    See Also
    --------
    * :py:meth:`~.learn.mva.MVA.decomposition` for non-lazy signals
    * :py:func:`dask.array.linalg.svd`
    * :py:class:`sklearn.decomposition.IncrementalPCA`
    * :py:class:`~.learn.rpca.ORPCA`
    * :py:class:`~.learn.ornmf.ORNMF`
    """
    if kwargs.get("bounds", False):
        warnings.warn(
            "The `bounds` keyword is deprecated and will be removed "
            "in v2.0. Since version > 1.3 this has no effect.",
            VisibleDeprecationWarning,
        )
        kwargs.pop("bounds", None)

    # Deprecate 'ONMF' for 'ORNMF'
    if algorithm == "ONMF":
        warnings.warn(
            "The argument `algorithm='ONMF'` has been deprecated and will "
            "be removed in future. Please use `algorithm='ORNMF'` instead.",
            VisibleDeprecationWarning,
        )
        algorithm = "ORNMF"

    # Check algorithms requiring output_dimension
    algorithms_require_dimension = ["PCA", "ORPCA", "ORNMF"]
    if algorithm in algorithms_require_dimension and output_dimension is None:
        raise ValueError(
            "`output_dimension` must be specified for '{}'".format(algorithm)
        )

    explained_variance = None
    explained_variance_ratio = None

    _al_data = self._data_aligned_with_axes
    nav_chunks = _al_data.chunks[: self.axes_manager.navigation_dimension]
    sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension :]

    num_chunks = 1 if num_chunks is None else num_chunks
    blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
    nblocks = multiply([len(c) for c in nav_chunks])

    # Make sure each batch contains at least `output_dimension` signals.
    if output_dimension and blocksize / output_dimension < num_chunks:
        num_chunks = np.ceil(blocksize / output_dimension)

    blocksize *= num_chunks

    # Initialize print_info
    to_print = [
        "Decomposition info:",
        f"  normalize_poissonian_noise={normalize_poissonian_noise}",
        f"  algorithm={algorithm}",
        f"  output_dimension={output_dimension}"
    ]

    # LEARN
    if algorithm == "PCA":
        if not import_sklearn.sklearn_installed:
            raise ImportError("algorithm='PCA' requires scikit-learn")

        obj = import_sklearn.sklearn.decomposition.IncrementalPCA(n_components=output_dimension)
        method = partial(obj.partial_fit, **kwargs)
        reproject = True
        to_print.extend(["scikit-learn estimator:", obj])

    elif algorithm == "ORPCA":
        from hyperspy.learn.rpca import ORPCA

        batch_size = kwargs.pop("batch_size", None)
        obj = ORPCA(output_dimension, **kwargs)
        method = partial(obj.fit, batch_size=batch_size)

    elif algorithm == "ORNMF":
        from hyperspy.learn.ornmf import ORNMF

        batch_size = kwargs.pop("batch_size", None)
        obj = ORNMF(output_dimension, **kwargs)
        method = partial(obj.fit, batch_size=batch_size)

    elif algorithm != "SVD":
        raise ValueError("'algorithm' not recognised")

    original_data = self.data
    try:
        _logger.info("Performing decomposition analysis")

        if normalize_poissonian_noise:
            _logger.info("Scaling the data to normalize Poissonian noise")
            data = self._data_aligned_with_axes
            ndim = self.axes_manager.navigation_dimension
            sdim = self.axes_manager.signal_dimension
            nm = da.logical_not(
                da.zeros(self.axes_manager.navigation_shape[::-1], chunks=nav_chunks)
                if navigation_mask is None
                else to_array(navigation_mask, chunks=nav_chunks)
            )
            sm = da.logical_not(
                da.zeros(self.axes_manager.signal_shape[::-1], chunks=sig_chunks)
                if signal_mask is None
                else to_array(signal_mask, chunks=sig_chunks)
            )
            ndim = self.axes_manager.navigation_dimension
            sdim = self.axes_manager.signal_dimension
            bH, aG = da.compute(
                data.sum(axis=tuple(range(ndim))),
                data.sum(axis=tuple(range(ndim, ndim + sdim))),
            )
            bH = da.where(sm, bH, 1)
            aG = da.where(nm, aG, 1)

            raG = da.sqrt(aG)
            rbH = da.sqrt(bH)

            coeff = raG[(...,) + (None,) * rbH.ndim] * rbH[(None,) * raG.ndim + (...,)]
            # BUGFIX: `map_blocks` is not in-place; previously the
            # sanitized result was discarded, leaving NaNs in `coeff`.
            coeff = coeff.map_blocks(np.nan_to_num)
            coeff = da.where(coeff == 0, 1, coeff)
            data = data / coeff
            self.data = data

        # LEARN
        if algorithm == "SVD":
            reproject = False
            from dask.array.linalg import svd

            try:
                self._unfolded4decomposition = self.unfold()
                # TODO: implement masking
                if navigation_mask is not None or signal_mask is not None:
                    raise NotImplementedError("Masking is not yet implemented for lazy SVD")

                U, S, V = svd(self.data)

                if output_dimension is None:
                    min_shape = min(min(U.shape), min(V.shape))
                else:
                    min_shape = output_dimension

                U = U[:, :min_shape]
                S = S[:min_shape]
                V = V[:min_shape]

                factors = V.T
                explained_variance = S ** 2 / self.data.shape[0]
                loadings = U * S
            finally:
                if self._unfolded4decomposition is True:
                    self.fold()
                    # BUGFIX: this was `is False` (a no-op comparison);
                    # assign so the flag is actually reset after folding.
                    self._unfolded4decomposition = False
        else:
            self._check_navigation_mask(navigation_mask)
            self._check_signal_mask(signal_mask)
            this_data = []
            try:
                for chunk in progressbar(
                    self._block_iterator(
                        flat_signal=True,
                        get=get,
                        signal_mask=signal_mask,
                        navigation_mask=navigation_mask,
                    ),
                    total=nblocks,
                    leave=True,
                    desc="Learn",
                ):
                    this_data.append(chunk)
                    if len(this_data) == num_chunks:
                        thedata = np.concatenate(this_data, axis=0)
                        method(thedata)
                        this_data = []

                if len(this_data):
                    thedata = np.concatenate(this_data, axis=0)
                    method(thedata)

            except KeyboardInterrupt:  # pragma: no cover
                pass

        # GET ALREADY CALCULATED RESULTS
        if algorithm == "PCA":
            explained_variance = obj.explained_variance_
            explained_variance_ratio = obj.explained_variance_ratio_
            factors = obj.components_.T

        elif algorithm == "ORPCA":
            factors, loadings = obj.finish()
            loadings = loadings.T

        elif algorithm == "ORNMF":
            factors, loadings = obj.finish()
            loadings = loadings.T

        # REPROJECT
        if reproject:
            if algorithm == "PCA":
                method = obj.transform

                def post(a):
                    return np.concatenate(a, axis=0)

            elif algorithm == "ORPCA":
                method = obj.project

                def post(a):
                    return np.concatenate(a, axis=1).T

            elif algorithm == "ORNMF":
                method = obj.project

                def post(a):
                    return np.concatenate(a, axis=1).T

            _map = map(
                lambda thing: method(thing),
                self._block_iterator(
                    flat_signal=True,
                    get=get,
                    signal_mask=signal_mask,
                    navigation_mask=navigation_mask,
                ),
            )
            H = []
            try:
                for thing in progressbar(_map, total=nblocks, desc="Project"):
                    H.append(thing)
            except KeyboardInterrupt:  # pragma: no cover
                pass
            loadings = post(H)

        if explained_variance is not None and explained_variance_ratio is None:
            explained_variance_ratio = explained_variance / explained_variance.sum()

        # RESHUFFLE "blocked" LOADINGS
        ndim = self.axes_manager.navigation_dimension
        if algorithm != "SVD":  # Only needed for online algorithms
            try:
                loadings = _reshuffle_mixed_blocks(
                    loadings, ndim, (output_dimension,), nav_chunks
                ).reshape((-1, output_dimension))
            except ValueError:
                # In case the projection step was not finished, it's left
                # as scrambled
                pass
    finally:
        self.data = original_data

    target = self.learning_results
    target.decomposition_algorithm = algorithm
    target.output_dimension = output_dimension
    if algorithm != "SVD":
        target._object = obj
    target.factors = factors
    target.loadings = loadings
    target.explained_variance = explained_variance
    target.explained_variance_ratio = explained_variance_ratio

    # Rescale the results if the noise was normalized
    if normalize_poissonian_noise is True:
        target.factors = target.factors * rbH.ravel()[:, np.newaxis]
        target.loadings = target.loadings * raG.ravel()[:, np.newaxis]

    # Print details about the decomposition we just performed
    if print_info:
        print("\n".join([str(pr) for pr in to_print]))
def plot(self, navigator='auto', **kwargs):
    if self.axes_manager.ragged:
        raise RuntimeError("Plotting ragged signal is not supported.")
    if isinstance(navigator, str):
        if navigator == 'spectrum':
            # We don't support the 'spectrum' option to keep it simple
            _logger.warning("The `navigator='spectrum'` option is not "
                            "supported for lazy signals, 'auto' is used "
                            "instead.")
            navigator = 'auto'
        if navigator == 'auto':
            n_nav = self.axes_manager.navigation_dimension
            if n_nav in (1, 2):
                # Compute (and cache) a navigator image on demand.
                if self.navigator is None:
                    self.compute_navigator()
                navigator = self.navigator
            elif n_nav > 2:
                navigator = 'slider'
    super().plot(navigator=navigator, **kwargs)
def compute_navigator(self, index=None, chunks_number=None,
                      show_progressbar=None):
    """
    Compute the navigator by taking the sum over the single chunk containing
    the specified coordinate. Taking the sum over a single chunk is a
    computationally efficient approach to compute the navigator. The data
    can be rechunked by specifying the ``chunks_number`` argument.

    Parameters
    ----------
    index : (int, float, None) or iterable, optional
        Specifies where to take the sum, following HyperSpy indexing syntax
        for integer and float. If None, the index is the centre of the
        signal_space.
    chunks_number : (int, None) or iterable, optional
        Define the number of chunks in the signal space used to rechunk
        the data when calculating the navigator. Useful to define the range
        over which the sum is calculated.
        If None, the existing chunking will be considered when picking the
        chunk used in the navigator calculation.
    %s

    Returns
    -------
    None.

    Note
    ----
    The number of chunks will affect where the sum is taken. If the sum
    needs to be taken in the centre of the signal space (for example, in
    the case of diffraction pattern), the number of chunks needs to be an
    odd number, so that the middle is centered.
    """
    signal_shape = self.axes_manager.signal_shape
    if index is None:
        # Default to the centre of the signal space.
        index = [round(shape / 2) for shape in signal_shape]
    else:
        if not isiterable(index):
            # Broadcast a scalar index to every signal axis.
            index = [index] * len(signal_shape)
        # Convert int/float values to axis indices (HyperSpy indexing).
        index = [axis._get_index(_idx)
                 for _idx, axis in zip(index, self.axes_manager.signal_axes)]
    _logger.info(f"Using index: {index}")
    if chunks_number is None:
        chunks = self.data.chunks
    else:
        if not isiterable(chunks_number):
            chunks_number = [chunks_number] * len(signal_shape)
        # Determine the chunk size
        signal_chunks = da.core.normalize_chunks(
            [int(size / cn) for cn, size in zip(chunks_number, signal_shape)],
            shape=signal_shape
        )
        # Needs to reverse the chunks list to match dask chunking order
        signal_chunks = list(signal_chunks)[::-1]
        navigation_chunks = ['auto'] * len(self.axes_manager.navigation_shape)
        if Version(dask.__version__) >= Version("2.30.0"):
            # ``balance`` keyword only exists from dask 2.30.0 onwards.
            kwargs = {'balance': True}
        else:
            kwargs = {}
        chunks = self.data.rechunk([*navigation_chunks, *signal_chunks],
                                   **kwargs).chunks
    # Get the slice of the corresponding chunk; negative indexing picks
    # the trailing (signal) entries of ``chunks``.
    signal_size = len(signal_shape)
    signal_chunks = tuple(chunks[i - signal_size] for i in range(signal_size))
    _logger.info(f"Signal chunks: {signal_chunks}")
    isig_slice = get_signal_chunk_slice(index, chunks)
    _logger.info(f'Computing sum over signal dimension: {isig_slice}')
    axes = [axis.index_in_array for axis in self.axes_manager.signal_axes]
    navigator = self.isig[isig_slice].sum(axes)
    navigator.compute(show_progressbar=show_progressbar)
    # Record provenance of the navigator for later inspection.
    navigator.original_metadata.set_item('sum_from', str(isig_slice))
    # Transpose so the former navigation space becomes the plotted signal.
    self.navigator = navigator.T

compute_navigator.__doc__ %= SHOW_PROGRESSBAR_ARG
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
    """Reshuffle a dask block-shuffled array back into navigation order.

    Parameters
    ----------
    array : np.ndarray
        The array to reshuffle; rows are laid out block-by-block in the
        order produced by iterating over the dask navigation chunks.
    ndim : int
        The number of navigation (shuffled) dimensions.
    sshape : tuple of ints
        The signal shape appended to each navigation chunk when reshaping.
    nav_chunks : tuple of tuples of ints
        The dask chunk sizes along each navigation dimension.

    Returns
    -------
    np.ndarray
        The reshuffled array, or ``array`` unchanged when there is only a
        single chunk (no split points).
    """
    # NOTE(review): ``multiply`` and ``product`` come from module-level
    # imports not visible here; ``multiply`` is presumably a
    # product-of-elements helper -- confirm against the file's imports.
    splits = np.cumsum([multiply(ar)
                        for ar in product(*nav_chunks)][:-1]).tolist()
    if splits:
        # One ndarray per navigation chunk, reshaped to chunk + signal shape.
        all_chunks = [
            ar.reshape(shape + sshape)
            for shape, ar in zip(
                product(*nav_chunks), np.split(array, splits))
        ]

        def split_stack_list(what, step, axis):
            # Concatenate consecutive groups of ``step`` blocks along
            # ``axis``; collapse into a single array on the final pass.
            total = len(what)
            if total != step:
                return [
                    np.concatenate(
                        what[i:i + step], axis=axis)
                    for i in range(0, total, step)
                ]
            else:
                return np.concatenate(what, axis=axis)

        # Stitch blocks back together one navigation axis at a time,
        # starting from the innermost (last) axis.
        for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
            step = len(chunks)
            all_chunks = split_stack_list(all_chunks, step, axis)
        return all_chunks
    else:
        return array
| ericpre/hyperspy | hyperspy/_signals/lazy.py | Python | gpl-3.0 | 56,329 | [
"Gaussian"
] | 279dd2586d1a1c0a3741d72eef66d3cf596c54a7e7724c2c2ad8fb5784ccc2c6 |
"""Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
from deepnet import dbm
from deepnet import util
from deepnet import trainer as tr
from choose_matrix_library import *
import os
import sys
import numpy as np
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from deepnet import visualize
import deepnet
import scipy.io as sio
def LogMeanExp(x):
    """Return log(mean(exp(x))) computed in a numerically stable way.

    The maximum of ``x`` is factored out before exponentiating so that
    large entries cannot overflow.
    """
    m = x.max()
    return m + np.log(np.mean(np.exp(x - m)))
def LogSumExp(x):
    """Return log(sum(exp(x))) computed in a numerically stable way."""
    m = x.max()
    return m + np.log(np.sum(np.exp(x - m)))
def Display(w, hid_state, input_state, w_var=None, x_axis=None):
    """Plot a histogram of the weights and show the hidden activations.

    Parameters
    ----------
    w : matrix-like (must provide ``asarray``)
        Weight matrix to histogram in figure 1.
    hid_state : matrix-like
        Hidden-unit state passed to ``visualize.display_hidden``.
    input_state : matrix-like
        Unused; kept for interface compatibility.
    w_var, x_axis : optional
        Unused; kept for interface compatibility.
    """
    weights = w.asarray().flatten()
    plt.figure(1)
    plt.clf()
    plt.hist(weights, 100)
    visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
def impute_dbm_ais(model):
    """Run approximate pll using AIS on a DBM.

    NOTE(review): unimplemented stub -- the body contains only this
    docstring, so calling it silently returns None. An implementation is
    required before this inference method can be selected.
    """
def impute_rbm_gaussian_exact(model):
    """Run exact pseudo-log-likelihood (PLL) and imputation error on an RBM
    whose softmax input layer may connect to up to two bernoulli hidden
    layers and one gaussian hidden layer.

    Each input dimension is clamped to every possible label in turn; the
    accumulated per-label terms form log Z_v, and a softmax over labels
    yields the exact conditionals used for PLL and imputation error.

    Returns
    -------
    (pll_cpu, imperr_cpu) : per-example numpy arrays.
    """
    batchsize = model.batchsize
    input_layer = model.GetLayerByName('input_layer')
    hidden_layer = model.GetLayerByName('bernoulli_hidden1')
    bern2_hidden_layer = model.GetLayerByName('bernoulli2_hidden1')
    gaussian_layer = model.GetLayerByName('gaussian_hidden1')
    # Get input layer features
    dimensions = input_layer.dimensions
    numlabels = input_layer.numlabels
    data = input_layer.data
    # set up temp data structures (reuse existing buffers as scratch)
    for layer in model.layer:
        layer.foo = layer.statesize
        layer.bar = layer.deriv
    zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels, batchsize]))
    onesrow = cm.CUDAMatrix(np.ones([1, batchsize]))
    batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
    batchzeroslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
    batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
    datasize_squared = cm.CUDAMatrix(np.zeros([batchsize, batchsize]))
    datasize_eye = cm.CUDAMatrix(np.eye(batchsize))
    datasize_eye2 = cm.CUDAMatrix(np.eye(batchsize))
    if hidden_layer:
        hidden_bias = hidden_layer.params['bias']
        bedge = next(e for e in model.edge if e.node1.name == 'input_layer'
                     and e.node2.name == 'bernoulli_hidden1')
        w = bedge.params['weight']
    if bern2_hidden_layer:
        bern2_hidden_bias = bern2_hidden_layer.params['bias']
        bedge2 = next(e for e in model.edge if e.node1.name == 'input_layer'
                      and e.node2.name == 'bernoulli2_hidden1')
        w2 = bedge2.params['weight']
    if 'bias' in input_layer.params:
        input_bias = input_layer.params['bias']
    if gaussian_layer:
        gedge = next(e for e in model.edge if e.node1.name == 'input_layer'
                     and e.node2.name == 'gaussian_hidden1')
        gw = gedge.params['weight']
        input_diag = input_layer.params['diag']
        # Mean of the diagonal parameters over all visible units.
        diag_val = input_diag.sum() / (input_layer.dimensions * input_layer.numlabels)
    # RUN Imputation Error
    for dim_idx in range(dimensions):
        #-------------------------------------------
        # Set state of input variables
        input_layer.GetData()
        dim_offset = dim_idx * numlabels
        for label_idx in range(numlabels):
            batchslice.assign(batchzeroslice)
            # Assign state value: one-hot clamp of this dimension.
            label_offset = dim_idx * numlabels + label_idx
            input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, zeroslice)
            input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
            if hidden_layer:
                # Add the contributions from the bernoulli hidden layer:
                # sum_j softplus(w_j . v + b_j)
                cm.dot(w.T, input_layer.state, target=hidden_layer.state)
                hidden_layer.state.add_col_vec(hidden_bias)
                cm.log_1_plus_exp(hidden_layer.state)
                hidden_layer.state.sum(axis=0, target=batchslice)
            if bern2_hidden_layer:
                # Add the contributions from the second bernoulli hidden layer
                cm.dot(w2.T, input_layer.state, target=bern2_hidden_layer.state)
                bern2_hidden_layer.state.add_col_vec(bern2_hidden_bias)
                cm.log_1_plus_exp(bern2_hidden_layer.state)
                batchslice.add_sums(bern2_hidden_layer.state, axis=0)
            if 'bias' in input_layer.params:
                # Visible bias contribution.
                cm.dot(input_bias.T, input_layer.state, target=batchslice2)
                batchslice.add_row_vec(batchslice2)
            if gaussian_layer:
                # Add contributions from gaussian hidden layer: per-example
                # squared norm of gw.T v, extracted from the Gram diagonal.
                cm.dot(gw.T, input_layer.state, target=gaussian_layer.state)
                cm.dot(gaussian_layer.state.T, gaussian_layer.state, target=datasize_squared)
                datasize_squared.mult(datasize_eye, target=datasize_eye2)
                datasize_eye2.sum(axis=0, target=batchslice2)
                # Add constants from gaussian hidden layer
                integration_constant = gaussian_layer.dimensions * np.log(2*np.pi)
                integration_constant += input_layer.dimensions * diag_val
                batchslice2.add(integration_constant)
                batchslice2.mult(0.5)
                batchslice.add_row_vec(batchslice2)
            input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
    # Apply softmax on log Z_v as energies
    input_layer.foo.reshape((numlabels, dimensions * batchsize))
    input_layer.foo.apply_softmax()
    data.reshape((1, dimensions * batchsize))
    # Calculate Imputation Error
    input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
    input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
    input_layer.batchsize_temp.reshape((dimensions, batchsize))
    imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray()) / (0. + dimensions)
    # Calculate Pseudo ll
    input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
    input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, tiny=input_layer.tiny)
    input_layer.batchsize_temp.reshape((dimensions, batchsize))
    pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
    # Undo reshapes
    input_layer.foo.reshape((numlabels * dimensions, batchsize))
    data.reshape((dimensions, batchsize))
    # NOTE(review): batchzeroslice, batchslice2, datasize_squared,
    # datasize_eye and datasize_eye2 are never freed -- device-memory
    # leak on every call; consider freeing them here as well.
    zeroslice.free_device_memory()
    onesrow.free_device_memory()
    batchslice.free_device_memory()
    return pll_cpu, imperr_cpu
def impute_rbm_exact(model):
    """Run exact pseudo-log-likelihood (PLL) and imputation error on an RBM
    with a single bernoulli hidden layer.

    Each visible dimension is clamped to every possible label in turn;
    the resulting per-label terms form exact conditionals via a softmax.

    Returns
    -------
    (pll_cpu, imperr_cpu) : per-example numpy arrays.
    """
    batchsize = model.batchsize
    input_layer = model.GetLayerByName('input_layer')
    hidden_layer = model.GetLayerByName('hidden1')
    # Get input layer features
    dimensions = input_layer.dimensions
    numlabels = input_layer.numlabels
    data = input_layer.data
    # set up temp data structures (reuse existing buffers as scratch)
    for layer in model.layer:
        layer.foo = layer.statesize
        layer.bar = layer.deriv
    zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels, batchsize]))
    onesrow = cm.CUDAMatrix(np.ones([1, batchsize]))
    batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
    batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
    hidden_bias = hidden_layer.params['bias']
    input_bias = input_layer.params['bias']
    edge = model.edge[0]
    w = edge.params['weight']
    # RUN Imputation Error
    for dim_idx in range(dimensions):
        #-------------------------------------------
        # Set state of input variables
        input_layer.GetData()
        dim_offset = dim_idx * numlabels
        for label_idx in range(numlabels):
            # Assign state value: one-hot clamp of this dimension.
            label_offset = dim_idx * numlabels + label_idx
            input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, zeroslice)
            input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
            # Hidden contribution: sum_j softplus(w_j . v + b_j)
            cm.dot(w.T, input_layer.state, target=hidden_layer.state)
            hidden_layer.state.add_col_vec(hidden_bias)
            cm.log_1_plus_exp(hidden_layer.state)
            hidden_layer.state.sum(axis=0, target=batchslice)
            # Visible bias contribution.
            cm.dot(input_bias.T, input_layer.state, target=batchslice2)
            batchslice.add_row_vec(batchslice2)
            input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
    # Apply softmax on log Z_v as energies
    input_layer.foo.reshape((numlabels, dimensions * batchsize))
    input_layer.foo.apply_softmax()
    data.reshape((1, dimensions * batchsize))
    # Calculate Imputation Error
    input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
    input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
    input_layer.batchsize_temp.reshape((dimensions, batchsize))
    imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray()) / (0. + dimensions)
    # Calculate Pseudo ll
    input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
    input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, tiny=input_layer.tiny)
    input_layer.batchsize_temp.reshape((dimensions, batchsize))
    pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
    # Undo reshapes
    input_layer.foo.reshape((numlabels * dimensions, batchsize))
    data.reshape((dimensions, batchsize))
    # NOTE(review): batchslice2 is never freed -- device-memory leak.
    zeroslice.free_device_memory()
    onesrow.free_device_memory()
    batchslice.free_device_memory()
    return pll_cpu, imperr_cpu
def impute_mf(model, mf_steps, hidden_mf_steps, **opts):
    """Per-dimension pseudo-log-likelihood and imputation error using
    mean-field inference.

    Each input dimension is replaced in turn by a uniform distribution,
    ``mf_steps`` mean-field sweeps (each running ``hidden_mf_steps``
    hidden-layer updates) infer it back, and the reconstruction is scored
    against the data. Pass ``blosum90=True`` in ``opts`` to score with
    blosum90 instead of 0/1 accuracy.

    Returns
    -------
    (pll_cpu, imperr_cpu) : per-example numpy arrays.
    """
    # Initialize stuff
    batchsize = model.batchsize
    input_layer = model.GetLayerByName('input_layer')
    hidden_layers = []
    for layer in model.layer:
        if not layer.is_input:
            hidden_layers.append(layer)
    dimensions = input_layer.dimensions
    numlabels = input_layer.numlabels
    data = input_layer.data
    # set up temp data structures
    for layer in model.layer:
        layer.foo = layer.statesize
    input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels, batchsize]))
    input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
    pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
    imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
    # softmax of zeros == uniform distribution over labels
    input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels, batchsize]))
    input_layer.biasslice.apply_softmax()
    # INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
    for layer in model.layer:
        layer.state.assign(0)
        layer.ApplyActivation()

    def reshape_softmax(enter=True):
        # Toggle between label-major (per-softmax) and dimension-major views.
        if enter:
            input_layer.state.reshape((numlabels, dimensions * batchsize))
            input_layer.foo.reshape((numlabels, dimensions * batchsize))
            data.reshape((1, dimensions * batchsize))
            input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
        else:
            input_layer.state.reshape((numlabels * dimensions, batchsize))
            input_layer.foo.reshape((numlabels * dimensions, batchsize))
            data.reshape((dimensions, batchsize))
            input_layer.batchsize_temp.reshape((dimensions, batchsize))

    # RUN Imputation Error
    for dim_idx in range(dimensions):
        #-------------------------------------------
        # Set state of input variables: replace the target dimension by
        # a uniform distribution, keep all other dimensions at the data.
        input_layer.GetData()
        offset = dim_idx * numlabels
        input_layer.state.set_row_slice(offset, offset + numlabels, input_layer.biasslice)
        for layer in model.layer:
            if not layer.is_input:
                layer.state.assign(0)
        # Run MF steps
        for mf_idx in range(mf_steps):
            for hid_mf_idx in range(hidden_mf_steps):
                for layer in hidden_layers:
                    model.ComputeUp(layer, train=False, compute_input=False, step=0,
                                    maxsteps=0, use_samples=False, neg_phase=False)
            model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
                            maxsteps=0, use_samples=False, neg_phase=False)
            # Keep the inferred distribution for the target dimension but
            # clamp every other dimension back to the observed data.
            input_layer.state.get_row_slice(offset, offset + numlabels, target=input_layer.fooslice)
            input_layer.GetData()
            input_layer.state.set_row_slice(offset, offset + numlabels, input_layer.fooslice)
        # Calculate pll
        reshape_softmax(enter=True)
        input_layer.state.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, tiny=input_layer.tiny)
        reshape_softmax(enter=False)
        input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1, target=input_layer.barslice)
        pll.add_sums(input_layer.barslice, axis=0)
        # Calculate imputation error
        if 'blosum90' in opts:
            reshape_softmax(enter=True)
            input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
            reshape_softmax(enter=False)
            input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1, target=input_layer.barslice)
            imputation_err.add_sums(input_layer.barslice, axis=0)
        else:
            reshape_softmax(enter=True)
            input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
            reshape_softmax(enter=False)
            input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1, target=input_layer.barslice)
            # Accumulate (1 - correct) for this dimension.
            imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
            imputation_err.add(1.)
    #--------------------------------------
    # free device memory for newly created arrays
    pll_cpu = -pll.asarray()
    imperr_cpu = imputation_err.asarray()
    imperr_cpu /= (dimensions+0.)
    input_layer.fooslice.free_device_memory()
    input_layer.biasslice.free_device_memory()
    input_layer.barslice.free_device_memory()
    pll.free_device_memory()
    imputation_err.free_device_memory()
    return pll_cpu, imperr_cpu
def multicol_mf(model, multicols, **opts):
    """Mean-field PLL and imputation error where several columns are
    hidden jointly.

    Parameters
    ----------
    model : the RBM/DBM model.
    multicols : int array of shape (nBlocks, nCols); each row lists the
        input dimensions to impute together in one mean-field pass.
    opts : pass ``blosum90=True`` to score with blosum90 instead of 0/1
        accuracy.

    Returns
    -------
    (pll_cpu, imperr_cpu) : per-example numpy arrays, normalised by the
    total number of imputed columns (nBlocks * nCols).
    """
    # Initialize stuff
    batchsize = model.batchsize
    input_layer = model.GetLayerByName('input_layer')
    hidden_layers = []
    for layer in model.layer:
        if not layer.is_input:
            hidden_layers.append(layer)
    dimensions = input_layer.dimensions
    numlabels = input_layer.numlabels
    data = input_layer.data
    # set up temp data structures
    for layer in model.layer:
        layer.foo = layer.statesize
    input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels, batchsize]))
    input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
    pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
    imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
    # softmax of zeros == uniform distribution over labels
    input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels, batchsize]))
    input_layer.biasslice.apply_softmax()
    # Get the multicol dimensions
    nBlocks, nCols = multicols.shape
    # INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
    for layer in model.layer:
        layer.state.assign(0)
        layer.ApplyActivation()

    def reshape_softmax(enter=True):
        # Toggle between label-major (per-softmax) and dimension-major views.
        if enter:
            input_layer.state.reshape((numlabels, dimensions * batchsize))
            input_layer.foo.reshape((numlabels, dimensions * batchsize))
            data.reshape((1, dimensions * batchsize))
            input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
        else:
            input_layer.state.reshape((numlabels * dimensions, batchsize))
            input_layer.foo.reshape((numlabels * dimensions, batchsize))
            data.reshape((dimensions, batchsize))
            input_layer.batchsize_temp.reshape((dimensions, batchsize))

    # RUN Imputation Error
    for mult_idx in range(nBlocks):
        #-------------------------------------------
        # Set state of input variables: replace every column in this block
        # by a uniform distribution, keep the rest at the data.
        input_layer.GetData()
        for col_idx in range(nCols):
            dim_idx = multicols[mult_idx, col_idx]
            offset = dim_idx * numlabels
            input_layer.state.set_row_slice(offset, offset + numlabels, input_layer.biasslice)
        for layer in model.layer:
            if not layer.is_input:
                layer.state.assign(0)
        # Single mean-field up/down pass.
        for layer in hidden_layers:
            model.ComputeUp(layer, train=False, compute_input=False, step=0,
                            maxsteps=0, use_samples=False, neg_phase=False)
        model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
                        maxsteps=0, use_samples=False, neg_phase=False)
        # Calculate pll
        reshape_softmax(enter=True)
        input_layer.state.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, tiny=input_layer.tiny)
        reshape_softmax(enter=False)
        for col_idx in range(nCols):
            dim_idx = multicols[mult_idx, col_idx]
            input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1, target=input_layer.barslice)
            pll.add_sums(input_layer.barslice, axis=0)
        # Calculate imputation error
        if 'blosum90' in opts:
            reshape_softmax(enter=True)
            input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
            reshape_softmax(enter=False)
            for col_idx in range(nCols):
                dim_idx = multicols[mult_idx, col_idx]
                input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1, target=input_layer.barslice)
                imputation_err.add_sums(input_layer.barslice, axis=0)
        else:
            reshape_softmax(enter=True)
            input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
            reshape_softmax(enter=False)
            for col_idx in range(nCols):
                dim_idx = multicols[mult_idx, col_idx]
                input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1, target=input_layer.barslice)
                # Accumulate (1 - correct) for this column.
                imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
                imputation_err.add(1.)
    #--------------------------------------
    # free device memory for newly created arrays
    pll_cpu = -pll.asarray()
    imperr_cpu = imputation_err.asarray()
    imperr_cpu /= (nBlocks * nCols + 0.)
    input_layer.fooslice.free_device_memory()
    input_layer.biasslice.free_device_memory()
    input_layer.barslice.free_device_memory()
    pll.free_device_memory()
    imputation_err.free_device_memory()
    return pll_cpu, imperr_cpu
def Usage():
print '%s <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]'
if __name__ == '__main__':
    # NOTE(review): Python 2 syntax (print statements) throughout.
    from argparse import ArgumentParser
    parser = ArgumentParser(description="Run AIS")
    parser.add_argument("--model_file", type=str)
    parser.add_argument("--train_file", type=str)
    parser.add_argument("--infer-method", type=str, default='exact',
                        help='mf/gibbs/exact/gaussian_exact')
    parser.add_argument("--mf-steps", type=int, default=1)
    parser.add_argument("--hidden-mf-steps", type=int, default=1)
    parser.add_argument("--outf", type=str, help='Output File')
    parser.add_argument("--valid_only", action='store_true', help="only run the validation set")
    parser.add_argument("--blosum90", action='store_true', help="Calculate blosum90 scores")
    parser.add_argument("--ncols", type=int, help="Number of multiple columns")
    parser.add_argument("--multmode", type=str, help="Multicol mode", default='rand')
    args = parser.parse_args()
    if not args.outf:
        raise ValueError('Output file not defined')
    if not args.train_file or not args.model_file:
        raise ValueError('Models and data missing')
    board = tr.LockGPU()
    model_file = args.model_file
    train_file = args.train_file
    model = dbm.DBM(model_file, train_file)
    trainer_pb = util.ReadOperation(train_file)
    # NOTE(review): ``os`` is used below but is not in the module's explicit
    # imports -- confirm it is provided by ``from choose_matrix_library
    # import *`` or add ``import os`` at the top of the file.
    dataset = os.path.basename(trainer_pb.data_proto_prefix)
    # Fix paths: point the data prefix at the local datasets/ directory.
    dirname = os.path.split(model.t_op.data_proto_prefix)[1]
    model.t_op.data_proto_prefix = os.path.join('datasets/', dirname)
    # Evaluate every example exactly once, in order.
    model.t_op.skip_last_piece = False
    model.t_op.get_last_piece = True
    model.t_op.randomize = False
    model.LoadModelOnGPU()
    model.SetUpData()
    if args.valid_only:
        data_types = ['valid']
    else:
        data_types = ['train', 'valid', 'test']
    datagetters = {
        'train': model.GetTrainBatch,
        'valid': model.GetValidationBatch,
        'test': model.GetTestBatch
    }
    batchsizes = {
        'train': model.train_data_handler.num_batches,
        'valid': model.validation_data_handler.num_batches,
        'test': model.test_data_handler.num_batches
    }
    opts = {}
    cm.CUDAMatrix.init_random(seed=int(time.time()))
    # Exact enumeration is only implemented for single-hidden-layer models.
    if len(model.layer) > 2 and args.infer_method == 'exact':
        raise ValueError('Cannot use exact Exact inference for DBMs')
    from collections import defaultdict
    pll_data = defaultdict(list)
    imperr_data = defaultdict(list)
    for data_type in data_types:
        num_batches = batchsizes[data_type]
        datagetter = datagetters[data_type]
        for batch_idx in range(num_batches):
            print("Evalutating batch {}".format(batch_idx+1))
            # Load the next batch into the input layer.
            datagetter()
            if args.infer_method == 'mf':
                if args.blosum90:
                    pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps, blosum90=True)
                else:
                    pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps)
            elif args.infer_method == 'multicol':
                ncols = args.ncols;
                multicol_file = 'datasets/{0}/multicol/{1}_{2}.mat'.format(dataset, args.multmode, ncols)
                multicols = sio.loadmat(multicol_file)['multicols']
                multicols = np.asarray(multicols, dtype=np.int)
                multicols = multicols - 1;  # convert from matlab indexing
                if args.blosum90:
                    pll, imperr = multicol_mf(model, multicols, blosum90=True)
                else:
                    pll, imperr = multicol_mf(model, multicols)
            elif args.infer_method == 'exact':
                pll, imperr = impute_rbm_exact(model)
            elif args.infer_method == 'gaussian_exact':
                pll, imperr = impute_rbm_gaussian_exact(model)
            else:
                raise ValueError("Unknown infer method")
            pll, imperr = pll.flatten(), imperr.flatten()
            pll_data[data_type].append(pll)
            imperr_data[data_type].append(imperr)
        # Flatten the per-batch results into one array per split.
        pll_data[data_type] = np.concatenate(pll_data[data_type])
        imperr_data[data_type] = np.concatenate(imperr_data[data_type])
    #-------------------------------------------------------------------
    # Print and save the results
    for dtype in pll_data:
        pll = pll_data[dtype]
        imperr = imperr_data[dtype]
        print '%s : Pseudo-LogLikelihood %.5f, std %.5f' % (dtype, pll.mean(), pll.std())
        print '%s : Imputation Error %.5f, std %.5f' % (dtype, imperr.mean(), imperr.std())
    tr.FreeGPU(board)
    import pickle
    with open(args.outf, 'wb') as fout:
        pkldata = {'pll': pll_data, 'imperr': imperr_data}
        pickle.dump(pkldata, fout)
| smoitra87/gerbil | deepnet/impute.py | Python | bsd-3-clause | 24,315 | [
"Gaussian"
] | 3ac901c5d7f9625a12866172fde5f94c772c0f5aa2ebd0b1b0360ba6003af8ad |
"""
Example to generate a .fit, .mod and .dat file to feed in MrMoose for
demonstration. The model consists of one single power-laws and two black
bodies, with 15 data points. All is a mixture of unresolved and
blended/spatially identified components, with the black bodies being at
different redshifts (z=2 and z=4), but the z=4 black body is fitted with
the redshift as a free parameter.
"""
import sys
# adding the path
sys.path.insert(0, '/Users/guillaume/Desktop/MrMoose/MrMoose/')
from utils.models import *
import numpy as np
import utils.mm_utilities as mm
import utils.read_files as rd
#def fake_sync_source():
# define the parameters of the components of the model
# note: the normalisation will effectively be values-23 due to the Jansky transformation
# first group of component at same redshift
redshift1 = 2.0
func1a = 'sync_law' # need to make sure function is existing in model.py
norm_sync1 = 7.0 # parameters - normalisation
alpha_sync1 = -2. # parameters - spectral index
func1b = 'BB_law'
norm_bb1 = 1.0 # parameter - normalisation
temp1 = 40 # parameter - temperature [K]
# second group of component at other redshift
redshift2 = 4.0
func2a = 'BB_law'
norm_bb2 = 0.1 # parameter - normalisation
temp2 = 20 # parameter - temperature [K]
# making all in form to build the fake system
# array of the function name
comp_function = np.array([func1a, func1b, func2a])
# array of the redshift of the component
comp_redshift = np.array([redshift1, redshift1, redshift2])
# array of parameter values, organised as sub-arrays respecting function calls
comp_param = np.array([[norm_sync1, alpha_sync1], [norm_bb1, temp1], [norm_bb2, temp2]])
nu = 10**np.linspace(6, 18, 10000) # frequency range
# list of the filters, arrangements and components
filter_name = np.array(['VLA_L', 'VLA_C', 'VLA_C', 'VLA_X', 'VLA_X',
'ATCA_47', 'ALMA_3', 'ALMA_6', 'ALMA_6_nr1',
'laboca_870', 'spire_500', 'spire_350', 'spire_250',
'pacs_160', 'pacs_70'])
data_nature = np.array(['d', 'd', 'd', 'd', 'd',
'd', 'd', 'd', 'd',
'd', 'd', 'd', 'd',
'd', 'd']) # "d" for detections, "u" for upper limit
arrangement = np.array(['1', '1', '1', '1', '1',
'1', '2', '3', '4',
'5', '5', '5', '5',
'5', '5']) # do not forget the "," for the last element!
comp_number = np.array(['0', '0', '0', '0', '0',
'0', '0,1,2', '0,1', '2',
'1,2', '1,2', '1,2', '1,2',
'1,2', '1,2'])
sn_mod = np.array([5., 5., 5., 5., 5.,
5., 5., 5., 5.,
5., 5., 5., 5.,
5., 5.]) # SN detection to estimate noise level for each point
notes = np.array(["'sync'", "'sync'", "'sync'", "'sync'", "'sync'",
"'sync'", "'all'", "'host'", "'comp'",
"'host+comp'", "'host+comp'", "'host+comp'", "'host+comp'",
"'host+comp'", "'host+comp'"]) # notes on observations
RA_list = ['12h00m00s', '12h00m00.1s', '11h59m59.95s', '12h00m00.1s', '11h59m59.95s',
'12h00m00s', '12h00m00s', '12h00m00.1s', '11h59m59.95s',
'12h00m00s', '12h00m00s', '12h00m00s', '12h00m00s',
'12h00m00s', '12h00m00s']
Dec_list = ['-40d00m00s', '-39d59m59s', '-40d00m01s', '-39d59m59s', '-40d00m01s',
'-40d00m00s', '-40d00m00s', '-39d59m59s', '-40d00m00.5s',
'-40d00m00s', '-40d00m00s', '-40d00m00s', '-40d00m00s',
'-40d00m00s', '-40d00m00s']
res_list = [20., 1.0, 1.0, 0.5, 0.5,
10., 3.0, 0.3, 0.3,
15., 35., 25., 17.,
5., 4.]
# create the array to feed in the data file
fnu_mod = np.zeros(filter_name.size)
fnu_err = np.zeros(filter_name.size)
lambda0 = np.zeros(filter_name.size)
#temp = [globals()[param[number_of_component[i][j]]['func']]
# (xscale, param[number_of_component[i][j]]['current'], redshift)
# for j in range(len(number_of_component[i]))]
# convert the component numbers into integer list to create the combined SED following the provided arrangements
func_index = [map(int, (elem.replace(',', ''))) for elem in comp_number]
# run through the filters to create the simulated data
for i_filter, name_filter in enumerate(filter_name):
# calculate the sum of components for this arrangement
fnu = [globals()[comp_function[j]](nu, comp_param[j], comp_redshift[j]) for j in func_index[i_filter]]
# trick to get rid off the extra dimension
fnu = np.sum(fnu, axis=0)
# read the filter transmission
nu_filter, trans_filter = rd.read_single_filter('../filters/'+name_filter+'.fil')
# calculate the lambda0
lambda0[i_filter] = np.average(nu_filter, weights=trans_filter)
# perform the integration
tmp = mm.integrate_filter(nu, fnu, nu_filter, trans_filter)
# add a gaussian noise (depending on the signal to noise defined previously)
fnu_err[i_filter] = tmp/sn_mod[i_filter]
fnu_mod[i_filter] = np.random.normal(tmp, fnu_err[i_filter])
if data_nature[i_filter] == 'u':
fnu_err[i_filter] = fnu_mod[i_filter]
# create the data file
with open('../data/fake_source_ex6zz.dat', 'w') as fake:
fake.write('# filter RA Dec resolution lambda0 det_type flux flux_error arrangement component component_number \n')
for i in range(filter_name.size-1):
fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10} \n'.format(
filter_name[i], RA_list[i], Dec_list[i], res_list[i],
lambda0[i], data_nature[i], fnu_mod[i], fnu_err[i], arrangement[i], notes[i], comp_number[i]))
fake.write('{:15} {:15} {:15} {:5.1f} {:10e} {:5} {:10e} {:10e} {:10} {:10} {:10}'.format(
filter_name[i+1], RA_list[i+1], Dec_list[i+1], res_list[i+1],
lambda0[i+1], data_nature[i+1], fnu_mod[i+1], fnu_err[i+1], arrangement[i+1], notes[i+1], comp_number[i+1]))
# create the fit file
with open('../fake_source_ex6zz.fit', 'w') as fake:
fake.write('source_file: data/fake_source_ex6zz.dat \n')
fake.write('model_file: models/fake_source_ex6zz.mod \n')
fake.write('all_same_redshift: False \n')
fake.write('redshift: '+"[{:.4f}, {:.4f}, {:.4f}]".format(redshift1, redshift1, -1.)+'\n')
fake.write('nwalkers: 20 \n')
fake.write('nsteps: 80 \n')
fake.write('nsteps_cut: 78 \n')
fake.write('percentiles: [10., 25., 50., 75., 90.] \n')
fake.write('skip_imaging: False \n')
fake.write('skip_fit: False \n')
fake.write('skip_MCChains: False \n')
fake.write('skip_triangle: False \n')
fake.write('skip_SED: False \n')
fake.write("unit_obs: 'Hz' \n")
fake.write("unit_flux: 'Jy' \n")
# create the model file with the redshift of the second component as unkonwn
with open('../models/fake_source_ex6zz.mod', 'w') as fake:
fake.write('sync_law 2 \n')
fake.write('$N_{s1}$ -22 -12 \n')
fake.write('$\\alpha_{s1}$ -3.5 -0.5 \n')
fake.write('BB_law 2 \n')
fake.write('$N_{BB1}$ -28 -18 \n')
fake.write('$T_1$ 10 60 \n')
fake.write('BB_law_z 3 \n')
fake.write('$N_{BB2}$ -28 -18 \n')
fake.write('$T_2$ 10 40 \n')
fake.write('$z$ 1 6 \n')
| gdrouart/MrMoose | examples/example_6zz.py | Python | gpl-3.0 | 7,371 | [
"Gaussian"
] | 8748ab0877d9af77439af6a71b131af1ec437f3b99771d2eff3e5f1a7c7d509c |
"""Handle extraction of final files from processing pipelines into storage.
"""
import datetime
import os
import six
import toolz as tz
from bcbio import log, utils
from bcbio.upload import shared, filesystem, galaxy, s3, irods
from bcbio.pipeline import run_info
from bcbio.variation import vcfutils
import bcbio.pipeline.datadict as dd
from bcbio.rnaseq.ericscript import EricScriptConfig
# Registry mapping upload method names (from a sample's ``upload`` config)
# to the backend module implementing ``update_file``/``get_upload_path``.
_approaches = {"filesystem": filesystem,
               "galaxy": galaxy,
               "s3": s3,
               "irods": irods}
def project_from_sample(sample):
    """Upload project-wide output files described by the sample's upload config.

    Returns the sample wrapped in the nested-list structure the pipeline
    expects; does nothing when no upload configuration is present.
    """
    upload_config = sample.get("upload")
    if not upload_config:
        return [[sample]]
    uploader = _approaches[upload_config.get("method", "filesystem")]
    for finfo in _get_files_project(sample, upload_config):
        uploader.update_file(finfo, None, upload_config)
    return [[sample]]
def from_sample(sample):
    """Upload results of processing from an analysis pipeline sample.

    Dispatches each of the sample's output files to the configured upload
    backend; does nothing when no upload configuration is present.
    """
    upload_config = sample.get("upload")
    if not upload_config:
        return [[sample]]
    uploader = _approaches[upload_config.get("method", "filesystem")]
    for finfo in _get_files(sample):
        uploader.update_file(finfo, sample, upload_config)
    return [[sample]]
def get_all_upload_paths_from_sample(sample):
    """Map each local output path to its upload destination.

    Only populated for the "filesystem" upload method; other methods return an
    empty mapping.
    """
    mapping = dict()
    upload_config = sample.get("upload")
    if not upload_config:
        return mapping
    if upload_config.get("method", "filesystem") != "filesystem":
        return mapping
    approach = _approaches["filesystem"]
    for finfo in _get_files_project(sample, upload_config):
        mapping[finfo["path"]] = approach.get_upload_path(finfo, None, upload_config)
    for finfo in _get_files(sample):
        mapping[finfo["path"]] = approach.get_upload_path(finfo, sample, upload_config)
    return mapping
# ## File information from sample
def _get_files(sample):
    """Retrieve files for the sample, dispatching by analysis type.

    Each file is a dictionary containing the path plus associated
    metadata about the file and pipeline versions.  Unknown analysis types
    yield no files.
    """
    handlers = {
        "variant": _get_files_variantcall,
        "snp calling": _get_files_variantcall,
        "variant2": _get_files_variantcall,
        "standard": _get_files_variantcall,
        "rna-seq": _get_files_rnaseq,
        "fastrna-seq": _get_files_rnaseq,
        "smallrna-seq": _get_files_srnaseq,
        "chip-seq": _get_files_chipseq,
        "scrna-seq": _get_files_scrnaseq,
        "wgbs-seq": _get_files_wgbsseq,
    }
    handler = handlers.get(sample.get("analysis").lower())
    return handler(sample) if handler else []
def _get_files_rnaseq(sample):
    """Collect uploadable outputs for bulk RNA-seq samples."""
    algorithm = sample["config"]["algorithm"]
    steps = (
        lambda acc: _maybe_add_summary(algorithm, sample, acc),
        lambda acc: _maybe_add_alignment(algorithm, sample, acc),
        lambda acc: _maybe_add_transcriptome_alignment(sample, acc),
        lambda acc: _maybe_add_disambiguate(algorithm, sample, acc),
        lambda acc: _maybe_add_counts(algorithm, sample, acc),
        lambda acc: _maybe_add_cufflinks(algorithm, sample, acc),
        lambda acc: _maybe_add_stringtie(algorithm, sample, acc),
        lambda acc: _maybe_add_oncofuse(algorithm, sample, acc),
        lambda acc: _maybe_add_pizzly(algorithm, sample, acc),
        lambda acc: _maybe_add_rnaseq_variant_file(algorithm, sample, acc),
        lambda acc: _maybe_add_sailfish_files(algorithm, sample, acc),
        lambda acc: _maybe_add_salmon_files(algorithm, sample, acc),
        lambda acc: _maybe_add_kallisto_files(algorithm, sample, acc),
        lambda acc: _maybe_add_ericscript_files(algorithm, sample, acc),
        lambda acc: _maybe_add_arriba_files(algorithm, sample, acc),
        lambda acc: _maybe_add_junction_files(algorithm, sample, acc),
    )
    out = []
    for step in steps:
        out = step(out)
    return _add_meta(out, sample)
def _get_files_srnaseq(sample):
    """Collect uploadable outputs for small RNA-seq samples."""
    algorithm = sample["config"]["algorithm"]
    steps = (
        lambda acc: _maybe_add_summary(algorithm, sample, acc),
        lambda acc: _maybe_add_trimming(algorithm, sample, acc),
        lambda acc: _maybe_add_seqbuster(algorithm, sample, acc),
        lambda acc: _maybe_add_trna(algorithm, sample, acc),
        lambda acc: _maybe_add_transcriptome_alignment(sample, acc),
    )
    out = []
    for step in steps:
        out = step(out)
    return _add_meta(out, sample)
def _get_files_scrnaseq(sample):
    """Collect uploadable outputs for single-cell RNA-seq samples."""
    algorithm = sample["config"]["algorithm"]
    steps = (
        lambda acc: _maybe_add_summary(algorithm, sample, acc),
        lambda acc: _maybe_add_transcriptome_alignment(sample, acc),
        lambda acc: _maybe_add_scrnaseq(algorithm, sample, acc),
        lambda acc: _maybe_add_barcode_histogram(algorithm, sample, acc),
    )
    out = []
    for step in steps:
        out = step(out)
    return _add_meta(out, sample)
def _get_files_chipseq(sample):
    """Collect uploadable outputs for ChIP-seq / ATAC-seq samples."""
    algorithm = sample["config"]["algorithm"]
    steps = (
        lambda acc: _maybe_add_summary(algorithm, sample, acc),
        lambda acc: _maybe_add_alignment(algorithm, sample, acc),
        lambda acc: _maybe_add_nucleosome_alignments(algorithm, sample, acc),
        lambda acc: _maybe_add_peaks(algorithm, sample, acc),
        lambda acc: _maybe_add_greylist(algorithm, sample, acc),
    )
    out = []
    for step in steps:
        out = step(out)
    return _add_meta(out, sample)
def _get_files_wgbsseq(sample):
    """Collect uploadable outputs for whole-genome bisulfite sequencing."""
    algorithm = sample["config"]["algorithm"]
    # Upload the sorted alignment as the primary BAM output.
    sample["work_bam"] = sample["align_bam"]
    out = _maybe_add_alignment(algorithm, sample, [])
    # Bismark produces a report directory plus two text reports.
    report_specs = (
        ("bismark_report", "directory", "bismark"),
        ("bam_report", "txt", "bam_report"),
        ("deduplication_report", "txt", "deduplication_report"),
    )
    for key, ftype, ext in report_specs:
        path = sample.get(key)
        if path:
            out.append({"path": path,
                        "type": ftype,
                        "ext": ext})
    return _add_meta(out, sample)
def _add_meta(xs, sample=None, config=None):
    """Add top level information about the sample or flowcell to output.

    Sorts outputs into sample names (sample input) and project (config input).
    Raises ValueError when an entry's path is missing or not a string.
    """
    prepared = []
    for item in xs:
        path = item["path"]
        if not isinstance(path, six.string_types) or not os.path.exists(path):
            raise ValueError("Unexpected path for upload: %s" % item)
        item["mtime"] = shared.get_file_timestamp(path)
        if sample:
            sample_name = dd.get_sample_name(sample)
            if "sample" not in item:
                item["sample"] = sample_name
            elif item["sample"] != sample_name:
                # Entry already tagged with a different (batch) sample name.
                item["run"] = sample_name
        if config:
            fc_name = config.get("fc_name") or "project"
            fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
            item["run"] = "%s_%s" % (fc_date, fc_name)
        prepared.append(item)
    return prepared
def _get_files_variantcall(sample):
    """Return output files for the variant calling pipeline"""
    algorithm = sample["config"]["algorithm"]
    steps = (
        lambda acc: _maybe_add_summary(algorithm, sample, acc),
        lambda acc: _maybe_add_alignment(algorithm, sample, acc),
        lambda acc: _maybe_add_callable(sample, acc),
        lambda acc: _maybe_add_disambiguate(algorithm, sample, acc),
        lambda acc: _maybe_add_variant_file(algorithm, sample, acc),
        lambda acc: _maybe_add_sv(sample, acc),
        lambda acc: _maybe_add_hla(algorithm, sample, acc),
        lambda acc: _maybe_add_heterogeneity(algorithm, sample, acc),
        lambda acc: _maybe_add_validate(algorithm, sample, acc),
        lambda acc: _maybe_add_purecn_files(sample, acc),
    )
    out = []
    for step in steps:
        out = step(out)
    return _add_meta(out, sample)
def _maybe_add_purecn_files(sample, out):
    """Keep the whole PureCN output directory when coverage bins exist."""
    coverage_file = tz.get_in(["depth", "bins", "purecn"], sample)
    if coverage_file:
        out.append({"path": os.path.dirname(coverage_file),
                    "type": "directory",
                    "ext": "purecn"})
    return out
def _maybe_add_validate(algorithm, sample, out):
    """Attach validation grading plots (validate, validate-2, ...).

    Fixes the misspelled ``algorith`` parameter name for consistency with the
    other ``_maybe_add_*`` helpers (all callers pass it positionally).
    """
    for i, plot in enumerate(tz.get_in(("validate", "grading_plots"), sample, [])):
        ptype = os.path.splitext(plot)[-1].replace(".", "")
        out.append({"path": plot,
                    "type": ptype,
                    "ext": "validate%s" % ("" if i == 0 else "-%s" % (i + 1))})
    return out
def _maybe_add_rnaseq_variant_file(algorithm, sample, out):
vfile = sample.get("vrn_file")
if vfile:
ftype = "vcf.gz" if vfile.endswith(".gz") else "vcf"
out.append({"path": vfile,
"type": ftype})
if utils.file_exists(vfile + ".tbi"):
out.append({"path": vfile + ".tbi",
"type": "vcf.gz.tbi",
"index": True})
return out
def _maybe_add_callable(data, out):
    """Add callable and depth regions to output folder.
    """
    entries = (
        (dd.get_sample_callable(data), "bed", "callable"),
        (tz.get_in(["depth", "variant_regions", "per_base"], data), "bed.gz", "depth-per-base"),
    )
    for path, ftype, ext in entries:
        if path:
            out.append({"path": path, "type": ftype, "ext": ext})
    return out
def _maybe_add_variant_file(algorithm, sample, out):
    """Upload per-caller variant calls plus BED regions, stats and germline VCFs."""
    if sample.get("align_bam") is None or not sample.get("vrn_file"):
        return out
    for variant in sample["variants"]:
        # Skip the per-sample VCF when it duplicates the population file.
        if not _sample_variant_file_in_population(variant):
            out.extend(_get_variant_file(variant, ("vrn_file",)))
        if variant.get("bed_file"):
            out.append({"path": variant["bed_file"],
                        "type": "bed",
                        "ext": "%s-callregions" % variant["variantcaller"],
                        "variantcaller": variant["variantcaller"]})
        if variant.get("vrn_stats"):
            for extra, fname in variant["vrn_stats"].items():
                out.append({"path": fname,
                            "type": utils.splitext_plus(fname)[-1].replace(".", ""),
                            "ext": "%s-%s" % (variant["variantcaller"], extra),
                            "variantcaller": variant["variantcaller"]})
        germline = variant.get("germline")
        if germline and os.path.exists(germline):
            out.extend(_get_variant_file(variant, ("germline",), "-germline"))
    return out
def _maybe_add_hla(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("hla") and "call_file" in sample["hla"]:
out.append({"path": sample["hla"]["call_file"],
"type": "csv",
"ext": "hla-%s" % (sample["hla"]["hlacaller"])})
return out
def _maybe_add_heterogeneity(algorithm, sample, out):
for hetinfo in sample.get("heterogeneity", []):
report = hetinfo.get("report")
if report and os.path.exists(report):
out.append({"path": report,
"type": utils.splitext_plus(report)[-1].replace(".", "").replace("-", ""),
"ext": "%s-report" % (hetinfo["caller"])})
for plot_type, plot_file in hetinfo.get("plots", {}).items():
if plot_file and os.path.exists(plot_file):
out.append({"path": plot_file,
"type": utils.splitext_plus(plot_file)[-1].replace(".", ""),
"ext": "%s-%s-plot" % (hetinfo["caller"], plot_type)})
return out
def _get_batch_name(sample):
    """Retrieve batch name for use in SV calling outputs.

    Handles multiple batches split via SV calling.
    """
    batch = dd.get_batch(sample) or dd.get_sample_name(sample)
    multiple_batches = isinstance(batch, (list, tuple)) and len(batch) > 1
    return dd.get_sample_name(sample) if multiple_batches else batch
def _maybe_add_sv(sample, out):
    # Upload structural variant outputs for every SV caller run on the sample.
    # Entries are tagged with the batch name so multi-sample calls group together.
    if sample.get("align_bam") is not None and sample.get("sv"):
        batch = _get_batch_name(sample)
        for svcall in sample["sv"]:
            # Caller-specific extras first.
            if svcall.get("variantcaller") == "seq2c":
                out.extend(_get_variant_file(svcall, ("calls",), sample=batch))
                out.extend(_get_variant_file(svcall, ("gender_predicted",), sample=batch))
            elif svcall.get('variantcaller') == 'scramble':
                out.extend(_get_variant_file(svcall, ('clusters_file',), suffix='-clusters',
                                             sample=batch))
                out.extend(_get_variant_file(svcall, ('mei_file',), suffix='-mei', sample=batch))
            # Common per-caller output keys (VCFs, CNV segment/ratio files, BEDs).
            for key in ["vrn_file", "cnr", "cns", "seg", "gainloss",
                        "segmetrics", "vrn_bed", "vrn_bedpe"]:
                out.extend(_get_variant_file(svcall, (key,), sample=batch))
            out.extend(_get_variant_file(svcall, ("background",), suffix="-background", sample=batch))
            out.extend(_get_variant_file(svcall, ("call_file",), suffix="-call", sample=batch))
            out.extend(_get_variant_file(svcall, ("priority",), suffix="-priority", sample=batch))
            # Plots, keyed by plot name; file type taken from the extension.
            if "plot" in svcall:
                for plot_name, fname in svcall["plot"].items():
                    ext = os.path.splitext(fname)[-1].replace(".", "")
                    out.append({"path": fname,
                                "sample": batch,
                                "type": ext,
                                "ext": "%s-%s" % (svcall["variantcaller"], plot_name),
                                "variantcaller": svcall["variantcaller"]})
            # Raw per-subcaller files, plus tabix indexes when present.
            if "raw_files" in svcall:
                for caller, fname in svcall["raw_files"].items():
                    ext = utils.splitext_plus(fname)[-1][1:]
                    out.append({"path": fname,
                                "sample": batch,
                                "type": ext,
                                "ext": "%s-%s" % (svcall["variantcaller"], caller),
                                "variantcaller": svcall["variantcaller"]})
                    if utils.file_exists(fname + ".tbi"):
                        out.append({"path": fname + ".tbi",
                                    "sample": batch,
                                    "type": "vcf.gz.tbi",
                                    "index": True,
                                    "ext": "%s-%s" % (svcall["variantcaller"], caller),
                                    "variantcaller": svcall["variantcaller"]})
            # Heterogeneity/purity companion files; BAM evidence is tagged with
            # the individual sample name instead of the batch.
            for extra in ["subclones", "contamination", "hetsummary", "lohsummary", "read_evidence"]:
                svfile = svcall.get(extra)
                if svfile and os.path.exists(svfile):
                    ftype = os.path.splitext(svfile)[-1].replace(".", "")
                    out.append({"path": svfile,
                                "sample": dd.get_sample_name(sample) if ftype == "bam" else batch,
                                "type": ftype,
                                "ext": "%s-%s" % (svcall["variantcaller"], extra),
                                "variantcaller": svcall["variantcaller"]})
                    fext = ".bai" if ftype == "bam" else ""
                    if fext and os.path.exists(svfile + fext):
                        out.append({"path": svfile + fext,
                                    "sample": dd.get_sample_name(sample) if ftype == "bam" else batch,
                                    "type": ftype + fext,
                                    "index": True,
                                    "ext": "%s-%s" % (svcall["variantcaller"], extra),
                                    "variantcaller": svcall["variantcaller"]})
        # SV validation outputs: each key may be a single file or a dict of
        # files per SV type.
        if "sv-validate" in sample:
            for vkey in ["csv", "plot", "df"]:
                vfile = tz.get_in(["sv-validate", vkey], sample)
                if vfile:
                    to_u = []
                    if isinstance(vfile, dict):
                        for svtype, fname in vfile.items():
                            to_u.append((fname, "-%s" % svtype))
                    else:
                        to_u.append((vfile, "-%s" % vkey if vkey in ["df"] else ""))
                    for vfile, ext in to_u:
                        vext = os.path.splitext(vfile)[-1].replace(".", "")
                        out.append({"path": vfile,
                                    "sample": batch,
                                    "type": vext,
                                    "ext": "sv-validate%s" % ext})
    return out
def _sample_variant_file_in_population(x):
    """Check if a sample file is the same as the population file.

    This is true for batches where we don't extract into samples and do not
    run decomposition for gemini.  Fixes a stray quote that was embedded in
    the original docstring and drops redundant ``len() > 0`` checks (list
    truthiness already covers them).
    """
    if "population" in x:
        pop_vcfs = _get_project_vcf(x)
        sample_vcfs = _get_variant_file(x, ("vrn_file",))
        decomposed = tz.get_in(("population", "decomposed"), x)
        if (pop_vcfs and sample_vcfs and not decomposed and
                vcfutils.get_samples(pop_vcfs[0]["path"]) == vcfutils.get_samples(sample_vcfs[0]["path"])):
            return True
    return False
def _get_variant_file(x, key, suffix="", sample=None, ignore_do_upload=False):
    """Retrieve VCF file with the given key if it exists, handling bgzipped.

    key is a tuple of nested dictionary keys; a sibling "do_upload" flag at
    the same nesting level can veto the upload unless ignore_do_upload is set.
    Returns a list of upload dictionaries (possibly with a tabix index entry),
    each tagged with ``sample`` when provided.
    """
    out = []
    fname = utils.get_in(x, key)
    # Look up the opt-out flag next to the file key, defaulting to upload.
    upload_key = list(key)
    upload_key[-1] = "do_upload"
    do_upload = tz.get_in(tuple(upload_key), x, True)
    if fname and (ignore_do_upload or do_upload):
        if fname.endswith(".vcf.gz"):
            out.append({"path": fname,
                        "type": "vcf.gz",
                        "ext": "%s%s" % (x["variantcaller"], suffix),
                        "variantcaller": x["variantcaller"]})
            if utils.file_exists(fname + ".tbi"):
                out.append({"path": fname + ".tbi",
                            "type": "vcf.gz.tbi",
                            "index": True,
                            "ext": "%s%s" % (x["variantcaller"], suffix),
                            "variantcaller": x["variantcaller"]})
        elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt", ".tsv")):
            ftype = utils.splitext_plus(fname)[-1][1:]
            if ftype == "txt":
                # Use the trailing dash-separated token as a more specific type
                # (e.g. sample-subclones.txt -> "subclones.txt"), unless it
                # spans a directory separator.
                extended_ftype = fname.split("-")[-1]
                if "/" not in extended_ftype:
                    ftype = extended_ftype
            out.append({"path": fname,
                        "type": ftype,
                        "ext": "%s%s" % (x["variantcaller"], suffix),
                        "variantcaller": x["variantcaller"]})
    if sample:
        out_sample = []
        # NOTE: this loop variable deliberately reuses (shadows) the ``x``
        # parameter; ``x`` is no longer needed past this point.
        for x in out:
            x["sample"] = sample
            out_sample.append(x)
        return out_sample
    else:
        return out
def _maybe_add_sailfish_files(algorithm, sample, out):
    """Attach the per-sample sailfish quantification directory when present.

    Removes an unused ``analysis`` lookup left over from earlier code.
    """
    sailfish_dir = os.path.join(dd.get_work_dir(sample), "sailfish",
                                dd.get_sample_name(sample), "quant")
    if os.path.exists(sailfish_dir):
        out.append({"path": sailfish_dir,
                    "type": "directory",
                    "ext": "sailfish"})
    return out
def _maybe_add_salmon_files(algorithm, sample, out):
    """Attach the per-sample salmon quantification directory when present."""
    quant_dir = os.path.join(dd.get_work_dir(sample), "salmon",
                             dd.get_sample_name(sample))
    if not os.path.exists(quant_dir):
        return out
    out.append({"path": quant_dir,
                "type": "directory",
                "ext": "salmon"})
    return out
def _maybe_add_kallisto_files(algorithm, sample, out):
    """Attach the per-sample kallisto quantification directory when present."""
    quant_dir = os.path.join(dd.get_work_dir(sample), "kallisto",
                             dd.get_sample_name(sample), "quant")
    if not os.path.exists(quant_dir):
        return out
    out.append({"path": quant_dir,
                "type": "directory",
                "ext": "kallisto"})
    return out
def _maybe_add_ericscript_files(algorithm, sample, out):
    """Attach the EricScript fusion-detection output directory when present."""
    es_dir = EricScriptConfig(sample).sample_out_dir
    if os.path.exists(es_dir):
        out.append({'path': es_dir,
                    'type': 'directory',
                    'ext': 'ericscript'})
    return out
def _flatten_file_with_secondary(input, out_dir):
"""Flatten file representation with secondary indices (CWL-like)
"""
out = []
orig_dir = os.path.dirname(input["base"])
for finfo in [input["base"]] + input.get("secondary", []):
cur_dir = os.path.dirname(finfo)
if cur_dir != orig_dir and cur_dir.startswith(orig_dir):
cur_out_dir = os.path.join(out_dir, cur_dir.replace(orig_dir + "/", ""))
else:
cur_out_dir = out_dir
out.append({"path": finfo, "dir": cur_out_dir})
return out
def _maybe_add_summary(algorithm, sample, out):
    """Collect summary outputs: PDF report, QC directories, researcher TSV.

    NOTE: deliberately ignores the incoming ``out`` and starts fresh,
    preserving the original behavior (it always runs first in the
    _get_files_* chains, where ``out`` is empty).
    """
    collected = []
    if "summary" in sample:
        summary = sample["summary"]
        if summary.get("pdf"):
            collected.append({"path": summary["pdf"],
                              "type": "pdf",
                              "ext": "summary"})
        if summary.get("qc"):
            for program, finfo in summary["qc"].items():
                collected.extend(_flatten_file_with_secondary(finfo, os.path.join("qc", program)))
        if utils.get_in(sample, ("summary", "researcher")):
            collected.append({"path": summary["researcher"],
                              "type": "tsv",
                              "sample": run_info.clean_name(utils.get_in(sample, ("upload", "researcher"))),
                              "ext": "summary"})
    return collected
def _maybe_add_alignment(algorithm, sample, out):
    # Upload prepared alignment files (ready BAM, UMI BAM, bigwig, plus
    # discordant/split-read BAMs) together with their index files.
    # Germline-phenotype samples are skipped here and handled elsewhere.
    if _has_alignment_file(algorithm, sample) and dd.get_phenotype(sample) != "germline":
        for (fname, ext, isplus) in [(sample.get("work_bam"), "ready", False),
                                     (sample.get("umi_bam"), "umi", False),
                                     (sample.get("bigwig"), "ready", False),
                                     (dd.get_disc_bam(sample), "disc", True),
                                     (dd.get_sr_bam(sample), "sr", True)]:
            if fname and os.path.exists(fname):
                if fname.endswith("bam"):
                    ftype, fext = "bam", ".bai"
                elif fname.endswith("cram"):
                    ftype, fext = "cram", ".crai"
                elif fname.endswith("bw"):
                    # NOTE(review): for bigwig this makes the "index" check look
                    # for <file>.bw.bw -- confirm such companion files exist.
                    ftype, fext = "bw", ".bw"
                else:
                    raise ValueError("Unexpected alignment file type %s" % fname)
                out.append({"path": fname,
                            "type": ftype,
                            "plus": isplus,
                            "ext": ext})
                # Upload the matching index file only when it is present.
                if utils.file_exists(fname + fext):
                    out.append({"path": fname + fext,
                                "type": ftype + fext,
                                "plus": isplus,
                                "index": True,
                                "ext": ext})
    return out
def _maybe_add_disambiguate(algorithm, sample, out):
    """Upload disambiguation (species-split) files plus BAM indexes."""
    if "disambiguate" not in sample or not _has_alignment_file(algorithm, sample):
        return out
    for extra_name, fname in sample["disambiguate"].items():
        ftype = os.path.splitext(fname)[-1].replace(".", "")
        fext = ".bai" if ftype == "bam" else ""
        if not (fname and os.path.exists(fname)):
            continue
        out.append({"path": fname,
                    "type": ftype,
                    "plus": True,
                    "ext": "disambiguate-%s" % extra_name})
        index = fname + fext
        if fext and utils.file_exists(index):
            out.append({"path": index,
                        "type": ftype + fext,
                        "plus": True,
                        "index": True,
                        "ext": "disambiguate-%s" % extra_name})
    return out
def _maybe_add_transcriptome_alignment(sample, out):
    """Attach the transcriptome-space BAM when it has been produced."""
    tx_bam = dd.get_transcriptome_bam(sample)
    if not (tx_bam and utils.file_exists(tx_bam)):
        return out
    out.append({"path": tx_bam,
                "type": "bam",
                "ext": "transcriptome"})
    return out
def _maybe_add_nucleosome_alignments(algorithm, sample, out):
    """
    for ATAC-seq, also upload NF, MN, DN and TN bam files
    """
    split_bams = sample.get("atac", {}).get("align", {})
    for split_name, bam_file in split_bams.items():
        out.append({"path": bam_file,
                    "type": "bam",
                    "ext": split_name})
    return out
def _maybe_add_counts(algorithm, sample, out):
    """Attach the RNA-seq count matrix and its companion .stats file."""
    if not dd.get_count_file(sample):
        return out
    count_file = sample["count_file"]
    out.append({"path": count_file,
                "type": "counts",
                "ext": "ready"})
    stats_file = os.path.splitext(count_file)[0] + ".stats"
    if utils.file_exists(stats_file):
        out.append({"path": stats_file,
                    "type": "count_stats",
                    "ext": "ready"})
    return out
def _maybe_add_scrnaseq(algorithm, sample, out):
    """Attach single-cell count matrices (deduped and with-duplicates)."""
    count_file = dd.get_count_file(sample)
    if not count_file:
        return out

    def add_matrix(path):
        # Every matrix ships with companion row/column name files.
        out.append({"path": path, "type": "mtx"})
        out.append({"path": path + ".rownames", "type": "rownames"})
        out.append({"path": path + ".colnames", "type": "colnames"})

    add_matrix(count_file)
    umi_file = os.path.splitext(count_file)[0] + "-dupes.mtx"
    if utils.file_exists(umi_file):
        add_matrix(umi_file)
    return out
def _maybe_add_barcode_histogram(algorithm, sample, out):
    """Attach cellular barcode histograms (raw and filtered) for scRNA-seq.

    Bug fix: the "barcodes-filtered" entry previously re-uploaded the raw
    cb-histogram.txt; the computed cb-histogram-filtered.txt path was never
    used.
    """
    if not dd.get_count_file(sample):
        return out
    count_dir = os.path.dirname(sample["count_file"])
    histogram_file = os.path.join(count_dir, "cb-histogram.txt")
    histogram_filtered_file = os.path.join(count_dir, "cb-histogram-filtered.txt")
    out.append({"path": histogram_file,
                "type": "tsv",
                "ext": "barcodes"})
    out.append({"path": histogram_filtered_file,
                "type": "tsv",
                "ext": "barcodes-filtered"})
    return out
def _maybe_add_oncofuse(algorithm, sample, out):
if sample.get("oncofuse_file", None) is not None:
out.append({"path": sample["oncofuse_file"],
"type": "tsv",
"dir": "oncofuse",
"ext": "ready"})
return out
def _maybe_add_pizzly(algorithm, sample, out):
    """Attach the pizzly fusion-calling output directory when present."""
    fusion_dir = dd.get_pizzly_dir(sample)
    if fusion_dir:
        out.append({"path": fusion_dir,
                    "type": "directory",
                    "ext": "pizzly"})
    return out
def _maybe_add_arriba_files(algorithm, sample, out):
    """Attach arriba fusion and discarded-fusion TSVs when present.

    Removes an unused ``dd.get_pizzly_dir`` lookup left over from earlier
    code; pizzly outputs are handled by _maybe_add_pizzly.
    """
    arriba = dd.get_arriba(sample)
    if arriba:
        for key, ext in (("fusions", "arriba-fusions"),
                         ("discarded", "arriba-discarded-fusions")):
            out.append({"path": arriba[key],
                        "type": "tsv",
                        "ext": ext,
                        "dir": "arriba"})
    return out
def _maybe_add_junction_files(algorithm, sample, out):
    """
    add splice junction files from STAR, if available
    """
    junction_specs = (
        (dd.get_junction_bed(sample), "bed", "SJ"),
        (dd.get_chimericjunction(sample), "tsv", "chimericSJ"),
        (dd.get_starjunction(sample), "tab", "SJ"),
    )
    for path, ftype, ext in junction_specs:
        if path:
            out.append({"path": path,
                        "type": ftype,
                        "ext": ext,
                        "dir": "STAR"})
    star_summary = dd.get_summary_qc(sample).get("star", None)
    if star_summary:
        star_log = star_summary["base"]
        if star_log:
            out.append({"path": star_log,
                        "type": "log",
                        "dir": "STAR"})
    return out
def _maybe_add_cufflinks(algorithm, sample, out):
if "cufflinks_dir" in sample:
out.append({"path": sample["cufflinks_dir"],
"type": "directory",
"ext": "cufflinks"})
return out
def _maybe_add_stringtie(algorithm, sample, out):
if "stringtie_dir" in sample:
out.append({"path": sample["stringtie_dir"],
"type": "directory",
"ext": "stringtie"})
return out
def _maybe_add_trimming(algorithm, sample, out):
    """Attach collapsed-read size statistics from small RNA trimming."""
    stats_file = sample["collapse"] + "_size_stats"
    if utils.file_exists(stats_file):
        out.append({"path": stats_file,
                    "type": "trimming_stats",
                    "ext": "ready"})
    return out
def _maybe_add_seqbuster(algorithm, sample, out):
    """Attach seqbuster/mirtop small RNA quantification outputs.

    Fixes: removes a dead ``fn = sample.get("seqbuster_novel")`` assignment
    that was immediately overwritten, and guards the mirtop lookup so a
    sample without a "mirtop" key no longer raises KeyError.
    """
    if "seqbuster" not in sample:
        return out
    fn = sample["seqbuster"]
    if utils.file_exists(fn):
        out.append({"path": fn,
                    "type": "counts",
                    "ext": "mirbase-ready"})
    fn = sample.get("mirtop")
    if fn and utils.file_exists(fn):
        out.append({"path": fn,
                    "type": "gff",
                    "ext": "mirbase-ready"})
    if "seqbuster_novel" in sample and utils.file_exists(sample["seqbuster_novel"]):
        out.append({"path": sample["seqbuster_novel"],
                    "type": "counts",
                    "ext": "novel-ready"})
    return out
def _maybe_add_trna(algorithm, sample, out):
    """Attach the MINTmap tRNA fragment output directory."""
    if "trna" not in sample:
        return out
    trna_dir = sample["trna"]
    if utils.file_exists(trna_dir):
        out.append({"path": trna_dir,
                    "type": "directory",
                    "ext": "mintmap"})
    return out
def _append_peak_files(caller_files, out):
    """Append every existing peak file from a caller -> files mapping,
    skipping the "main" consensus entry (uploaded at the project level)."""
    for caller, fnames in caller_files.items():
        if caller == "main":
            continue
        for fn in fnames:
            if os.path.exists(fn):
                out.append({"path": fn,
                            "dir": caller,
                            "ext": utils.splitext_plus(fn)[1]})
    return out

def _maybe_add_peaks(algorithm, sample, out):
    """Attach peak-caller output files.

    ATAC-seq nests files one level deeper (per nucleosome split); the
    previously duplicated inner loop is factored into _append_peak_files.
    """
    peaks_files = sample.get("peaks_files", {})
    if dd.get_chip_method(sample) == "atac":
        for caller_files in peaks_files.values():
            out = _append_peak_files(caller_files, out)
    else:
        out = _append_peak_files(peaks_files, out)
    return out
def _maybe_add_greylist(algorithm, sample, out):
greylist = sample.get("greylist", None)
if greylist:
out.append({"path": greylist,
"type": "directory",
"ext": "greylist"})
return out
def _has_alignment_file(algorithm, sample):
    """Check whether the sample produced an alignment we should upload."""
    produced_bam = (algorithm.get("aligner") or algorithm.get("realign")
                    or algorithm.get("recalibrate") or algorithm.get("bam_clean")
                    or algorithm.get("mark_duplicates", algorithm.get("aligner")))
    return (produced_bam and
            sample.get("work_bam") is not None and
            "upload_alignment" not in dd.get_tools_off(sample))
# ## File information from full project
def _add_batch(x, sample):
    """Potentially add batch name to an upload file.

    Matches the longest batch name whose prefix appears in the file name,
    falling back to the sample name.
    """
    for batch in sorted(dd.get_batches(sample) or [], key=len, reverse=True):
        if batch and os.path.basename(x["path"]).startswith(("%s-" % batch, "%s.vcf" % batch)):
            x["batch"] = batch
            break
    else:
        x["batch"] = dd.get_sample_name(sample)
    return x
def _get_project_vcf(x, suffix=""):
    """Get our project VCF, either from the population or the variant batch file.
    """
    vcfs = (_get_variant_file(x, ("population", "vcf"), suffix=suffix)
            or _get_variant_file(x, ("vrn_file_batch", ), suffix=suffix, ignore_do_upload=True))
    if not vcfs and x.get("variantcaller") == "ensemble":
        vcfs = _get_variant_file(x, ("vrn_file", ), suffix=suffix)
    return vcfs
def _get_files_project(sample, upload_config):
    """Retrieve output files associated with an entire analysis project.

    Collects provenance, logs, summaries, population VCFs, SV/CNV project
    files and expression matrices, then stamps run metadata via _add_meta.
    """
    out = [{"path": sample["provenance"]["programs"]}]
    if os.path.exists(tz.get_in(["provenance", "data"], sample) or ""):
        out.append({"path": sample["provenance"]["data"]})
    # Pipeline logs, when present in the configured log directory.
    for fname in ["bcbio-nextgen.log", "bcbio-nextgen-commands.log"]:
        if os.path.exists(os.path.join(log.get_log_dir(sample["config"]), fname)):
            out.append({"path": os.path.join(log.get_log_dir(sample["config"]), fname),
                        "type": "external_command_log",
                        "ext": ""})
    if "summary" in sample and sample["summary"].get("project"):
        out.append({"path": sample["summary"]["project"]})
    if "summary" in sample and sample["summary"].get("metadata"):
        out.append({"path": sample["summary"]["metadata"]})
    mixup_check = tz.get_in(["summary", "mixup_check"], sample)
    if mixup_check:
        out.append({"path": sample["summary"]["mixup_check"],
                    "type": "directory", "ext": "mixup_check"})
    report = os.path.join(dd.get_work_dir(sample), "report")
    if utils.file_exists(report):
        out.append({"path": report,
                    "type": "directory", "ext": "report"})
    multiqc = tz.get_in(["summary", "multiqc"], sample)
    if multiqc:
        out.extend(_flatten_file_with_secondary(multiqc, "multiqc"))
    ataqv = tz.get_in(["ataqv_report"], sample)
    if ataqv:
        out.extend(_flatten_file_with_secondary(ataqv, "ataqv"))
    if sample.get("seqcluster", {}):
        out.append({"path": sample["seqcluster"].get("out_dir"),
                    "type": "directory", "ext": "seqcluster"})
    if sample.get("mirge", {}):
        for fn in sample["mirge"]:
            out.append({"path": fn,
                        "dir": "mirge"})
    if sample.get("report", None):
        out.append({"path": os.path.dirname(sample["report"]),
                    "type": "directory", "ext": "seqclusterViz"})
    # Population variant databases and project-level annotated VCFs.
    for x in sample.get("variants", []):
        if "pop_db" in x:
            out.append({"path": x["pop_db"],
                        "type": "sqlite",
                        "variantcaller": x["variantcaller"]})
    for x in sample.get("variants", []):
        if "population" in x:
            pop_db = tz.get_in(["population", "db"], x)
            if pop_db:
                out.append({"path": pop_db,
                            "type": "sqlite",
                            "variantcaller": x["variantcaller"]})
            suffix = "-annotated-decomposed" if tz.get_in(("population", "decomposed"), x) else "-annotated"
            vcfs = _get_project_vcf(x, suffix)
            out.extend([_add_batch(f, sample) for f in vcfs])
    # Only one grading summary is needed for the whole project.
    for x in sample.get("variants", []):
        if x.get("validate") and x["validate"].get("grading_summary"):
            out.append({"path": x["validate"]["grading_summary"]})
            break
    # Project-level SV/CNV files, de-duplicated across samples.
    sv_project = set()
    pon_project = set()
    for svcall in sample.get("sv", []):
        if svcall.get("variantcaller") == "seq2c":
            if svcall.get("calls_all") and svcall["calls_all"] not in sv_project:
                out.append({"path": svcall["coverage_all"], "batch": "seq2c", "ext": "coverage", "type": "tsv"})
                out.append({"path": svcall["read_mapping"], "batch": "seq2c", "ext": "read_mapping", "type": "txt"})
                out.append({"path": svcall["calls_all"], "batch": "seq2c", "ext": "calls", "type": "tsv"})
                sv_project.add(svcall["calls_all"])
        if svcall.get("variantcaller") == "gatkcnv":
            if svcall.get("pon") and svcall["pon"] not in pon_project:
                out.append({"path": svcall["pon"], "batch": "gatkcnv", "ext": "pon", "type": "hdf5"})
                pon_project.add(svcall.get("pon"))
    # PureCN panel-of-normals build artifacts.
    purecn_pon = tz.get_in(["config", "algorithm", "purecn_pon_build"], sample)
    genome_build = dd.get_genome_build(sample)
    if purecn_pon:
        work_dir = tz.get_in(["dirs", "work"], sample)
        gemini_dir = os.path.join(work_dir, "gemini")
        mapping_bias_filename = f"mapping_bias_{genome_build}.rds"
        mapping_bias_file = os.path.join(gemini_dir, mapping_bias_filename)
        normal_db_file = f"normalDB_{genome_build}.rds"
        normal_db = os.path.join(gemini_dir, normal_db_file)
        # NOTE(review): both operands are os.path.join results and therefore
        # always truthy; this likely intended os.path.exists checks -- confirm.
        if mapping_bias_file and normal_db:
            out.append({"path": mapping_bias_file})
            out.append({"path": normal_db})
    if "coverage" in sample:
        cov_db = tz.get_in(["coverage", "summary"], sample)
        if cov_db:
            out.append({"path": cov_db, "type": "sqlite", "ext": "coverage"})
        all_coverage = tz.get_in(["coverage", "all"], sample)
        if all_coverage:
            out.append({"path": all_coverage, "type": "bed", "ext": "coverage"})
    # Small RNA count outputs.
    if dd.get_mirna_counts(sample):
        out.append({"path": dd.get_mirna_counts(sample)})
    if dd.get_isomir_counts(sample):
        out.append({"path": dd.get_isomir_counts(sample)})
    if dd.get_novel_mirna_counts(sample):
        out.append({"path": dd.get_novel_mirna_counts(sample)})
    if dd.get_novel_isomir_counts(sample):
        out.append({"path": dd.get_novel_isomir_counts(sample)})
    # Combined expression matrices: single-cell gets matrix/rownames/colnames
    # companions; bulk gets the featureCounts matrix.
    if dd.get_combined_counts(sample):
        count_file = dd.get_combined_counts(sample)
        if sample["analysis"].lower() == "scrna-seq":
            out.append({"path": count_file,
                        "type": "mtx"})
            out.append({"path": count_file + ".rownames",
                        "type": "rownames"})
            out.append({"path": count_file + ".colnames",
                        "type": "colnames"})
            out.append({"path": count_file + ".metadata",
                        "type": "metadata"})
            umi_file = os.path.splitext(count_file)[0] + "-dupes.mtx"
            if utils.file_exists(umi_file):
                out.append({"path": umi_file,
                            "type": "mtx"})
                out.append({"path": umi_file + ".rownames",
                            "type": "rownames"})
                out.append({"path": umi_file + ".colnames",
                            "type": "colnames"})
            if dd.get_combined_histogram(sample):
                out.append({"path": dd.get_combined_histogram(sample),
                            "type": "txt"})
            rda = os.path.join(os.path.dirname(count_file), "se.rda")
            if utils.file_exists(rda):
                out.append({"path": rda,
                            "type": "rda"})
        else:
            out.append({"path": dd.get_combined_counts(sample), "dir": "featureCounts"})
    if dd.get_summarized_experiment(sample):
        out.append({"path": dd.get_summarized_experiment(sample), "dir": "counts"})
    if dd.get_tximport(sample):
        out.append({"path": dd.get_tximport(sample)["gene_tpm"], "dir": "tpm"})
        out.append({"path": dd.get_tximport(sample)["gene_counts"], "dir": "counts"})
    if dd.get_annotated_combined_counts(sample):
        out.append({"path": dd.get_annotated_combined_counts(sample), "dir": "featureCounts"})
    if dd.get_combined_fpkm(sample):
        out.append({"path": dd.get_combined_fpkm(sample)})
    if dd.get_combined_fpkm_isoform(sample):
        out.append({"path": dd.get_combined_fpkm_isoform(sample)})
    if dd.get_transcript_assembler(sample):
        out.append({"path": dd.get_merged_gtf(sample)})
    if dd.get_dexseq_counts(sample):
        out.append({"path": dd.get_dexseq_counts(sample)})
        out.append({"path": "%s.ann" % dd.get_dexseq_counts(sample)})
    if dd.get_express_counts(sample):
        out.append({"path": dd.get_express_counts(sample)})
    if dd.get_express_fpkm(sample):
        out.append({"path": dd.get_express_fpkm(sample)})
    if dd.get_express_tpm(sample):
        out.append({"path": dd.get_express_tpm(sample)})
    if dd.get_isoform_to_gene(sample):
        out.append({"path": dd.get_isoform_to_gene(sample)})
    if dd.get_square_vcf(sample):
        out.append({"path": dd.get_square_vcf(sample)})
    if dd.get_sailfish_transcript_tpm(sample):
        out.append({"path": dd.get_sailfish_transcript_tpm(sample)})
    if dd.get_sailfish_gene_tpm(sample):
        out.append({"path": dd.get_sailfish_gene_tpm(sample)})
    if dd.get_tx2gene(sample):
        out.append({"path": dd.get_tx2gene(sample)})
    if dd.get_spikein_counts(sample):
        out.append({"path": dd.get_spikein_counts(sample)})
    # Consensus peak calls and peak count table for chip/atac projects.
    if tz.get_in(("peaks_files", "consensus", "main"), sample):
        out.append({"path": tz.get_in(("peaks_files", "consensus", "main"), sample), "dir": "consensus"})
    if tz.get_in(("peak_counts", "peaktable"), sample):
        out.append({"path": tz.get_in(("peak_counts", "peaktable"), sample), "dir": "consensus"})
    transcriptome_dir = os.path.join(dd.get_work_dir(sample), "inputs",
                                     "transcriptome")
    if os.path.exists(transcriptome_dir):
        out.append({"path": transcriptome_dir, "type": "directory",
                    "ext": "transcriptome"})
    rnaseq_se_qc_file = os.path.join(dd.get_work_dir(sample), "qc", "bcbio-se.html")
    if os.path.exists(rnaseq_se_qc_file):
        out.append({"path": rnaseq_se_qc_file})
    return _add_meta(out, config=upload_config)
| lbeltrame/bcbio-nextgen | bcbio/upload/__init__.py | Python | mit | 41,834 | [
"Galaxy"
] | dab92ce1d481c4bb79abc63bd52eed6ad3241efd3854f14598329cda85afe3bd |
#!/usr/bin/env python
'''Run a simple, single-neuron-from-each-population simulation.'''
from __future__ import absolute_import, print_function, division
from grid_cell_model.submitting.base.parsers import GenericSubmissionParser
from grid_cell_model.submitting.arguments import ArgumentCreator
from grid_cell_model.submitting.factory import SubmitterFactory
from default_params import defaultParameters as dp
# Parse generic submission options (environment, output location, limits, ...).
parser = GenericSubmissionParser()
o = parser.parse_args()
# Start from the model's default parameter set; overridden below.
p = dp.copy()
# Submitting
ENV = o.env                # submission environment type (cluster/workstation)
simRootDir = o.where       # root directory for simulation output
simLabel = 'single_neuron'
appName = 'simulation_single_neuron.py'
rtLimit = '00:02:00' if o.rtLimit is None else o.rtLimit  # wall-clock limit
numCPU = 1
blocking = True            # wait for each submitted job to finish
timePrefix = False         # do not prefix output directories with a timestamp
numRepeat = 1
dry_run = o.dry_run
# Simulation parameters; CLI values take precedence over defaults.
p['master_seed'] = 123456
p['time'] = 10e3 if o.time is None else o.time # ms
p['nthreads'] = 1
p['ntrials'] = 5 if o.ntrials is None else o.ntrials
p['verbosity'] = o.verbosity
###############################################################################
ac = ArgumentCreator(p, printout=True)
# One job per noise level.
iterparams = {
    'noise_sigma' : [0.0, 150.0, 300.0] # pA
}
ac.insertDict(iterparams, mult=False)
###############################################################################
submitter = SubmitterFactory.getSubmitter(ac, appName, envType=ENV,
                                          rtLimit=rtLimit,
                                          output_dir=simRootDir,
                                          label=simLabel, blocking=blocking,
                                          timePrefix=timePrefix, numCPU=numCPU)
ac.setOption('output_dir', submitter.outputDir())
startJobNum = 0
submitter.submitAll(startJobNum, numRepeat, dry_run=dry_run)
# Record the iterated parameter space (3 noise_sigma values) with the results.
submitter.saveIterParams(iterparams, ['noise_sigma'], [3], dry_run=dry_run)
| MattNolanLab/ei-attractor | grid_cell_model/simulations/007_noise/submit_single_neuron.py | Python | gpl-3.0 | 1,822 | [
"NEURON"
] | bd5bb2dd3b717cc4a86e30928ad4ac76487c19235de78802f9302ed3178cc3b3 |
import re, os, sys
from Tester import Tester
from RunParallel import RunParallel # For TIMEOUT value
class RunApp(Tester):
@staticmethod
def validParams():
params = Tester.validParams()
params.addRequiredParam('input', "The input file to use for this test.")
params.addParam('test_name', "The name of the test - populated automatically")
params.addParam('skip_test_harness_cli_args', False, "Skip adding global TestHarness CLI Args for this test")
params.addParam('input_switch', '-i', "The default switch used for indicating an input to the executable")
params.addParam('errors', ['ERROR', 'command not found', 'erminate called after throwing an instance of'], "The error messages to detect a failed run")
params.addParam('expect_out', "A regular expression that must occur in the input in order for the test to be considered passing.")
params.addParam('match_literal', False, "Treat expect_out as a string not a regular expression.")
params.addParam('should_crash', False, "Inidicates that the test is expected to crash or otherwise terminate early")
params.addParam('executable_pattern', "A test that only runs if the exectuable name matches the given pattern")
params.addParam('walltime', "The max time as pbs understands it")
params.addParam('job_name', "The test name as pbs understands it")
params.addParam('no_copy', "The tests file as pbs understands it")
# Parallel/Thread testing
params.addParam('max_parallel', 1000, "Maximum number of MPI processes this test can be run with (Default: 1000)")
params.addParam('min_parallel', 1, "Minimum number of MPI processes that this test can be run with (Default: 1)")
params.addParam('max_threads', 16, "Max number of threads (Default: 16)")
params.addParam('min_threads', 1, "Min number of threads (Default: 1)")
params.addParam('allow_warnings', False, "If the test harness is run --error warnings become errors, setting this to true will disable this an run the test without --error");
# Valgrind
params.addParam('valgrind', 'NORMAL', "Set to (NONE, NORMAL, HEAVY) to determine which configurations where valgrind will run.")
params.addParam('post_command', "Command to be run after the MOOSE job is run")
return params
def __init__(self, name, params):
Tester.__init__(self, name, params)
if os.environ.has_key("MOOSE_MPI_COMMAND"):
self.mpi_command = os.environ['MOOSE_MPI_COMMAND']
self.force_mpi = True
else:
self.mpi_command = 'mpiexec -host localhost'
self.force_mpi = False
def checkRunnable(self, options):
if options.enable_recover:
if self.specs.isValid('expect_out') or self.specs['should_crash'] == True:
reason = 'skipped (expect_out RECOVER)'
return (False, reason)
if self.specs.isValid('executable_pattern') and re.search(self.specs['executable_pattern'], self.specs['executable']) == None:
reason = 'skipped (EXECUTABLE PATTERN)'
return (False, reason)
return (True, '')
def getCommand(self, options):
specs = self.specs
# Create the command line string to run
command = ''
# Check for built application
if not options.dry_run and not os.path.exists(specs['executable']):
print 'Application not found: ' + str(specs['executable'])
sys.exit(1)
if options.parallel == None:
default_ncpus = 1
else:
default_ncpus = options.parallel
if options.error and not specs["allow_warnings"]:
specs['cli_args'].append('--error')
timing_string = ' '
if options.timing:
specs['cli_args'].append('--timing')
if options.colored == False:
specs['cli_args'].append('--no-color')
if options.cli_args and not specs['skip_test_harness_cli_args']:
specs['cli_args'].insert(0, options.cli_args)
if options.scaling and specs['scale_refine'] > 0:
specs['cli_args'].insert(0, ' -r ' + str(specs['scale_refine']))
# Raise the floor
ncpus = max(default_ncpus, int(specs['min_parallel']))
# Lower the ceiling
ncpus = min(ncpus, int(specs['max_parallel']))
#Set number of threads to be used lower bound
nthreads = max(options.nthreads, int(specs['min_threads']))
#Set number of threads to be used upper bound
nthreads = min(nthreads, int(specs['max_threads']))
caveats = []
if nthreads > options.nthreads:
caveats.append('min_threads=' + str(nthreads))
elif nthreads < options.nthreads:
caveats.append('max_threads=' + str(nthreads))
# TODO: Refactor this caveats business
if ncpus > default_ncpus:
caveats.append('min_cpus=' + str(ncpus))
elif ncpus < default_ncpus:
caveats.append('max_cpus=' + str(ncpus))
if len(caveats) > 0:
self.specs['caveats'] = caveats
if self.force_mpi or options.parallel or ncpus > 1 or nthreads > 1:
command = self.mpi_command + ' -n ' + str(ncpus) + ' ' + specs['executable'] + ' --n-threads=' + str(nthreads) + ' ' + specs['input_switch'] + ' ' + specs['input'] + ' ' + ' '.join(specs['cli_args'])
elif options.valgrind_mode == specs['valgrind'] or options.valgrind_mode == 'HEAVY' and specs[VALGRIND] == 'NORMAL':
command = 'valgrind --suppressions=' + os.path.join(specs['moose_dir'], 'python', 'TestHarness', 'suppressions', 'errors.supp') + ' --leak-check=full --tool=memcheck --dsymutil=yes --track-origins=yes -v ' + specs['executable'] + ' ' + specs['input_switch'] + ' ' + specs['input'] + ' ' + ' '.join(specs['cli_args'])
else:
command = specs['executable'] + timing_string + specs['input_switch'] + ' ' + specs['input'] + ' ' + ' '.join(specs['cli_args'])
if options.pbs:
return self.getPBSCommand(options)
if self.specs.isValid('post_command'):
command += ';\n'
command += self.specs['post_command']
return command
def getPBSCommand(self, options):
if options.parallel == None:
default_ncpus = 1
else:
default_ncpus = options.parallel
# Raise the floor
ncpus = max(default_ncpus, int(self.specs['min_parallel']))
# Lower the ceiling
ncpus = min(ncpus, int(self.specs['max_parallel']))
#Set number of threads to be used lower bound
nthreads = max(options.nthreads, int(self.specs['min_threads']))
#Set number of threads to be used upper bound
nthreads = min(nthreads, int(self.specs['max_threads']))
extra_args = ''
if options.parallel or ncpus > 1 or nthreads > 1:
extra_args = ' --n-threads=' + str(nthreads) + ' ' + ' '.join(self.specs['cli_args'])
# Append any extra args to the cluster_launcher
if extra_args != '':
self.specs['cli_args'] = extra_args
else:
self.specs['cli_args'] = ' '.join(self.specs['cli_args'])
self.specs['cli_args'] = self.specs['cli_args'].strip()
# Open our template. This should probably be done at the same time as cluster_handle.
template_script = open(os.path.join(self.specs['moose_dir'], 'python', 'TestHarness', 'pbs_template.i'), 'r')
content = template_script.read()
template_script.close()
# Convert MAX_TIME to hours:minutes for walltime use
hours = int(int(self.specs['max_time']) / 3600)
minutes = int(int(self.specs['max_time']) / 60) % 60
self.specs['walltime'] = '{0:02d}'.format(hours) + ':' + '{0:02d}'.format(minutes) + ':00'
# Truncate JOB_NAME. PBS can only accept 13 character (6 characters from test name + _## (test serial number) + _### (serialized number generated by cluster_launcher) = the 13 character limit)
self.specs['job_name'] = self.specs['input'][:6] + '_' + str(options.test_serial_number).zfill(2)
self.specs['job_name'] = self.specs['job_name'].replace('.', '')
self.specs['job_name'] = self.specs['job_name'].replace('-', '')
# Convert TEST_NAME to input tests file name (normally just 'tests')
self.specs['no_copy'] = options.input_file_name
# Do all of the replacements for the valid parameters
for spec in self.specs.valid_keys():
if spec in self.specs.substitute:
self.specs[spec] = self.specs.substitute[spec].replace(spec.upper(), self.specs[spec])
content = content.replace('<' + spec.upper() + '>', str(self.specs[spec]))
# Make sure we strip out any string substitution parameters that were not supplied
for spec in self.specs.substitute_keys():
if not self.specs.isValid(spec):
content = content.replace('<' + spec.upper() + '>', '')
# Write the cluster_launcher input file
options.cluster_handle.write(content + '\n')
return os.path.join(self.specs['moose_dir'], 'scripts', 'cluster_launcher.py') + ' ' + options.pbs + '.cluster'
def processResults(self, moose_dir, retcode, options, output):
reason = ''
specs = self.specs
if specs.isValid('expect_out'):
if specs['match_literal']:
out_ok = self.checkOutputForLiteral(output, specs['expect_out'])
else:
out_ok = self.checkOutputForPattern(output, specs['expect_out'])
if (out_ok and retcode != 0):
reason = 'OUT FOUND BUT CRASH'
elif (not out_ok):
reason = 'NO EXPECTED OUT'
if reason == '':
# We won't pay attention to the ERROR strings if EXPECT_ERR is set (from the derived class)
# since a message to standard error might actually be a real error. This case should be handled
# in the derived class.
if options.valgrind_mode == '' and not specs.isValid('expect_err') and len( filter( lambda x: x in output, specs['errors'] ) ) > 0:
reason = 'ERRMSG'
elif retcode == RunParallel.TIMEOUT:
reason = 'TIMEOUT'
elif retcode == 0 and specs['should_crash'] == True:
reason = 'NO CRASH'
elif retcode != 0 and specs['should_crash'] == False:
reason = 'CRASH'
# Valgrind runs
elif retcode == 0 and options.valgrind_mode != '' and 'ERROR SUMMARY: 0 errors' not in output:
reason = 'MEMORY ERROR'
# PBS runs
elif retcode == 0 and options.pbs and 'command not found' in output:
reason = 'QSUB NOT FOUND'
return (reason, output)
def checkOutputForPattern(self, output, re_pattern):
if re.search(re_pattern, output, re.MULTILINE | re.DOTALL) == None:
return False
else:
return True
def checkOutputForLiteral(self, output, literal):
if output.find(literal) == -1:
return False
else:
return True
| waxmanr/moose | python/TestHarness/testers/RunApp.py | Python | lgpl-2.1 | 10,527 | [
"MOOSE"
] | e3fa525a09c7c250a49b5a69af861606ac292bcf9f25b6359c84178f29811216 |
####################################################################
# Class PROGRAM: contral External programs #
####################################################################
# External modules
import os,sys,signal
import re,shutil
import subprocess
import numpy
import time
# Internal modules
import auxfun as af
class PROGRAM:
def __init__(self):
self._ProgName =''
self._Command =''
self._ComPath =''
self._InputFile ={}
self._InputVar =[]
self._OutputFile={}
self._OutputVar ={}
self._BoundVar =[]
self._InFileID =[]
self._OutFileID =[]
self._InFilVar ={}
self._InRepVar ={}
self._InPosVar ={}
self._InLabVar ={}
self._InSLHAVar ={}
self._OutFileVar ={}
self._OutPosVar ={}
self._OutLabelVar ={}
self._OutSLHAVar ={}
self.invar = {}
self.outvar = {}
self.boundvar = {}
self.cgauvar = {}
self.cffchi2var= {}
self._Count = 0
self._executor = True
self._outputclean = True
self._timelimit = 60
def setProgName(self, name):
self._ProgName=name
af.Info('...............................................')
af.Info('Program name = %s'% self._ProgName)
def setCommand(self, command):
self._Command=af.string2nestlist(command)
af.Info('Execute command = %s'% self._Command)
def setComPath(self, cpath):
if cpath.startswith('/home') or cpath.startswith('~'):
self._ComPath=cpath
else:
self._ComPath=os.path.join(af.CurrentPath, cpath)
if not os.path.exists(self._ComPath):
af.ErrorStop('Command path "%s" do not exist.'%self._ComPath)
af.Info('Command path = %s'% self._ComPath)
def setInputFile(self, inputfile):
inputfile=af.string2nestlist(inputfile)
self._InFileID = [x[0] for x in inputfile ]
if self._InFileID != list(set(self._InFileID)):
af.ErrorStop('Input file in program "%s" have same File ID.'%self._ProgName)
af.Info('Input file = ')
for ii in inputfile:
if len(ii) != 2:
if ii[0] == '':
break
af.ErrorStop('The input file of %s need two items (File ID, File path).'%self._ProgName)
if not (ii[1].startswith('/home') or ii[1].startswith('~')):
ii[1]=os.path.join(af.CurrentPath, ii[1])
self._InputFile[ii[0]]=ii[1]
af.Info(' fileID= %s \tFile= %s'%(ii[0],ii[1]))
## check functions for whether contents in input variable and output variable in configure is matching with contents in corresponding input file and output file with methods "file", "position", "label", "slha""
    def checkVar_file(self, fileID):
        """Validate input variables of 'File' method for one input file.

        Returns file_flag: True when the input file is user-provided and can
        be inspected now; False when it is produced by a previous program
        ('PREVIOUS'), in which case checks on the file are skipped.
        """
        ## For 'File' method
        ## check the input vars that use 'File' method
        ## file_flag stands for existing the checked input file which is created by user-self and not output by the previous program(s).
        ii = fileID
        file_flag = True
        self._InFilVar[ii] = [x for x in self._InputVar if (x[1] == ii) and (x[2].lower() == 'file')]
        for jj in self._InFilVar[ii]:
            if len(jj) != 4 :
                af.ErrorStop( 'For input variable "%s" in program "%s" with "File" method, 4 items (Name, FileID, "File", Method) need to be provived.'%(jj[0],self._ProgName) )
            if not jj[3].upper() in ['PREVIOUS', 'SAVE', 'REPLACE', 'ADD']:
                af.ErrorStop( 'For input variable "%s" in program "%s" with "File" method, the 4th item must be "PREVIOUS", "SAVE", "REPLACE" or "ADD". If you can use other formats, please contact with the authors.'%(jj[0], self._ProgName) )
            if jj[3].upper() == "PREVIOUS":
                # File comes from an earlier program; nothing to check yet.
                file_flag = False
                af.Info( 'Becasue file (ID=%s) in program "%s" is obtained from previous program(s), check this input file is ignored.'%(jj[1], self._ProgName))
            af.Info(' Name= %s \tFileID= %s \t"File"= %s \tMethod %s'%(jj[0],jj[1],jj[2],jj[3]))
        return file_flag
    def checkVar_position(self, fileID, fileFlag, status):
        """Validate 'Position'-method variables (line/column indices) for a file.

        status selects input ("invar") or output ("outvar") variables; any
        other value is a no-op.  When fileFlag is True the referenced file is
        opened and the line/column indices are range-checked against it.
        """
        ## For 'Position' method
        ## check the input vars that use 'Position' method
        ii=fileID
        file_flag=fileFlag
        if status.lower()=="invar":
            self._InPosVar[ii] = [x for x in self._InputVar if (x[1] == ii) and (x[2].lower() == 'position')]
        elif status.lower()=="outvar":
            self._OutPosVar[ii] = [x for x in self._OutputVar if (x[1] == ii) and (x[2].lower() == 'position')]
        else:
            return
        # NOTE(review): the loop below always iterates self._InPosVar[ii],
        # even for status=="outvar" -- presumably only the "invar" path is
        # exercised by callers; confirm before relying on the "outvar" path.
        for jj in self._InPosVar[ii]:
            if len(jj) != 5 :
                af.ErrorStop('For input/output variable "%s" in program "%s" with "Position" method, 5 items ( Name ID, Input file ID, Method, Line number, Column number ) need to be provived.'%(jj[0], self._ProgName) )
            af.Info(' varID= %s \t fileID= %s \t Method= %s \t Line num= %s \t Column num= %s'%(jj[0],jj[1],jj[2],jj[3],jj[4]) )
            if file_flag:
                ## inlines is a list of all lines
                ## invars is a list of list of words for all lines
                if status.lower()=="invar":
                    inlines = open(self._InputFile[ii]).readlines()
                elif status.lower()=="outvar":
                    inlines = open(self._OutputFile[ii]).readlines()
                else:
                    return
                invar = [ss.split() for ss in inlines]
                af.Debug('Position len(invar)',len(invar))
                af.Debug('Position line num',jj[3])
                if len(invar) < jj[3]:
                    af.ErrorStop('For input variable "%s" in program "%s" with "Position" method, the line number is larger than the number of lines of inoput file "%s". Please check your input file.'%(jj[0],self._ProgName,self._InputFile[ii]) )
                af.Debug('Position len(invar[line num])',len(invar[jj[3]-1]))
                af.Debug('Position Column number',jj[4])
                if len(invar[jj[3]-1]) < jj[4]:
                    af.ErrorStop('For input variable "%s" in program "%s" with "Position" method, the column number is larger than the number of columns in line %s of input file "%s". Please check your input file.'%(jj[0],self._ProgName,jj[3],self._InputFile[ii]) )
    def checkVar_label(self, fileID, fileFlag):
        """Validate 'Label'-method input variables for one input file.

        Each variable names a unique label line in the file plus a non-zero
        column offset from which the value is read.  When fileFlag is True
        the label is searched for in the file and the column is range-checked.
        """
        ## For 'Label' method
        ## save the input vars that use 'Label' method
        ii=fileID
        file_flag=fileFlag
        self._InLabVar[ii] = [x for x in self._InputVar if (x[1] == ii) and (x[2].lower() == 'label')]
        for jj in self._InLabVar[ii]:
            if len(jj) != 5 :
                af.ErrorStop('For input variable "%s" in program "%s" with "Label" method, 5 items ( Name ID, Input file ID, Method, Label name, Input variable column number ) need to be provived.'%(jj[0],self._ProgName) )
            if int(jj[4]) - jj[4] != 0 or jj[4] == 0:
                af.ErrorStop('For input variable "%s" in program "%s" with "Label" method, the fourth item Input variable column number need to be an integer and not zero.'%(jj[0],self._ProgName) )
            af.Info(' varID= %s \tfileID= %s \tMethod= %s \tLabel= %s \tColumn= %s'%(jj[0],jj[1],jj[2],jj[3],jj[4]))
            if file_flag:
                ## inlines is a list of all lines
                ## invars is a list of list of words for all lines
                inlines = open(self._InputFile[ii]).readlines()
                invar = [ss.split() for ss in inlines]
                ## enumerate return object that could by use by for loop by parsing list where xxi is id and xx is value in list.
                labelinum = [xxi for xxi,xx in enumerate(inlines) if jj[3] in xx]
                if len(labelinum)==0:
                    af.ErrorStop( 'For input variable "%s" in program "%s" with "Label" method, there is no "%s" in the input file "%s". Please check your input file.'%(jj[0],self._ProgName,jj[3],self._InputFile[ii]) )
                if len(labelinum)!=1:
                    af.ErrorStop( 'For input variable "%s" in program "%s" with "Label" method, there is more than one line with label "%s" in the input file "%s". Please check your input file.'%(jj[0],self._ProgName,jj[3],self._InputFile[ii]) )
                for kk in labelinum:
                    af.Debug('Labeled line',inlines[kk].strip('\n'))
                    if len(invar[kk]) < jj[4]:
                        af.ErrorStop( 'For input variable "%s" in program "%s" with "Label" method, the column number "%i" defined by user is larger than the number of columns "%i" in the corresponding labeled line "%s" of input file "%s".'%(jj[0], self._ProgName, jj[4], len(invar[labelinum[0]]), invar[labelinum[0]], self._InputFile[ii]) )
                    # Positive columns count from the line start, negative
                    # from its end; the extracted item must not be the label.
                    if jj[4] > 0 and invar[kk][int(jj[4]-1)] == jj[3]:
                        af.ErrorStop( 'For input variable "%s" in program "%s" with "Label" method, the extracting item with column ID "%i" is just the label "%s" in the corresponding labeled line "%s" of input file "%s".'%(jj[0], self._ProgName, jj[4], jj[3], invar[labelinum[0]], self._InputFile[ii]) )
                    if jj[4] < 0 and invar[kk][int(jj[4])] == jj[3]:
                        af.ErrorStop( 'For input variable "%s" in program "%s" with "Label" method, the ex   tracting item with column ID "%i" is just the label "%s" in the corresponding labeled line "%s" of input file "%s".'%(jj[0], self._ProgName, jj[4], jj[3], invar[labelinum[0]], self._InputFile[ii]) )
def checkVar_slha(self, fileID, fileFlag):
## For 'SLHA' method
## check the input vars that use 'SLHA' method
ii=fileID
file_flag=fileFlag
self._InSLHAVar[ii] = [x for x in self._InputVar if (x[1] == ii) and (x[2].lower() == 'slha')]
for jj in self._InSLHAVar[ii]:
if len(jj) < 6 :
af.ErrorStop('For input variable "%s" in program "%s" with "SLHA" method, at least 6 items ( Name ID, Input file ID, Method, BLOCK/DECAY, Block name/PDG, Keys) need to be provived.'%(jj[0],self._ProgName) )
if not jj[3].upper() in ['BLOCK','DECAY']:
af.ErrorStop( 'For input variable "%s" in program "%s" with "SLHA" method, the 4th item must be "BLOCK" or "DECAY". If you can use other formats, please contact with the authors.'%(jj[0],self._ProgName) )
af.Info(' varID= %s \t fileID= %s \t Method= %s \t Block/Decay= %s \tName= %s \tKeys= %s'%(jj[0], jj[1], jj[2], jj[3], jj[4], jj[5:]))
if file_flag:
## inlines is a list of all lines
## invars is a list of list of words for all lines
inlines = open(self._InputFile[ii]).readlines()
invar = [ss.split() for ss in inlines]
## list[i:], begin with i to the end involved; list[i:j], begin with i to the end j but not involved.
## string.split() could get list with seperated string] ## jj[4] may be \"a\" or \"a b\"
blk = str(jj[4]).split()
blk_flag = False
#ks = str(jj[5]).split()
ks = list(map(str, jj[5:]))
ks_flag = False
for kk in invar:
if len(kk)==0: continue
if kk[0].startswith('#'): continue
if blk_flag:
## quick break for loop if no data in block (but not effect on decay, because decay could not have no data)
## quick break for loop if finished iterating over data in block or decay
## if there are two same block or decay, only return data in the first one.
if kk[0].upper() in ['BLOCK','DECAY']:
break
## smart select condition, if len(kk) < len(jj)-4, this line in SLHA file could not give desired info in any case.
if len(kk) < len(jj)-4:
continue
if jj[3].upper() == 'BLOCK' and ''.join(ks) == ''.join(kk[0:len(ks)]):
ks_flag = True
af.Debug('SLHA match data line', kk)
if jj[3].upper() == 'DECAY' and ''.join(ks) == ''.join(kk[1:len(ks)+1]):
ks_flag = True
af.Debug('SLHA match data line', kk)
if jj[3].upper() == kk[0].upper() and ''.join(blk) == ''.join(kk[1:len(blk)+1]) :
blk_flag = True
af.Debug('SLHA match line',kk)
if jj[3].upper() == 'DECAY' and jj[5] == 0:
if len(kk) < 3 :
af.ErrorStop( 'For input variable "%s" in program "%s" with "SLHA" method, there are only %i column is the head line of "%s %s" in the input file.'%(jj[0],self._ProgName,len(kk),jj[3],jj[4],self._InputFile[ii]) )
else:
af.Debug('SLHA match data line', kk)
ks_flag = True
break
if not blk_flag:
af.ErrorStop( 'For input variable "%s" in program "%s" with "SLHA" method, can not find "%s %s" in the input file "%s".'%(jj[0], self._ProgName, jj[3], jj[4], self._InputFile[ii]) )
if not ks_flag:
af.ErrorStop( 'For input variable "%s" in program "%s" with "SLHA" method, can not find required line with key "%s" in "%s %s" of the input file "%s".'%(jj[0], self._ProgName, jj[5:], jj[3], jj[4], self._InputFile[ii]) )
    def checkVar_replace(self, fileID, fileFlag):
        """Validate 'Replace'-method input variables for one input file.

        Each variable names a placeholder token that must occur in the input
        file (or in its .ESbackup copy left by an interrupted earlier run).
        When fileFlag is True and any Replace variables exist, the pristine
        file content is written to <file>.ESbackup so the placeholders
        survive being substituted with values at run time.
        """
        ## For 'Replace' method
        ## check the input vars that use 'Replace' method
        ii=fileID
        file_flag=fileFlag
        ## if the input file is not obtained from previous program(s), open the file and check it.
        if file_flag:
            try :
                ## Note infile and infile_bk stands for content of file
                infile = open(self._InputFile[ii]).read()
                ## if 'Replace' method is used and the last run of the same program stop by accident, there will exist a ESbackup file which is generated by easyscan and contains the replaced item.
                if os.path.exists(self._InputFile[ii]+'.ESbackup'):
                    try:
                        infile_bk = open(self._InputFile[ii]+'.ESbackup').read()
                        BackupFlag = True
                    except:
                        BackupFlag = False
                else:
                    BackupFlag = False
            ## Stop if easyscan could not read input file or input file.ESbackup.
            except:
                af.ErrorStop('Can not find/open the input file "%s" in program "%s".'%(self._InputFile[ii],self._ProgName))
        self._InRepVar[ii] = [x for x in self._InputVar if (x[1] == ii) and (x[2].lower() == 'replace')]
        for jj in self._InRepVar[ii]:
            if len(jj) != 4 :
                af.ErrorStop( 'For input variable "%s" in program "%s" with "Replace" method, 4 items ( Name ID, Input file ID, Method, Name of replaced parameter ) need to be provived.'%(jj[0], self._ProgName) )
            af.Info(' varID= %s \tfileID= %s \tMethod= %s \tName= %s'%(jj[0],jj[1],jj[2],jj[3]))
            if file_flag:
                ## re.findall used for returning a list of matching object.
                # \b word boundaries keep e.g. ES_lam from matching ES_lamT.
                match = re.findall(r"\b%s\b"%jj[3], infile)
                if len(match)==0:
                    if not BackupFlag:
                        af.ErrorStop( 'For input variable "%s" in program "%s" with "Replace" method, can not find "%s" in coressponding input file "%s" and its ESbackup file.'%(jj[0],self._ProgName,jj[3],self._InputFile[ii]) )
                    else:
                        bk_match = re.findall(r"\b%s\b"%jj[3], infile_bk)
                        if len(bk_match)==0:
                            af.ErrorStop( 'For input variable "%s" in program "%s" with "Replace" method, can not find "%s" in coressponding input file "%s" and its ESbackup file.'%(jj[0], self._ProgName, jj[3], self._InputFile[ii]) )
                        else:
                            ## the input file is wrong, use the ESbackup file
                            infile = infile_bk
                            match = re.findall(r"\b%s\b"%jj[3], infile)
                            ## there is no backup file for the following par
                            BackupFlag = False
                            af.WarningNoWait( 'For input variable "%s" in program "%s" with "Replace" method, the input file "%s" does not contain "%s", but ESbackup file exists and contain it. In the following, ESbackup file will replace the content of the original input file.'%(jj[0],self._ProgName,self._InputFile[ii],jj[3]) )
                if len(match)>1:
                    af.WarningNoWait( 'For input variable "%s" in program "%s" with "Replace" method, find %i "%s" in coressponding input file "%s". They will all be replaced by variable "%s" in the following program.'%(jj[0],self._ProgName,len(match),jj[3],self._InputFile[ii],jj[0]) )
                ## if the fist var do not use Backup file, the next var can not use
                ## if the fist var use Backup, BackupFlag is already False
                BackupFlag = False
        ## auto backup input file which involving replaced items, because the replaced items in origin input file would be replaced by values.
        if file_flag:
            if len(self._InRepVar[ii])>0:
                open(self._InputFile[ii]+'.ESbackup','w').write(infile)
                open(self._InputFile[ii],'w').write(infile)
    def setInputVar(self, inputvar):
        """Parse the 'Input variable' section and validate every entry.

        Each entry needs at least (Name ID, Input file ID, Method, ...); the
        file ID must refer to a declared input file.  After parsing, the
        per-method check routines are run for every input file.
        """
        ## inputvar is a string of all content in input variable section of configure file
        self._InputVar=af.string2nestlist(inputvar)
        for ii in self._InputVar:
            if len(ii) <4:
                if ii[0] == '': ## Program can have zero input parameters
                    return
                af.ErrorStop( 'The input variables in program "%s" must have at least 4 items (Name ID, Input file ID, Method, Note).'%self._ProgName )
            ## self._InFileID is file ID.
            if not ii[1] in self._InFileID:
                af.ErrorStop( 'For input variable "%s" in program "%s", There is no input file with ID "%s"'%(ii[0],self._ProgName, ii[1]) )
            ## add for "math ..." in "Input variable" in [programX]
            # Initialised to NaN; actual values are filled in per scan point.
            self.invar[ii[0]] = af.NaN
        af.Info('Input variable = ')
        #file_flag = True
        for ii in self._InFileID:
            ## For 'File' method
            ## check the input vars that use 'File' method
            file_flag=self.checkVar_file(ii)
            ## For 'Replace' method
            ## check the input vars that use 'Replace' method
            self.checkVar_replace(ii, file_flag)
            ## For 'Position' method
            ## check the input vars that use 'Position' method
            self.checkVar_position(ii, file_flag, "invar")
            ## For 'Label' method
            ## check the input vars that use 'Label' method
            self.checkVar_label(ii, file_flag)
            ## For 'SLHA' method
            ## check the input vars that use 'SLHA' method
            self.checkVar_slha(ii, file_flag)
def setOutputFile(self, outputfile):
if outputfile=='':
af.Debug('No OutFile in program %s'%self._ProgName)
self._OutFileID = []
return
outputfile=af.string2nestlist(outputfile)
self._OutFileID = [x[0] for x in outputfile ]
af.Debug('OutFileID',self._OutFileID)
af.Info('Output file = ')
for ii in outputfile:
if not (ii[1].startswith('/home') or ii[1].startswith('~')):
ii[1] = os.path.join(af.CurrentPath, ii[1])
self._OutputFile[ii[0]]=ii[1]
af.Info(' ID= %s \tFile= %s'%(ii[0],ii[1]))
def setOutputVar(self, outputvar):
if len(self._OutFileID)==0:
af.Debug('No OutFile for program %s'%self._ProgName)
return
outputvar=af.string2nestlist(outputvar)
for ii in outputvar:
if ii[1] not in self._OutFileID:
af.ErrorStop( 'ID of output variable "%s" in program "%s" is wrong.'%(ii[0],self._ProgName, ii[1]))
if ii[2].upper() not in ['FILE', 'POSITION', 'LABEL', 'SLHA']:
af.ErrorStop( 'Method "%s" of output variable "%s" in program "%s" is not supported'%(ii[2],ii[0],self._ProgName))
self.outvar[ii[0]] = af.NaN
af.Info('Output variable = ')
for ii in self._OutFileID:
self._OutputVar[ii] = [x for x in outputvar if (x[1] == ii) and (x[2].lower() != 'file')]
## For 'File' method
self._OutFileVar[ii] = [x for x in outputvar if (x[1] == ii) and (x[2].lower() == 'file')]
if len(self._OutFileVar[ii])>1:
af.ErrorStop( 'In program "%s", there is no need to use more than one vars to stand the output file "%s" where you use %s.'%(self._ProgName, self._OutputFile[ii],' '.join(self._OutFileVar[ii])) )
for jj in self._OutFileVar[ii]:
if len(jj) != 4 :
af.ErrorStop( 'For output variable "%s" in program "%s" with "File" method, 4 items ( Name, FileID, "File", Method ) need to be provived.'%(jj[0],self._ProgName) )
if jj[3].upper() != 'SAVE' :
af.ErrorStop( 'For output variable "%s" in program "%s" with "File" method, 4 items ( Name, FileID, "File", Method ) need to be provived. Note Method=save only supporting now.'%(jj[0],self._ProgName) )
af.Info(' varID= %s \tfileID= %s \t"File"= %s Method=%s'%(jj[0],jj[1],jj[2],jj[3]))
## For 'Position' method
self._OutPosVar[ii] = [x for x in outputvar if (x[1] == ii) and (x[2].lower() == 'position')]
for jj in self._OutPosVar[ii]:
if len(jj) != 5 :
af.ErrorStop( 'For output variable "%s" in program "%s" with "Position" method, 5 items ( Name ID, Input file ID, Method, Line number, Column number ) need to be provived.'%(jj[0],self._ProgName) )
if int(jj[4]) - jj[4] != 0 or jj[4] == 0:
af.ErrorStop('For output variable "%s" in program "%s" with "Label" method, the fourth item Input variable column number need to be an integer and not zero.'%(jj[0], self._ProgName) )
af.Info(' varID= %s \tfileID= %s \tMethod= %s \tLine num= %s \tColumn num= %s'%(jj[0],jj[1],jj[2],jj[3],jj[4]))
## For 'label' method
self._OutLabelVar[ii] = [x for x in outputvar if (x[1] == ii) and (x[2].lower() == 'label')]
for jj in self._OutLabelVar[ii]:
if len(jj) != 5 :
af.ErrorStop( 'For output variable "%s" in program "%s" with "Label" method, 5 items ( Name ID, Input file ID, Method, Label name, Input variable column number ) need to be provived.'%(jj[0],self._ProgName) )
af.Info(' varID= %s \tfileID= %s \tMethod= %s \tLabel= %s \tColumn= %s'%(jj[0],jj[1],jj[2],jj[3],jj[4]))
## For 'slha' method
self._OutSLHAVar[ii] = [x for x in outputvar if (x[1] == ii) and (x[2].lower() == 'slha')]
for jj in self._OutSLHAVar[ii]:
if len(jj) < 6 :
af.ErrorStop( 'For output variable "%s" in program "%s" with "SLHA" method, at least 6 items ( Name ID, Input file ID, Method, BLOCK/DECAY, Block name/PDG, Keys) need to be provived.'%(jj[0],self._ProgName) )
if not jj[3].upper() in ['BLOCK','DECAY']:
af.ErrorStop( 'For input variable "%s" in program "%s" with "SLHA" method, the 4th item must be "BLOCK" or "DECAY". If you can to used other formats, please contact with the authors.'%(jj[0],self._ProgName) )
af.Info(' varID= %s \tfileID= %s \tMethod= %s \tB/D= %s \tName= %s \tKeys= %s'%(jj[0],jj[1],jj[2],jj[3],jj[4],jj[5]))
def setExecutor(self, executor):
if executor.lower() == 'os.system':
self._executor = True
af.Info('Use "%s" execute commands.'%executor)
elif executor.lower() == 'subprocess.popen':
self._executor = False
af.Info('Use "%s" execute commands.'%executor)
else:
af.Info('The command executor for program "%s" should be either "os.system" or "subprocess.popen", not "%s".'%(self._ProgName,executor))
self._executor = True
af.WarningNoWait('Use "os.system" execute commands.')
def setOutputClean(self, outputclean):
if outputclean.lower() in ['yes','y','t','true']:
self._outputclean = True
af.Info('Delete the output file of program "%s" before execute it. '%self._ProgName)
elif outputclean.lower() in ['no','n','f','false']:
self._outputclean = False
af.Info('Keep the output file of program "%s" before execute it. '%self._ProgName)
else:
af.WarningNoWait('The item "Output clean" for program "%s" should be either "Yes" or "No", not "%s".'%(self._ProgName,outputclean))
self._outputclean = True
af.Info('Delete the output file of program "%s" before execute it. '%self._ProgName)
    def setTimeLimit(self, timelimit):
        """Set the run-time limit (in minutes) for this program."""
        # NOTE(review): forcing subprocess execution here looks deliberate --
        # presumably a time limit cannot be enforced through os.system;
        # confirm before changing.
        self._executor = False
        self._timelimit = timelimit
        af.Info('Time limit = %i minutes.'%self._timelimit)
    def getProgName(self):
        """Return the program name."""
        return self._ProgName
    def getCommand(self):
        """Return the parsed execute command (nested list)."""
        return self._Command
    def getComPath(self):
        """Return the absolute command path."""
        return self._ComPath
    def getInputFile(self):
        """Return the {fileID: path} map of input files."""
        return self._InputFile
    def getInputVar(self):
        """Return the raw input-variable definitions."""
        return self._InputVar
    def getOutputFile(self):
        """Return the {fileID: path} map of output files."""
        return self._OutputFile
    def getOutputVar(self):
        """Return the output-variable definitions grouped by file ID."""
        return self._OutputVar
def WriteInputFile(self, par):
    """Write the current parameter values into the program's input file(s).

    par is a dict mapping variable IDs to current values.  For every input
    file ID the configured methods are applied in turn: 'File'
    (previous/save/replace/append), 'Replace', 'Position', 'Label' and
    'SLHA'.  The file is rewritten in place, preserving the original
    separators on every line that is not modified.
    """
    # No input files configured for this program.
    if self._InFileID == ['']:
        return
    # Evaluate "math ..." expressions so derived variables are up to date.
    af.parseMath(par)
    ## self._InFileID is list of file ID in Input file in [programX]
    for ii in self._InFileID:
        ## For 'File' method
        file_flag = True
        for jj in self._InFilVar[ii]:
            if jj[3].lower() == 'previous':
                # Input file was produced by a previous program; use as-is.
                file_flag = False
            ## "save" not support now
            elif jj[3].lower() == 'save':
                pass
            elif jj[3].lower() == 'replace':
                af.Debug("For program", self._ProgName)
                af.Debug("Copied file", par[jj[0]])
                af.Debug("Copy file", self._InputFile[ii])
                shutil.copy(par[jj[0]], self._InputFile[ii])
            else:
                # Default: append the referenced file to the input file.
                # BUGFIX: was open(par[jj[0]].read()) — that called .read()
                # on the file *name*; open the file first, then read it.
                try:
                    open(self._InputFile[ii], 'a').write(open(par[jj[0]]).read())
                except:
                    af.ErrorStop('Can not open input file "%s" or "%s" in program "%s", by "%s".'%(self._InputFile[ii], par[jj[0]], self._ProgName, self._InFilVar[ii]))
        ## Open the input file
        if file_flag:
            try:
                if len(self._InRepVar[ii]) > 0:
                    # 'Replace' placeholders live in the pristine backup copy.
                    infile = open(self._InputFile[ii]+'.ESbackup', 'r').read()
                else:
                    infile = open(self._InputFile[ii], 'r').read()
            except:
                af.ErrorStop('Can not open the input file "%s" in program "%s".'%(self._InputFile[ii], self._ProgName))
        else:
            try:
                infile = open(self._InputFile[ii], 'r').read()
            except:
                af.ErrorStop('Can not open the input file "%s" in program "%s", which is obtained from previous program(s).'%(self._InputFile[ii], self._ProgName))
        ## For 'Replace' method
        for jj in self._InRepVar[ii]:
            match = re.findall(r"\b%s\b"%jj[3], infile)
            if len(match) == 0:
                af.ErrorStop('For input variable "%s" in program "%s" with "Replace" method, can not find "%s" in coressponding input file "%s", which is obtained from previous program(s).'%(jj[0],self._ProgName,jj[3],self._InputFile[ii]) )
            ## jj[3] is something being replaced and par is a dictionary and par[jj[0]] (value) will replace jj[3].
            ## "\b" will make sure ES_lam in ES_lamT would not be replaced
            infile = re.sub(r"\b%s\b"%jj[3], str(par[jj[0]]), infile)
        if len(self._InRepVar[ii]) > 0:
            open(self._InputFile[ii], 'w').write(infile)
        ## inlines is a list of all lines
        ## invar is a list of lists of the tokens of each line
        inlines = open(self._InputFile[ii]).readlines()
        invar = [re.split(r'[ \t,]+', ss.strip()) for ss in inlines]
        ## invarNotModify keeps the untouched tokens so unchanged lines can be
        ## written back exactly as in the original input file.
        invarNotModify = [re.split(r'[ \t,]+', ss.strip()) for ss in inlines]
        ## For 'Position' method
        ## self._InPosVar[ii] is a list of input variables with position method
        for jj in self._InPosVar[ii]:
            # jj[3]/jj[4] are 1-based line/column numbers.
            invar[jj[3]-1][jj[4]-1] = str(par[jj[0]])
        ## For 'Label' method
        for jj in self._InLabVar[ii]:
            labelinum = [xxi for xxi, xx in enumerate(inlines) if jj[3] in xx]
            for kk in labelinum:
                if jj[4] > 0:
                    invar[kk][int(jj[4]-1)] = str(par[jj[0]])
                else:
                    # Non-positive column counts from the end of the line.
                    invar[kk][int(jj[4])] = str(par[jj[0]])
        ## For 'SLHA' method
        for jj in self._InSLHAVar[ii]:
            blk = str(jj[4]).split()
            blk_flag = False
            ks = list(map(str, jj[5:]))
            ks_flag = False
            for kki, kk in enumerate(invar):
                if kk[0].startswith('#'):
                    continue
                if blk_flag:
                    # Inside the target block: stop at the next BLOCK/DECAY.
                    if kk[0].upper() in ['BLOCK', 'DECAY']:
                        break
                    if len(kk) < len(jj)-4:
                        continue
                    if jj[3].upper() == 'BLOCK' and ''.join(ks) == ''.join(kk[0:len(ks)]):
                        ks_flag = True
                        invar[kki][len(ks)] = str(par[jj[0]])
                    if jj[3].upper() == 'DECAY' and ''.join(ks) == ''.join(kk[1:len(ks)+1]):
                        ks_flag = True
                        invar[kki][0] = str(par[jj[0]])
                if jj[3].upper() == kk[0].upper() and ''.join(blk) == ''.join(kk[1:len(blk)+1]):
                    blk_flag = True
                    # DECAY with key 0 targets the total width on the header line.
                    if jj[3].upper() == 'DECAY' and jj[5] == 0:
                        invar[kki][2] = str(par[jj[0]])
                        ks_flag = True
                        break
        ## input file with replaced values; unchanged lines keep their format.
        outlines = []
        for xxi, xx in enumerate(inlines):
            if invar[xxi] == invarNotModify[xxi]:
                outlines.append(inlines[xxi])
            else:
                ## keep format unchanged 20180512
                patt = re.compile(r'[ \t,\n]+')
                joinList = patt.findall(inlines[xxi])
                newList = []
                if len(joinList) == len(invar[xxi]):
                    # Line has trailing separator: token, sep, token, sep, ...
                    for yyi in range(len(invar[xxi])):
                        newList.append(invar[xxi][yyi])
                        newList.append(joinList[yyi])
                elif len(joinList)-1 == len(invar[xxi]):
                    # Line starts with a separator (leading indent).
                    for yyi in range(len(invar[xxi])):
                        newList.append(joinList[yyi])
                        newList.append(invar[xxi][yyi])
                    newList.append(joinList[-1])
                else:
                    af.ErrorStop("Keep format unchanged Failed! Check src/program.py at Line about 559.")
                outlines.append("".join(newList))
        open(self._InputFile[ii], 'w').writelines(outlines)
def RunProgram(self):
    """Execute the program's command(s) in its working directory.

    Uses os.system when self._executor is True, otherwise subprocess.Popen
    with live output forwarding and a wall-clock limit of
    self._timelimit minutes, after which the process group is killed.
    """
    af.Debug('Be about to run Program %s'%self._ProgName)
    cwd = self._ComPath
    # remove output file
    if self._outputclean:
        self.RemoveOutputFile()
    for cmd in self._Command:
        af.Debug('Runing Program %s with command'%self._ProgName, cmd)
        if self._executor:
            # os.system has no cwd argument: hop into the work dir and back.
            ncwd = os.getcwd()
            os.chdir(cwd)
            os.system(cmd[0])
            os.chdir(ncwd)
        else:
            try:
                t_begining = time.time()
                # preexec_fn=os.setpgrp puts the child in its own process
                # group so the whole group can be killed on timeout.
                process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT, cwd=cwd, preexec_fn=os.setpgrp,
                        shell=True)
                while process.poll() is None:
                    process.stdout.flush()
                    output = process.stdout.readline()
                    if output:
                        print(output.strip())
                    seconds_passed = time.time() - t_begining
                    if seconds_passed > self._timelimit*60:
                        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
                        af.WarningWait("Program %s has been running more than %f minutes, It will be killed."%(self._ProgName, self._timelimit))
                        return
            except OSError as error:
                if cwd and not os.path.exists(cwd):
                    raise Exception('Directory %s doesn\'t exist. Impossible to run' % cwd)
                else:
                    error_text = 'Impossible to run \'%s\'\n' % cmd
                    error_text += ' in the directory \'%s\'\n' % cwd
                    error_text += 'Trying to recover from:\n'
                    error_text += ' %s\n' % str(error)
                    # BUGFIX: was raise Exception(err) — 'err' is undefined and
                    # raised a NameError instead of the composed message.
                    raise Exception(error_text)
def RemoveOutputFile(self):
    """Delete any leftover output files before the program is executed.

    Returns False when at least one expected output file was absent,
    otherwise None.  (Callers currently ignore the return value.)
    """
    all_present = True
    for ii in self._OutFileID:
        af.Debug('Remove remaining output file %s before running program %s'%(self._OutputFile[ii], self._ProgName))
        if not os.path.exists(self._OutputFile[ii]):
            af.Debug('No remaining output file %s for program %s'%(self._OutputFile[ii], self._ProgName))
            # BUGFIX: previously returned here, which left the remaining
            # output files undeleted; keep iterating instead.
            all_present = False
            continue
        os.remove(self._OutputFile[ii])
        af.Debug('Successful remaining output file %s for program %s'%(self._OutputFile[ii], self._ProgName))
    if not all_present:
        return False
def ReadOutputFile(self, par, path):
    """Read the program's output file(s) and store values into par.

    Supports the 'File', 'Position', 'Label' and 'SLHA' output methods.
    Returns True on success, False when a file or a variable can not be
    read (DECAY variables that are absent are set to 0 instead).
    """
    for ii in self._OutFileID:
        if not os.path.exists(self._OutputFile[ii]):
            af.Debug('No output file "%s" for program %s'%(self._OutputFile[ii], self._ProgName))
            return False
        ## For 'File' method
        for jj in self._OutFileVar[ii]:
            # The "value" of a File variable is the output file's path.
            par[jj[0]] = self._OutputFile[ii]
        # Only parse the file when some variable needs its content.
        if len(self._OutPosVar[ii]) + len(self._OutLabelVar[ii]) + len(self._OutSLHAVar[ii]) > 0:
            oulines = open(self._OutputFile[ii]).readlines()
            ouvar = [re.split(r'[ \t,]+', ss.strip()) for ss in oulines]
        ## For 'Position' method
        for jj in self._OutPosVar[ii]:
            try:
                # jj[3]/jj[4] are 1-based line/column numbers.
                par[jj[0]] = af.autotype(ouvar[jj[3]-1][jj[4]-1])
                af.Debug('Output - %s='%jj[0], par[jj[0]])
            except:
                af.Info('Can not read the output var %s'%jj)
                return False
        ## For 'Label' method
        for jj in self._OutLabelVar[ii]:
            labeline = [xx for xx in oulines if re.search(str(jj[3]), xx)]
            if len(labeline) > 1:
                # BUGFIX: the message used the undefined name 'labelinum',
                # which raised a NameError instead of this error message.
                af.ErrorStop( 'For output variable "%s" in program "%s" with "Label" method, there is %d "%s" in output file "%s". Please choose other method.'%(jj[0],self._ProgName,len(labeline),jj[3],self._OutputFile[ii]) )
            try:
                par[jj[0]] = af.autotype(re.split(r'[ \t]+', labeline[0].strip())[int(jj[4]-1)])
                af.Debug('Output - %s='%jj[0], par[jj[0]])
            except:
                af.Debug('Can not read the output var', jj[0])
                return False
        ## For 'SLHA' method
        for jj in self._OutSLHAVar[ii]:
            blk = str(jj[4]).split()
            blk_flag = False
            ks = list(map(str, jj[5:]))
            ks_flag = False
            for kki, kk in enumerate(ouvar):
                if len(kk) == 0:
                    continue
                if kk[0].startswith('#'):
                    continue
                if blk_flag:
                    # Inside the target block: stop at the next BLOCK/DECAY.
                    if kk[0].upper() in ['BLOCK', 'DECAY']:
                        break
                    if len(kk) < len(jj)-4:
                        continue
                    if jj[3].upper() == 'BLOCK' and ''.join(ks) == ''.join(kk[0:len(ks)]):
                        ks_flag = True
                        par[jj[0]] = af.autotype(ouvar[kki][len(ks)])
                    if jj[3].upper() == 'DECAY' and ''.join(ks).replace('.0','') == ''.join(kk[1:len(ks)+1]):
                        ks_flag = True
                        par[jj[0]] = af.autotype(ouvar[kki][0])
                if jj[3].upper() == kk[0].upper() and ''.join(blk) == ''.join(kk[1:len(blk)+1]):
                    blk_flag = True
                    # DECAY with key 0 reads the total width from the header.
                    if jj[3].upper() == 'DECAY' and jj[5] == 0:
                        if len(kk) < 3:
                            af.Debug('Can not read the output var', jj)
                            return False
                        else:
                            par[jj[0]] = af.autotype(ouvar[kki][2])
                            ks_flag = True
                            break
            if ks_flag:
                af.Debug('Output - %s='%jj[0], par[jj[0]])
            else:
                # BUGFIX: the debug print above used to run unconditionally,
                # raising a KeyError when the variable was never assigned.
                if jj[3].upper() == 'DECAY':
                    af.Debug('Can not read the output var', jj)
                    af.Debug('In DECAY mode, set it as zero!')
                    par[jj[0]] = 0
                else:
                    af.Debug('Can not read the output var', jj)
                    return False
    return True
def Recover(self):
    """Restore each input file from its pristine '<file>.ESbackup' copy.

    The backup holds the original file with 'Replace' placeholders intact;
    moving it back returns the input file to its pre-scan state.
    """
    for ii in self._InFileID:
        if ii == '':
            continue
        backup = self._InputFile[ii] + '.ESbackup'
        if os.path.isfile(backup):
            # shutil.move instead of os.system("mv ...") — portable and safe
            # for paths containing spaces or shell metacharacters.
            shutil.move(backup, self._InputFile[ii])
## new function to use "math .." in [constrain]
## in order to add new variable in self.AllPar
def setGaussian(self, var):
    """Register 'Gaussian' constraints parsed from the configure file.

    Each entry needs 3 items, or 4-5 items whose 4th item ('Type') is one
    of symm/lower/upper.  Registered variables start out as NaN.
    """
    entries = af.string2nestlist(var)
    af.Info('Gaussian Constraint:')
    for entry in entries:
        n_items = len(entry)
        if n_items == 3:
            pass
        elif n_items in (4, 5):
            if entry[3].lower() not in ('symm', 'lower', 'upper'):
                af.ErrorStop( 'For the "Gaussian" constraint on "%s", the "Type" can only be "symm", "upper" or "lower", not "%s".'%(entry[0],entry[3]) )
        else:
            af.ErrorStop( 'The "Gaussian" constraint on "%s" need 4 or 5 items( VarID, Mean, Deviation, Type [, Name] ).'%(entry[0]) )
        self.cgauvar[entry[0]] = af.NaN
## new 20180430 liang
def setFreeFormChi2(self, var):
    """Register 'FreeFormChi2' constraints (1 or 2 items per entry).

    Registered variables start out as NaN.
    """
    entries = af.string2nestlist(var)
    af.Info('FreeFormChi2:')
    for entry in entries:
        valid = (len(entry) == 1 and entry[0]) or len(entry) == 2
        if not valid:
            af.ErrorStop( 'The "FreeFormChi2" constraint on "%s" need 1 item or 2 items( VarID [, Name] ).'%(entry[0]) )
        self.cffchi2var[entry[0]] = af.NaN
## for "Bound" in [programX]
def setBound(self, boundvar):
## boundvar is a string of all content in "Bound" in [programX] of configure file
self._BoundVar=af.string2nestlist(boundvar)
af.Info('Bound condition in program %s:'%self._ProgName)
for ii in self._BoundVar:
if len(ii) <3:
if ii[0] == '': ## Program can have zero input parameters
return
af.ErrorStop( 'The "Bound" in program "%s" must have at least 3 items.'%self._ProgName )
elif len(ii) == 3:
if ii[1] not in ["<=", ">=", ">", "<", "==", "!="]:
af.ErrorStop( 'The second item "%s" in "Bound" in program "%s" must be "<=", ">=", ">", "<", "==", "!=" or a real number at 3 items.'%(ii[1], self._ProgName) )
try:
float(ii[2])
except:
self.boundvar[ii[2]] = af.NaN
self.boundvar[ii[0]] = af.NaN
af.Info(' NameID= %s \tOperator= %s \tLimit= %s'%(ii[0],ii[1],ii[2]))
elif len(ii) in [4,5]:
if ii[2].lower() not in ["upper", "lower"]:
af.ErrorStop( 'The third item "%s" in "Bound" in program "%s" must be "lower" or "upper" at 4 items.'%(ii[2], self._ProgName) )
if not (ii[3].startswith('/home') or ii[3].startswith('~')):
ii[3]=os.path.join(af.CurrentPath, ii[3])
try:
open(ii[3])
except:
af.ErrorStop('Can not find/open the limit file "%s" in "Bound" in program "%s".'%(ii[3],self._ProgName))
try:
boundfile = numpy.loadtxt(ii[3])
except:
af.ErrorStop('Find string in the limit file "%s" in "Bound" in program "%s".'%(ii[3],self._ProgName))
try:
numpy.shape(boundfile)[1]
except:
af.ErrorStop('Only one row or column in the limit file "%s" in "Bound" in program "%s".'%(ii[3],self._ProgName))
if numpy.shape(boundfile)[1] < 2:
af.ErrorStop('Less than two columns in the limit file "%s" in "Bound" in program "%s".'%(ii[3],self._ProgName))
af.Info(' NameID= %s \tNameID= %s \tBound= %s \tBoundFile= %s'%(ii[0],ii[1],ii[2],ii[3]))
## new 20180429 liang
if len(ii) == 4:
jj = ii + ['Bound_%s_%s_%s'%(ii[0], ii[1], ii[2])]
else:
jj = ii
self.boundvar[jj[4]] = af.NaN
else:
af.ErrorStop( 'The "Bound" in program "%s" have at most 5 items.'%self._ProgName )
## for "Bound" in [programX]
## ReadBound() have survived conditions in SetBound()
## ReadBound() relies on the validation already performed in setBound().
def ReadBound(self, par):
    ## Evaluate every "Bound" condition against the current parameters.
    ## Returns True when all bounds are satisfied, False otherwise.
    ## return if no bound
    ## If no bound, self._BoundVar = [['']], self._BoundVar[0][0]=''.
    if not self._BoundVar:
        return True
    if not self._BoundVar[0][0]:
        return True
    ## add for "math ..." in "Bound" in [programX]
    af.parseMath(par)
    ## new 20180429 liang
    phy=True
    for ii in self._BoundVar:
        if len(ii)== 3:
            af.Debug('"%s=%f" compare to the limit "%s" in "Bound" for program %s'%(ii[0], par[ii[0]], ii[1:], self._ProgName))
            # The limit (ii[2]) is either a literal number or another
            # variable ID; eval() builds e.g. "1.5<=2.0" and evaluates it.
            try:
                float(ii[2])
            except:
                phy = phy and eval("%f%s%f"%(par[ii[0]], ii[1], par[ii[2]]))
            else:
                phy = phy and eval("%f%s%s"%(par[ii[0]], ii[1], ii[2]))
        elif len(ii) in [4,5]:
            # Derive the bound-variable name exactly as setBound() did.
            if len(ii) == 4:
                jj = ii + ['Bound_%s_%s_%s'%(ii[0], ii[1], ii[2])]
            else:
                jj = ii
            if not (ii[3].startswith('/home') or ii[3].startswith('~')):
                ii[3]=os.path.join(af.CurrentPath, ii[3])
            # Limit curve: first column is x, second column is y.
            boundfile = numpy.loadtxt(ii[3])
            x=boundfile[:,0]
            y=boundfile[:,1]
            if par[ii[0]] < numpy.amin(x) or par[ii[0]] > numpy.amax(x):
                # Outside the tabulated range: fall back to +/- log_zero so
                # the bound is effectively pass/fail by construction.
                af.WarningNoWait('"%s" less(greater) than min(max) of the first column in limit file "%s" with method "%s" in "Bound" in program "%s".'%(ii[0], ii[3], ii[2], self._ProgName))
                if ii[2].lower() == 'lower':
                    yinter = af.log_zero
                elif ii[2].lower() == 'upper':
                    yinter = -1.0*af.log_zero
                af.WarningNoWait(' So we set "%s=%e"'%(jj[4], yinter))
            else:
                yinter = numpy.interp(par[ii[0]], x, y)
            par[jj[4]] = yinter
            af.Debug('"x-axis: %s=%f, y-axis: %s=%f" compare to the %s limit "y-interplotion: %s=%f" by interplotion in "Bound" for program %s'%(ii[0], par[ii[0]], ii[1], par[ii[1]], ii[2].lower(), ii[1], yinter, self._ProgName))
            # Upper limit: the y-variable must lie below the curve; lower
            # limit: above it.
            if ii[2].lower() == "upper":
                phy = phy and eval("%f%s%s"%(par[ii[1]], '<=', par[jj[4]]))
            elif ii[2].lower() == "lower":
                phy = phy and eval("%f%s%s"%(par[ii[1]], '>=', par[jj[4]]))
    return phy
| phyzhangyang/EasyScan_HEP | src/program.py | Python | apache-2.0 | 47,915 | [
"Gaussian"
] | 13e27039fec751a656a68e3a8374bc094e8b411389e2cd999999ca58ea7fcdae |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import os, random, stat
from collections import OrderedDict
from copy import deepcopy
from itertools import count
from uuid import uuid4
# Bunch
from bunch import Bunch
# Zato
from zato.cli import common_odb_opts, kvdb_opts, ca_create_ca, ca_create_lb_agent, ca_create_server, \
ca_create_web_admin, create_cluster, create_lb, create_odb, create_server, create_web_admin, ZatoCommand
from zato.common.defaults import http_plain_server_port
from zato.common.markov_passwords import generate_password
from zato.common.util import make_repr
random.seed()
# Default number of servers created when --servers is not given.
DEFAULT_NO_SERVERS=2
# Taken from http://stackoverflow.com/a/246128
# Bash snippet resolving BASE_DIR to the directory of the running script,
# following symlinks; embedded at the top of every generated script.
script_dir = """SOURCE="${BASH_SOURCE[0]}"
BASE_DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$BASE_DIR/$SOURCE"
BASE_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
BASE_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
"""
# One check-config invocation per server; {server_name} is filled in by Create.
sanity_checks_template = """$ZATO_BIN check-config $BASE_DIR/{server_name}"""
# Starts one server; {step_number} is its position in the progress output.
start_servers_template = """
cd $BASE_DIR/{server_name}
$ZATO_BIN start .
echo [{step_number}/$STEPS] {server_name} started
"""
# Header of zato-qs-start.sh; {start_steps} is 4 + number of servers.
zato_qs_start_head_template = """#!/bin/bash
set -e
export ZATO_CLI_DONT_SHOW_OUTPUT=1
{script_dir}
ZATO_BIN={zato_bin}
STEPS={start_steps}
CLUSTER={cluster_name}
echo Starting the Zato cluster $CLUSTER
echo Running sanity checks
"""
# Body: sanity checks, then load-balancer, then all servers.
zato_qs_start_body_template = """
{sanity_checks}
echo [1/$STEPS] Redis connection OK
echo [2/$STEPS] SQL ODB connection OK
# Start the load balancer first ..
cd $BASE_DIR/load-balancer
$ZATO_BIN start .
echo [3/$STEPS] Load-balancer started
# .. servers ..
{start_servers}
"""
# Tail: web admin is started last because it may prompt interactively.
zato_qs_start_tail = """
# .. web admin comes as the last one because it may ask Django-related questions.
cd $BASE_DIR/web-admin
$ZATO_BIN start .
echo [$STEPS/$STEPS] Web admin started
cd $BASE_DIR
echo Zato cluster $CLUSTER started
echo Visit https://zato.io/support for more information and support options
exit 0
"""
# Stops one server; mirrors start_servers_template.
stop_servers_template = """
cd $BASE_DIR/{server_name}
$ZATO_BIN stop .
echo [{step_number}/$STEPS] {server_name} stopped
"""
# NOTE(review): the --delete-pidfiles branch hardcodes server1/server2 pidfile
# paths while the number of servers is configurable — with a different server
# count some pidfiles are missed; confirm whether this needs templating.
zato_qs_stop_template = """#!/bin/bash
export ZATO_CLI_DONT_SHOW_OUTPUT=1
{script_dir}
if [[ "$1" = "--delete-pidfiles" ]]
then
echo Deleting PID files
rm -f $BASE_DIR/load-balancer/pidfile
rm -f $BASE_DIR/load-balancer/zato-lb-agent.pid
rm -f $BASE_DIR/server1/pidfile
rm -f $BASE_DIR/server2/pidfile
rm -f $BASE_DIR/web-admin/pidfile
echo PID files deleted
fi
ZATO_BIN={zato_bin}
STEPS={stop_steps}
CLUSTER={cluster_name}
echo Stopping the Zato cluster $CLUSTER
# Start the load balancer first ..
cd $BASE_DIR/load-balancer
$ZATO_BIN stop .
echo [1/$STEPS] Load-balancer stopped
# .. servers ..
{stop_servers}
cd $BASE_DIR/web-admin
$ZATO_BIN stop .
echo [$STEPS/$STEPS] Web admin stopped
cd $BASE_DIR
echo Zato cluster $CLUSTER stopped
"""
# Restart simply chains the stop and start scripts.
zato_qs_restart = """#!/bin/bash
{script_dir}
cd $BASE_DIR
$BASE_DIR/zato-qs-stop.sh
$BASE_DIR/zato-qs-start.sh
"""
class CryptoMaterialLocation(object):
    """ Locates and remembers location of various crypto material for Zato components.

    Scans the CA directory's out-cert/out-priv/out-pub subdirectories for
    files matching '<component_pattern>-<kind>' and records their paths in
    cert_path, priv_path and pub_path (None when not found).
    """
    def __init__(self, ca_dir, component_pattern):
        self.ca_dir = ca_dir
        self.component_pattern = component_pattern
        # The CA certificate location is fixed relative to ca_dir.
        self.ca_certs_path = os.path.join(self.ca_dir, 'ca-material', 'ca-cert.pem')
        self.cert_path = None
        self.pub_path = None
        self.priv_path = None
        self.locate()

    def __repr__(self):
        return make_repr(self)

    def locate(self):
        """ Scan the out-* directories and record matching file paths.
        """
        for kind in ('cert', 'priv', 'pub'):
            out_dir = os.path.join(self.ca_dir, 'out-{}'.format(kind))
            needle = '{}-{}'.format(self.component_pattern, kind)
            for entry in os.listdir(out_dir):
                candidate = os.path.join(out_dir, entry)
                if needle in candidate:
                    setattr(self, '{}_path'.format(kind), candidate)
################################################################################
class Create(ZatoCommand):
    """ Quickly creates a working cluster
    """
    needs_empty_dir = True
    allow_empty_secrets = True
    opts = deepcopy(common_odb_opts) + deepcopy(kvdb_opts)
    opts.append({'name':'--cluster_name', 'help':'Name to be given to the new cluster'})
    opts.append({'name':'--servers', 'help':'Number of servers to be created'})

    def _bunch_from_args(self, args, cluster_name):
        # Copy the CLI arguments shared by all quickstart sub-commands into
        # one Bunch so each sub-command gets its own mutable view.
        bunch = Bunch()
        bunch.path = args.path
        bunch.verbose = args.verbose
        bunch.store_log = args.store_log
        bunch.store_config = args.store_config
        bunch.odb_type = args.odb_type
        bunch.odb_host = args.odb_host
        bunch.odb_port = args.odb_port
        bunch.odb_user = args.odb_user
        bunch.odb_db_name = args.odb_db_name
        bunch.kvdb_host = args.kvdb_host
        bunch.kvdb_port = args.kvdb_port
        bunch.sqlite_path = getattr(args, 'sqlite_path', None)
        bunch.postgresql_schema = getattr(args, 'postgresql_schema', None)
        bunch.odb_password = args.odb_password
        bunch.kvdb_password = args.kvdb_password
        bunch.cluster_name = cluster_name
        return bunch

    def execute(self, args):
        """ Quickly creates Zato components
        1) CA and crypto material
        2) ODB
        3) ODB initial data
        4) servers
        5) load-balancer
        6) Web admin
        7) Scripts
        """
        if args.odb_type == 'sqlite':
            args.sqlite_path = os.path.abspath(os.path.join(args.path, 'zato.db'))
        # NOTE(review): count().next() is Python 2 syntax — confirm the
        # target interpreter (Python 3 would need next(...)).
        next_step = count(1)
        next_port = count(http_plain_server_port)
        cluster_name = getattr(args, 'cluster_name', None) or 'quickstart-{}'.format(random.getrandbits(20)).zfill(7)
        servers = int(getattr(args, 'servers', 0) or DEFAULT_NO_SERVERS)
        # Map '1' -> 'server1', '2' -> 'server2', ... in creation order.
        server_names = OrderedDict()
        for idx in range(1, servers+1):
            server_names['{}'.format(idx)] = 'server{}'.format(idx)
        total_steps = 6 + servers
        admin_invoke_password = uuid4().hex
        broker_host = 'localhost'
        broker_port = 6379
        lb_host = 'localhost'
        lb_port = 11223
        lb_agent_port = 20151
        args_path = os.path.abspath(args.path)
        # This could've been set to True by user in the command-line so we'd want
        # to unset it so that individual commands quickstart invokes don't attempt
        # to store their own configs.
        args.store_config = False
        #
        # 1) CA
        #
        ca_path = os.path.join(args_path, 'ca')
        os.mkdir(ca_path)
        ca_args = self._bunch_from_args(args, cluster_name)
        ca_args.path = ca_path
        ca_create_ca.Create(ca_args).execute(ca_args, False)
        ca_create_lb_agent.Create(ca_args).execute(ca_args, False)
        ca_create_web_admin.Create(ca_args).execute(ca_args, False)
        # One set of crypto material per server, keyed like server_names.
        server_crypto_loc = {}
        for name in server_names:
            ca_args_server = deepcopy(ca_args)
            ca_args_server.server_name = server_names[name]
            ca_create_server.Create(ca_args_server).execute(ca_args_server, False)
            server_crypto_loc[name] = CryptoMaterialLocation(ca_path, '{}-{}'.format(cluster_name, server_names[name]))
        lb_agent_crypto_loc = CryptoMaterialLocation(ca_path, 'lb-agent')
        web_admin_crypto_loc = CryptoMaterialLocation(ca_path, 'web-admin')
        self.logger.info('[{}/{}] Certificate authority created'.format(next_step.next(), total_steps))
        #
        # 2) ODB
        #
        if create_odb.Create(args).execute(args, False) == self.SYS_ERROR.ODB_EXISTS:
            self.logger.info('[{}/{}] ODB schema already exists'.format(next_step.next(), total_steps))
        else:
            self.logger.info('[{}/{}] ODB schema created'.format(next_step.next(), total_steps))
        #
        # 3) ODB initial data
        #
        create_cluster_args = self._bunch_from_args(args, cluster_name)
        create_cluster_args.broker_host = broker_host
        create_cluster_args.broker_port = broker_port
        create_cluster_args.lb_host = lb_host
        create_cluster_args.lb_port = lb_port
        create_cluster_args.lb_agent_port = lb_agent_port
        create_cluster_args.admin_invoke_password = admin_invoke_password
        create_cluster.Create(create_cluster_args).execute(create_cluster_args, False)
        self.logger.info('[{}/{}] ODB initial data created'.format(next_step.next(), total_steps))
        #
        # 4) servers
        #
        for name in server_names:
            server_path = os.path.join(args_path, server_names[name])
            os.mkdir(server_path)
            create_server_args = self._bunch_from_args(args, cluster_name)
            create_server_args.server_name = server_names[name]
            create_server_args.path = server_path
            create_server_args.cert_path = server_crypto_loc[name].cert_path
            create_server_args.pub_key_path = server_crypto_loc[name].pub_path
            create_server_args.priv_key_path = server_crypto_loc[name].priv_path
            create_server_args.ca_certs_path = server_crypto_loc[name].ca_certs_path
            # Each server gets the next HTTP port from next_port.
            create_server.Create(create_server_args).execute(create_server_args, next_port.next(), False)
            self.logger.info('[{}/{}] server{} created'.format(next_step.next(), total_steps, name))
        #
        # 5) load-balancer
        #
        lb_path = os.path.join(args_path, 'load-balancer')
        os.mkdir(lb_path)
        create_lb_args = self._bunch_from_args(args, cluster_name)
        create_lb_args.path = lb_path
        create_lb_args.cert_path = lb_agent_crypto_loc.cert_path
        create_lb_args.pub_key_path = lb_agent_crypto_loc.pub_path
        create_lb_args.priv_key_path = lb_agent_crypto_loc.priv_path
        create_lb_args.ca_certs_path = lb_agent_crypto_loc.ca_certs_path
        # Need to substract 1 because we've already called .next() twice
        # when creating servers above.
        servers_port = next_port.next() - 1
        create_lb.Create(create_lb_args).execute(create_lb_args, True, servers_port, False)
        self.logger.info('[{}/{}] Load-balancer created'.format(next_step.next(), total_steps))
        #
        # 6) Web admin
        #
        web_admin_path = os.path.join(args_path, 'web-admin')
        os.mkdir(web_admin_path)
        create_web_admin_args = self._bunch_from_args(args, cluster_name)
        create_web_admin_args.path = web_admin_path
        create_web_admin_args.cert_path = web_admin_crypto_loc.cert_path
        create_web_admin_args.pub_key_path = web_admin_crypto_loc.pub_path
        create_web_admin_args.priv_key_path = web_admin_crypto_loc.priv_path
        create_web_admin_args.ca_certs_path = web_admin_crypto_loc.ca_certs_path
        create_web_admin_args.admin_invoke_password = admin_invoke_password
        password = generate_password()
        # admin_created is False when an 'admin' user already exists in ODB.
        admin_created = create_web_admin.Create(create_web_admin_args).execute(
            create_web_admin_args, False, password, True)
        # Need to reset the logger here because executing the create_web_admin command
        # loads the web admin's logger which doesn't like that of ours.
        self.reset_logger(args, True)
        self.logger.info('[{}/{}] Web admin created'.format(next_step.next(), total_steps))
        #
        # 7) Scripts
        #
        zato_bin = 'zato'
        zato_qs_start_path = os.path.join(args_path, 'zato-qs-start.sh')
        zato_qs_stop_path = os.path.join(args_path, 'zato-qs-stop.sh')
        zato_qs_restart_path = os.path.join(args_path, 'zato-qs-restart.sh')
        sanity_checks = []
        start_servers = []
        stop_servers = []
        # Step numbers are offset by the non-server steps that precede them.
        for name in server_names:
            sanity_checks.append(sanity_checks_template.format(server_name=server_names[name]))
            start_servers.append(start_servers_template.format(server_name=server_names[name], step_number=int(name)+3))
            stop_servers.append(stop_servers_template.format(server_name=server_names[name], step_number=int(name)+1))
        sanity_checks = '\n'.join(sanity_checks)
        start_servers = '\n'.join(start_servers)
        stop_servers = '\n'.join(stop_servers)
        start_steps = 4 + servers
        stop_steps = 2 + servers
        zato_qs_start_head = zato_qs_start_head_template.format(zato_bin=zato_bin, script_dir=script_dir, cluster_name=cluster_name, start_steps=start_steps)
        zato_qs_start_body = zato_qs_start_body_template.format(sanity_checks=sanity_checks, start_servers=start_servers)
        zato_qs_start = zato_qs_start_head + zato_qs_start_body + zato_qs_start_tail
        zato_qs_stop = zato_qs_stop_template.format(zato_bin=zato_bin, script_dir=script_dir, cluster_name=cluster_name, stop_steps=stop_steps, stop_servers=stop_servers)
        open(zato_qs_start_path, 'w').write(zato_qs_start)
        open(zato_qs_stop_path, 'w').write(zato_qs_stop)
        open(zato_qs_restart_path, 'w').write(zato_qs_restart.format(script_dir=script_dir, cluster_name=cluster_name))
        # Scripts are owner rwx, group r.
        file_mod = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
        os.chmod(zato_qs_start_path, file_mod)
        os.chmod(zato_qs_stop_path, file_mod)
        os.chmod(zato_qs_restart_path, file_mod)
        self.logger.info('[{}/{}] Management scripts created'.format(next_step.next(), total_steps))
        self.logger.info('Quickstart cluster {} created'.format(cluster_name))
        if admin_created:
            self.logger.info('Web admin user:[admin], password:[{}]'.format(password))
        else:
            self.logger.info('User [admin] already exists in the ODB')
        start_command = os.path.join(args_path, 'zato-qs-start.sh')
        self.logger.info('Start the cluster by issuing the {} command'.format(start_command))
        self.logger.info('Visit https://zato.io/support for more information and support options')
| AmrnotAmr/zato | code/zato-cli/src/zato/cli/quickstart.py | Python | gpl-3.0 | 14,479 | [
"VisIt"
] | 031fd049712fe48f2322cf8fd869b736d6db1b2c7eb90992519466cfdb74a2b7 |
"""
Module provides utility functions to manipulate netCDF dataset.
- classify variable types of coordinate, coordinate bounds, grid mapping, scientific data,
auxiliary coordinate
- show original metadata of a variable
Reference code
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
"""
import re
from collections import OrderedDict
import osr
import netCDF4
import numpy
# Functions for General Purpose
def get_nc_dataset(nc_file_name):
    """
    (string)-> object
    Return: the netCDF dataset opened read-only, or None when the file
    can not be opened as netCDF.
    """
    try:
        return netCDF4.Dataset(nc_file_name, 'r')
    except Exception:
        return None
def get_nc_variable(nc_file_name, nc_variable_name):
    """
    (string or Dataset, string) -> obj
    Return: the netCDF variable with the given name, or None when absent.
    nc_file_name may be either a file path or an already-open Dataset.
    """
    if isinstance(nc_file_name, netCDF4.Dataset):
        dataset = nc_file_name
    else:
        dataset = get_nc_dataset(nc_file_name)
    return dataset.variables.get(nc_variable_name, None)
def get_nc_variable_original_meta(nc_dataset, nc_variable_name):
    """
    (object, string)-> OrderedDict
    Return: the variable's original metadata as defined in the netCDF file:
    dimension/shape/data_type first, then every attribute, all stringified.
    """
    variable = nc_dataset.variables[nc_variable_name]
    meta = OrderedDict()
    meta['dimension'] = str(variable.dimensions)
    meta['shape'] = str(variable.shape)
    meta['data_type'] = str(variable.dtype)
    for attr_name, attr_value in variable.__dict__.items():
        meta[attr_name] = str(attr_value)
    return meta
# Functions for coordinate information of the dataset
# The functions below will call functions defined for auxiliary, coordinate and bounds variables.
def get_nc_variables_coordinate_type_mapping(nc_dataset):
    """
    (object)-> dict
    Return: variable name -> coordinate-type tag:
    X/Y/Z/T/Unknown + 'C' for coordinate variables,
    X/Y/Z/T/Unknown + 'A' for auxiliary coordinate variables,
    the same tags suffixed with '_bnd' for their bounds variables.
    """
    mapping = {}
    source_groups = (
        ('C', get_nc_coordinate_variables(nc_dataset)),
        ('A', get_nc_auxiliary_coordinate_variables(nc_dataset)),
    )
    for suffix, variables in source_groups:
        for name, variable in variables.items():
            type_tag = get_nc_variable_coordinate_type(variable) + suffix
            mapping[name] = type_tag
            # A bounds variable inherits the tag with a '_bnd' suffix, but
            # only when it actually exists in the dataset.
            bounds_name = getattr(variable, 'bounds', None)
            if bounds_name and nc_dataset.variables.get(bounds_name, None):
                mapping[bounds_name] = type_tag + '_bnd'
    return mapping
def get_nc_variable_coordinate_type(nc_variable):
    """
    (object)-> string
    Return: one of 'X', 'Y', 'Z', 'T' for the variable, or 'Unknown'
    when the coordinate type can not be discerned.
    """
    # 1) An explicit CF 'axis' attribute wins outright.
    if hasattr(nc_variable, 'axis') and nc_variable.axis:
        return nc_variable.axis
    # 2) Match standard_name (falling back to long_name) case-insensitively.
    name = getattr(nc_variable, 'standard_name',
                   getattr(nc_variable, 'long_name', None))
    if name:
        patterns = (
            ('latitude', 'Y'),
            ('longitude', 'X'),
            ('time', 'T'),
            ('projection_x_coordinate', 'X'),
            ('projection_y_coordinate', 'Y'),
        )
        for pattern, coordinate_type in patterns:
            if re.match(pattern, name, re.I):
                return coordinate_type
    # 3) A 'positive' attribute marks a vertical (Z) axis.
    if hasattr(nc_variable, 'positive'):
        return 'Z'
    # 4) Fall back to the units string.
    if hasattr(nc_variable, 'units') and nc_variable.units:
        units = nc_variable.units
        if re.match('degree(s)?.e(ast)?', units, re.I):
            return 'X'
        if re.match('degree(s)?.n(orth)?', units, re.I):
            return 'Y'
        # '<interval> since <epoch>' means a time axis (see python netcdf4).
        parts = units.split(' ')
        time_units = ['days', 'hours', 'minutes', 'seconds',
                      'milliseconds', 'microseconds']
        if len(parts) >= 3 and parts[0].lower() in time_units and parts[1].lower() == 'since':
            return 'T'
    return 'Unknown'
def get_nc_variable_coordinate_meta(nc_dataset, nc_variable_name):
    """
    (object, string)-> dict
    Return: coordinate meta data (type, units, start and end values) if the
            named variable is related to a coordinate type: coordinate or
            auxiliary coordinate variable or bounds variable. Empty dict
            otherwise.
    """
    nc_variables_coordinate_type_mapping = get_nc_variables_coordinate_type_mapping(nc_dataset)
    nc_variable_coordinate_meta = {}
    if nc_variable_name in nc_variables_coordinate_type_mapping:
        nc_variable = nc_dataset.variables[nc_variable_name]
        nc_variable_data = nc_variable[:]
        nc_variable_coordinate_type = nc_variables_coordinate_type_mapping[nc_variable_name]
        coordinate_max = None
        coordinate_min = None
        if nc_variable_data.size:
            # argmin/argmax return flat indices; unravel them so the lookup
            # also works for multi-dimensional (auxiliary/bounds) variables.
            coordinate_min = nc_variable_data[numpy.unravel_index(nc_variable_data.argmin(),
                                                                  nc_variable_data.shape)]
            coordinate_max = nc_variable_data[numpy.unravel_index(nc_variable_data.argmax(),
                                                                  nc_variable_data.shape)]
        coordinate_units = nc_variable.units if hasattr(nc_variable, 'units') else ''
        if nc_variable_coordinate_type in ['TC', 'TA', 'TC_bnd', 'TA_bnd']:
            # Locate the time coordinate variable itself ('TC'/'TA') that
            # carries the units/calendar needed to convert raw numbers to dates.
            # BUGFIX: materialize the dict views as lists — on Python 3,
            # dict.values() has no .index() and dict.keys() is not indexable.
            mapping_types = list(nc_variables_coordinate_type_mapping.values())
            mapping_names = list(nc_variables_coordinate_type_mapping.keys())
            index = mapping_types.index(nc_variable_coordinate_type[:2])
            var_name = mapping_names[index]
            var_obj = nc_dataset.variables[var_name]
            time_units = var_obj.units if hasattr(var_obj, 'units') else ''
            time_calendar = var_obj.calendar if hasattr(var_obj, 'calendar') else 'standard'
            if time_units and time_calendar:
                try:
                    coordinate_min = netCDF4.num2date(coordinate_min, units=time_units,
                                                      calendar=time_calendar)
                    coordinate_max = netCDF4.num2date(coordinate_max, units=time_units,
                                                      calendar=time_calendar)
                    coordinate_units = time_units
                except Exception:
                    # Unparseable units/calendar: keep the raw numeric values.
                    pass
        nc_variable_coordinate_meta = {
            'coordinate_type': nc_variable_coordinate_type,
            'coordinate_units': coordinate_units,
            'coordinate_start': coordinate_min,
            'coordinate_end': coordinate_max
        }
    return nc_variable_coordinate_meta
# Functions for Coordinate Variable
# coordinate variable has the following attributes:
# 1) it has 1 dimension
# 2) its name is the same as its dimension name (COARDS convention)
# 3) coordinate variable sometimes doesn't represent the real lat lon time vertical info
# 4) coordinate variable sometimes has associated bound variable if it represents
# the real lat lon time vertical info
def get_nc_coordinate_variables(nc_dataset):
    """
    (object)-> dict
    Return netCDF coordinate variables: the 1-D variables whose name equals
    their single dimension name (COARDS convention).
    """
    all_variables = nc_dataset.variables
    return {name: all_variables[name]
            for name, variable in all_variables.items()
            if len(variable.shape) == 1 and name == variable.dimensions[0]}
def get_nc_coordinate_variable_namelist(nc_dataset):
    """
    (object)-> list
    Return netCDF coordinate variable names.
    """
    nc_coordinate_variables = get_nc_coordinate_variables(nc_dataset)
    # Wrap in list() so the documented list type holds on Python 3 too
    # (dict.keys() is a view there), matching get_nc_data_variable_namelist.
    nc_coordinate_variable_namelist = list(nc_coordinate_variables.keys())
    return nc_coordinate_variable_namelist
# Functions for Auxiliary Coordinate Variable
# auxiliary variable has the following attributes:
# 1) it is used when the variable dimensions are not representing the lat, lon,
# time and vertical coordinate
# 2) the data variable will include 'coordinates' attribute to store the name of the
# auxiliary coordinate variable
def get_nc_auxiliary_coordinate_variable_namelist(nc_dataset):
    """
    (object) -> list
    Return: the netCDF auxiliary coordinate variable names (deduplicated,
    collected from every data variable's 'coordinates' attribute).
    """
    collected_names = []
    for variable in nc_dataset.variables.values():
        if hasattr(variable, 'coordinates'):
            collected_names.extend(variable.coordinates.split(' '))
    return list(set(collected_names))
def get_nc_auxiliary_coordinate_variables(nc_dataset):
    """
    (object) -> dict
    Return: the netCDF auxiliary coordinate variables
    Format: {'var_name': var_obj}
    """
    auxiliary_variables = {}
    # Only keep names that actually resolve to a variable in the dataset.
    for name in get_nc_auxiliary_coordinate_variable_namelist(nc_dataset):
        if nc_dataset.variables.get(name, ''):
            auxiliary_variables[name] = nc_dataset.variables[name]
    return auxiliary_variables
# Functions for Bounds Variable
# the Bounds variable has the following attributes:
# 1) bounds variable is used to define the cell
# 2) It is associated with the coordinate or auxiliary coordinate variable.
# 3) If a coordinate or an auxiliary coordinate variable has an associated
#    bounds variable, it carries that variable's name in its 'bounds' attribute
def get_nc_coordinate_bounds_variables(nc_dataset):
    """
    (object) -> dict
    Return: the netCDF coordinate bounds variables, gathered from the 'bounds'
            attribute of every coordinate and auxiliary coordinate variable.
    Format: {'var_name': var_obj}
    """
    # Merge both variable groups with dict.update: the previous
    # ``dict(a.items() + b.items())`` idiom raises TypeError on Python 3,
    # where dict.items() returns a view that does not support '+'.
    candidate_variables = {}
    candidate_variables.update(get_nc_coordinate_variables(nc_dataset))
    candidate_variables.update(get_nc_auxiliary_coordinate_variables(nc_dataset))
    nc_coordinate_bounds_variables = {}
    for var_name, var_obj in candidate_variables.items():
        # Keep only 'bounds' references that resolve to a real variable.
        if hasattr(var_obj, 'bounds') and nc_dataset.variables.get(var_obj.bounds, None):
            nc_coordinate_bounds_variables[var_obj.bounds] = nc_dataset.variables[var_obj.bounds]
    return nc_coordinate_bounds_variables
def get_nc_coordinate_bounds_variable_namelist(nc_dataset):
    """
    (object) -> list
    Return: the netCDF coordinate bounds variable names.
    """
    nc_coordinate_bounds_variables = get_nc_coordinate_bounds_variables(nc_dataset)
    # Wrap in list() so the documented list type holds on Python 3 too
    # (dict.keys() is a view there), matching get_nc_data_variable_namelist.
    nc_coordinate_bounds_variable_namelist = list(nc_coordinate_bounds_variables.keys())
    return nc_coordinate_bounds_variable_namelist
# Function for Data Variable
def get_nc_data_variables(nc_dataset):
    """
    (object) -> dict
    Return: the netCDF data variables — everything that is not a coordinate,
    auxiliary coordinate or bounds variable and has at least one dimension.
    """
    non_data_names = get_nc_variables_coordinate_type_mapping(nc_dataset).keys()
    return {name: variable
            for name, variable in nc_dataset.variables.items()
            if name not in non_data_names and len(variable.shape) >= 1}
def get_nc_data_variable_namelist(nc_dataset):
    """
    (object) -> list
    Return: the netCDF data variable names.
    """
    # list(dict) yields the keys, same as list(dict.keys()).
    return list(get_nc_data_variables(nc_dataset))
# Functions for Grid Mapping Variable
def get_nc_grid_mapping_variable_name(nc_dataset):
    """
    (object)-> string
    Return: the name of the variable carrying a non-empty 'grid_mapping_name'
    attribute ('' when none exists; the last match wins if several do).
    """
    found_name = ''
    for name, variable in nc_dataset.variables.items():
        if getattr(variable, 'grid_mapping_name', None):
            found_name = name
    return found_name
def get_nc_grid_mapping_variable(nc_dataset):
    """
    (object)-> object
    Return: the netCDF grid mapping variable object (None when none exists;
    the last variable with a 'grid_mapping_name' attribute wins).
    """
    found_variable = None
    for variable in nc_dataset.variables.values():
        # Note: unlike get_nc_grid_mapping_variable_name, presence of the
        # attribute is enough here — its value may be empty.
        if hasattr(variable, 'grid_mapping_name'):
            found_variable = variable
    return found_variable
def get_nc_grid_mapping_projection_name(nc_dataset):
    """
    (object)-> string
    Return: the netCDF grid mapping projection name ('' when absent).
    """
    # getattr on None simply yields the default, so a missing grid mapping
    # variable falls through to ''.
    grid_mapping_variable = get_nc_grid_mapping_variable(nc_dataset)
    return getattr(grid_mapping_variable, 'grid_mapping_name', '')
def get_nc_grid_mapping_crs_name(nc_dataset):
    """
    (object)-> string
    Return: the netCDF grid mapping crs projection name. The WKT name is taken
    as the first option; the grid mapping name is the second option.
    """
    grid_mapping_variable = get_nc_grid_mapping_variable(nc_dataset)
    crs_name = ''
    for attribute_name in ['crs_wkt', 'spatial_ref', 'esri_pe_string']:
        if not hasattr(grid_mapping_variable, attribute_name):
            continue
        # First WKT-bearing attribute found ends the search, parsed or not.
        projection_string = getattr(grid_mapping_variable, attribute_name)
        try:
            spatial_ref = osr.SpatialReference()
            spatial_ref.ImportFromWkt(projection_string)
            if spatial_ref.IsProjected():
                crs_name = spatial_ref.GetAttrValue('projcs', 0)
            else:
                crs_name = spatial_ref.GetAttrValue('geogcs', 0)
        except Exception:
            # Unparseable WKT: fall back to the grid mapping name below.
            pass
        break
    if crs_name == '':
        crs_name = get_nc_grid_mapping_projection_name(nc_dataset)
    return crs_name
def get_nc_grid_mapping_projection_import_string_dict(nc_dataset):
    """
    (object)-> dict
    Return: the netCDF grid mapping info dictionary proj4 or WKT string used for creating projection
            object with pyproj.Proj() or gdal
    Reference: Cf convention for grid mapping projection
               http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/ch05s06.html
    Result keys: 'text' (projection string), 'type' ('WKT String' or
    'Proj4 String'), 'datum' (WKT DATUM name or ''). Empty dict when no
    projection information can be derived.
    """
    projection_import_string_dict = {}
    # get the proj name, proj variable
    nc_grid_mapping_projection_name = get_nc_grid_mapping_projection_name(nc_dataset)
    nc_grid_mapping_variable = get_nc_grid_mapping_variable(nc_dataset)
    # get the projection string and type
    projection_string = ''
    # Prefer a ready-made WKT string stored on the grid mapping variable;
    # the first attribute found wins.
    for attribute_name in ['crs_wkt', 'spatial_ref', 'esri_pe_string']:
        if hasattr(nc_grid_mapping_variable, attribute_name):
            projection_string = getattr(nc_grid_mapping_variable, attribute_name)
            break
    if projection_string:
        projection_type = 'WKT String'
        try:
            # Extract the datum name from the WKT via GDAL/OSR.
            spatial_ref = osr.SpatialReference()
            spatial_ref.ImportFromWkt(projection_string)
            datum = spatial_ref.GetAttrValue("DATUM", 0) \
                if spatial_ref.GetAttrValue("DATUM", 0) else ''
        except Exception:
            datum = ''
    else:
        # No WKT available: assemble a proj4 string from the CF attributes.
        # CF grid_mapping_name -> proj4 projection id.
        proj_names = {
            'albers_conical_equal_area': 'aea',
            'azimuthal_equidistant': 'aeqd',
            'lambert_azimuthal_equal_area': 'laea',
            'lambert_conformal_conic': 'lcc',  # tested with prcp.nc
            'lambert_cylindrical_equal_area': 'cea',
            'mercator': 'merc',
            'orthographic': 'ortho',
            'polar_stereographic': 'stere',
            'stereographic': 'stere',
            'transverse_mercator': 'tmerc',  # test with swe.nc
            'vertical_perspective': 'geos',
        }
        # proj4 parameter -> comma separated CF attribute aliases; the first
        # alias present on the grid mapping variable wins.
        proj_paras = {
            '+y_0': 'false_northing',
            '+x_0': 'false_easting',
            '+k_0': 'scale_factor_at_projection_origin,scale_factor_at_central_meridian',
            '+lat_0': 'latitude_of_projection_origin',
            '+lon_0': 'longitude_of_projection_origin,longitude_of_central_meridian,'
                      'straight_vertical_longitude_from_pole',
            '+h': 'perspective_point_height',
            '+a': 'semi_major_axis',
            '+b': 'semi_minor_axis',
        }
        # Projections whose standard_parallel maps to lat_1/lat_2 rather
        # than +lat_ts.
        standard_parallel_types = ['albers_conical_equal_area', 'lambert_conformal_conic']
        # create the projection import string
        proj_info_list = []
        if nc_grid_mapping_projection_name in proj_names .keys():
            # add projection name
            proj_info_list.append('+proj={0}'.format(proj_names[nc_grid_mapping_projection_name]))
            # add basic parameters
            for proj4_para, cf_para in proj_paras.items():
                for para in cf_para.split(','):
                    if hasattr(nc_grid_mapping_variable, para):
                        proj_info_list.append(
                            '{0}={1}'.format(proj4_para, getattr(nc_grid_mapping_variable, para)))
                        break
            # add standard parallel para
            if hasattr(nc_grid_mapping_variable, 'standard_parallel'):
                if nc_grid_mapping_projection_name in standard_parallel_types:
                    # standard_parallel may hold one or two values, possibly as
                    # a bracketed array repr; parse and sort them numerically.
                    str_value = str(nc_grid_mapping_variable.standard_parallel).strip('[]').split()
                    try:
                        num_value = sorted([float(x) for x in str_value])
                        if num_value.__len__() <= 2:
                            # NOTE(review): emitted as 'lat_1='/'lat_2=' without
                            # the leading '+' every other parameter uses —
                            # confirm the downstream consumer accepts this form.
                            proj_info_list.extend(['lat_{0}={1}'.format(i+1, j)
                                                   for i, j in enumerate(num_value)])
                    except Exception:
                        # Unparseable standard_parallel: omit it silently.
                        pass
                else:
                    proj_info_list.append(
                        '{0}={1}'.format('+lat_ts', nc_grid_mapping_variable.standard_parallel))
        projection_string = ' '.join(proj_info_list)
        projection_type = 'Proj4 String'
        datum = ''
    if projection_string:
        projection_import_string_dict = {
            'text': projection_string,
            'type': projection_type,
            'datum': datum,
        }
    return projection_import_string_dict
| RENCI/xDCIShare | hs_file_types/nc_functions/nc_utils.py | Python | bsd-3-clause | 18,904 | [
"NetCDF"
] | 67128b009cc95431ba30501d64ae588eb3bb0f2ea2504e0d516b4f403df1d9c6 |
# (c) 2015 - Jaguar Land Rover.
#
# Mozilla Public License 2.0
#
# Python-based partition manager PoC
import gtk
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
import sys
import time
import swm
#
# Partition manager service
#
class PartMgrService(dbus.service.Object):
def __init__(self):
bus_name = dbus.service.BusName('org.genivi.PartitionManager', bus=dbus.SessionBus())
dbus.service.Object.__init__(self, bus_name, '/org/genivi/PartitionManager')
@dbus.service.method('org.genivi.PartitionManager',
async_callbacks=('send_reply', 'send_error'))
def createDiskPartition(self,
transaction_id,
disk,
partition_number,
partition_type,
start,
size,
guid,
name,
send_reply,
send_error):
print "Partition Manager: createDiskPartition()"
print " Operfation Transaction ID: {}".format(transaction_id)
print " Disk: {}".format(disk)
print " Partition Number: {}".format(partition_number)
print " Partition Type: {}".format(partition_type)
print " Start: {}".format(start)
print " Size: {}".format(size)
print " GUID: {}".format(guid)
print " Name: {}".format(name)
print "---"
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate install
print "Create partition: disk({}) partiton({}) (3 sec)".format(disk, partition_number)
for i in xrange(1,30):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.1)
print
print "Done"
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Partition create successful. Disk: {}:{}".format(disk, partition_number))
return None
@dbus.service.method('org.genivi.PartitionManager',
async_callbacks=('send_reply', 'send_error'))
def resizeDiskPartition(self,
transaction_id,
disk,
partition_number,
start,
size,
send_reply,
send_error):
print "Partition Manager: resizeDiskPartition()"
print " Operfation Transaction ID: {}".format(transaction_id)
print " Disk: {}".format(disk)
print " Partition Number: {}".format(partition_number)
print " Start: {}".format(start)
print " Size: {}".format(size)
print "---"
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate install
print "Resizing partition: disk({}) partiton({}) (10 sec)".format(disk, partition_number)
for i in xrange(1,50):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.2)
print
print "Done"
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Partition resize success. Disk: {}:{}".format(disk, partition_number))
return None
@dbus.service.method('org.genivi.PartitionManager',
async_callbacks=('send_reply', 'send_error'))
def deleteDiskPartition(self,
transaction_id,
disk,
send_reply,
send_error):
print "Partition Manager: deleteDiskPartition()"
print " Operation Transaction ID: {}".format(transaction_id)
print " Disk: {}".format(disk)
print " Partition Number: {}".format(partition_number)
print "---"
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate install
print "Delete partition: disk({}) partiton({}) (5 sec)".format(disk, partition_number)
for i in xrange(1,10):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.2)
print
print "Done"
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Partition delete success. Disk: {}:{}".format(disk, partition_number))
return None
@dbus.service.method('org.genivi.PartitionManager',
async_callbacks=('send_reply', 'send_error'))
def writeDiskPartition(self,
transaction_id,
disk,
partition_number,
image_path,
blacklisted_partitions,
send_reply,
send_error):
print "Partition Manager: writeDiskPartition()"
print " Operfation Transaction ID: {}".format(transaction_id)
print " Disk: {}".format(disk)
print " Partition Number: {}".format(partition_number)
print " Image Path: {}".format(image_path)
print " Blacklisted Partitions: {}".format(blacklisted_partitions)
print "---"
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate write
print "Writing partition: disk({}) partition({}) (10 sec)".format(disk, partition_number)
for i in xrange(1,50):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.2)
print
print "Done"
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Partition write success. Disk: {}:{} Image: {}".
format(disk, partition_number, image_path))
return None
@dbus.service.method('org.genivi.PartitionManager',
async_callbacks=('send_reply', 'send_error'))
def patchDiskPartition(self,
transaction_id,
disk,
partition_number,
image_path,
blacklisted_partitions,
send_reply,
send_error):
print "Partition Manager: patchDiskPartition()"
print " Operfation Transaction ID: {}".format(transaction_id)
print " Disk: {}".format(disk)
print " Partition Number: {}".format(partition_number)
print " Image Path: {}".format(image_path)
print " Blacklisted Partitions: {}".format(blacklisted_partitions)
print "---"
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate patch
print "Patching partition: disk({}) partiton({}) (10 sec)".format(disk, partition_number)
for i in xrange(1,50):
sys.stdout.patch('.')
sys.stdout.flush()
time.sleep(0.2)
print
print "Done"
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Partition patch success. Disk: {}:{} Image: {}".
format(disk, partition_number, image_path))
return None
# Entry point: announce the service, hook DBUS into the GLib main loop,
# instantiate the service object, and spin the gtk main loop forever so
# incoming DBUS method calls are dispatched.
print
print "Partition Manager."
print
DBusGMainLoop(set_as_default=True)
part_mgr = PartMgrService()
while True:
    gtk.main_iteration()
| magnusfeuer/genivi_software_management | partition_manager/partition_manager.py | Python | mpl-2.0 | 8,847 | [
"Jaguar"
] | 0b25768284a2305c3fbfa1bd76ecf5bf42ea302ec80d44846f35350ca71e0ea6 |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_array_equal, assert_array_almost_equal,
suppress_warnings)
from numpy import random
from numpy.compat import asbytes
import sys
import warnings
class TestSeed(TestCase):
    """Seeding behaviour of np.random.RandomState.

    Scalar and array seeds must reproduce known MT19937 draws; seeds outside
    the unsigned 32-bit range (or non-integer seeds) must raise.
    """
    def test_scalar(self):
        # Known first draws for the extreme scalar seeds 0 and 2**32 - 1.
        s = np.random.RandomState(0)
        assert_equal(s.randint(1000), 684)
        s = np.random.RandomState(4294967295)
        assert_equal(s.randint(1000), 419)
    def test_array(self):
        # Sequence seeds: range, ndarray, and single-element lists all work.
        s = np.random.RandomState(range(10))
        assert_equal(s.randint(1000), 468)
        s = np.random.RandomState(np.arange(10))
        assert_equal(s.randint(1000), 468)
        s = np.random.RandomState([0])
        assert_equal(s.randint(1000), 973)
        s = np.random.RandomState([4294967295])
        assert_equal(s.randint(1000), 265)
    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, -0.5)
        assert_raises(ValueError, np.random.RandomState, -1)
    def test_invalid_array(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, [-0.5])
        assert_raises(ValueError, np.random.RandomState, [-1])
        assert_raises(ValueError, np.random.RandomState, [4294967296])
        assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
        assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
    """Edge cases of the binomial distribution."""
    def test_n_zero(self):
        # Tests the corner case of n == 0 for the binomial distribution.
        # binomial(0, p) should be zero for any p in [0, 1].
        # This test addresses issue #3480.
        zeros = np.zeros(2, dtype='int')
        for p in [0, .5, 1]:
            assert_(random.binomial(0, p) == 0)
            assert_array_equal(random.binomial(zeros, p), zeros)
    def test_p_is_nan(self):
        # Issue #4571.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
    """Basic behaviour of random.multinomial plus randint interval checks."""
    def test_basic(self):
        # Smoke test: must not raise.
        random.multinomial(100, [0.2, 0.8])
    def test_zero_probability(self):
        # Zero-probability categories are legal and simply never drawn.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
    def test_int_negative_interval(self):
        # randint must honour fully negative [low, high) intervals.
        assert_(-5 <= random.randint(-5, -1) < -1)
        x = random.randint(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))
    def test_size(self):
        # gh-3173
        p = [0.5, 0.5]
        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
                     (2, 2, 2))
        # NOTE(review): np.float is a deprecated alias of builtin float and
        # was removed in NumPy 1.24 — confirm the targeted NumPy version.
        assert_raises(TypeError, np.random.multinomial, 1, p,
                      np.float(1))
class TestSetState(TestCase):
    """get_state/set_state round-trips must restore the generator exactly."""
    def setUp(self):
        # Fixed seed; self.state is the pristine state used for restores.
        self.seed = 1234567890
        self.prng = random.RandomState(self.seed)
        self.state = self.prng.get_state()
    def test_basic(self):
        # Restoring the state replays the same integer stream.
        old = self.prng.tomaxint(16)
        self.prng.set_state(self.state)
        new = self.prng.tomaxint(16)
        assert_(np.all(old == new))
    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        old = self.prng.standard_normal(size=3)
        self.prng.set_state(self.state)
        new = self.prng.standard_normal(size=3)
        assert_(np.all(old == new))
    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.prng.standard_normal()
        state = self.prng.get_state()
        old = self.prng.standard_normal(size=3)
        self.prng.set_state(state)
        new = self.prng.standard_normal(size=3)
        assert_(np.all(old == new))
    def test_backwards_compatibility(self):
        # Make sure we can accept old state tuples that do not have the
        # cached Gaussian value.
        old_state = self.state[:-2]
        x1 = self.prng.standard_normal(size=16)
        self.prng.set_state(old_state)
        x2 = self.prng.standard_normal(size=16)
        self.prng.set_state(self.state)
        x3 = self.prng.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))
    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
    """randint bounds checking, dtype support and stream repeatability."""
    rfunc = np.random.randint
    # valid integer/boolean types
    itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]
    def test_unsupported_type(self):
        # Floating dtypes are rejected.
        # NOTE(review): np.float is a deprecated alias of builtin float and
        # was removed in NumPy 1.24 — confirm the targeted NumPy version.
        assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
    def test_bounds_checking(self):
        # Out-of-range or inverted [low, high) bounds must raise per dtype.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
    def test_rng_zero_and_extremes(self):
        # A window of width one always yields its single admissible value,
        # at both dtype extremes and in the middle.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = (lbnd + ubnd)//2
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
    def test_in_bounds_fuzz(self):
        # Don't use fixed seed
        np.random.seed()
        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)
        # NOTE(review): np.bool is a deprecated alias removed in NumPy 1.24.
        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)
    def test_repeatability(self):
        import hashlib
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but np.bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
               'int16': '1b7741b80964bb190c50d541dca1cac1',
               'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'int64': '17db902806f448331b5a758d7d2ee672',
               'int8': '27dd30c4e08a797063dffac2490b0be6',
               'uint16': '1b7741b80964bb190c50d541dca1cac1',
               'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'uint64': '17db902806f448331b5a758d7d2ee672',
               'uint8': '27dd30c4e08a797063dffac2490b0be6'}
        for dt in self.itype[1:]:
            np.random.seed(1234)
            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianess
        np.random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(np.bool).name] == res)
    def test_respect_dtype_singleton(self):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            self.assertEqual(sample.dtype, np.dtype(dt))
        # NOTE(review): np.bool/np.int and np.long are removed/py2-only
        # aliases — confirm Python and NumPy versions.
        for dt in (np.bool, np.int, np.long):
            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            self.assertFalse(hasattr(sample, 'dtype'))
            self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
    def setUp(self):
        # Fixed seed so every test below reproduces its reference draws.
        self.seed = 1234567890
    def test_rand(self):
        # np.random.rand reproduces the reference uniform draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.rand(3, 2)
        desired = np.array([[0.61879477158567997, 0.59162362775974664],
                            [0.88868358904449662, 0.89165480011560816],
                            [0.4575674820298663, 0.7781880808593471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_randn(self):
        # np.random.randn reproduces the reference normal draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.randn(3, 2)
        desired = np.array([[1.34016345771863121, 1.73759122771936081],
                            [1.498988344300628, -0.2286433324536169],
                            [2.031033998682787, 2.17032494605655257]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_randint(self):
        # Half-open interval [-99, 99) with the reference draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.randint(-99, 99, size=(3, 2))
        desired = np.array([[31, 3],
                            [-52, 41],
                            [-48, -66]])
        assert_array_equal(actual, desired)
    def test_random_integers(self):
        # random_integers is deprecated: it must warn once and still match
        # the reference draws.
        np.random.seed(self.seed)
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = np.random.random_integers(-99, 99, size=(3, 2))
            assert_(len(w) == 1)
        desired = np.array([[31, 3],
                            [-52, 41],
                            [-48, -66]])
        assert_array_equal(actual, desired)
    def test_random_integers_max_int(self):
        # Tests whether random_integers can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = np.random.random_integers(np.iinfo('l').max,
                                               np.iinfo('l').max)
            assert_(len(w) == 1)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)
    def test_random_integers_deprecated(self):
        # Both call forms of the deprecated API must raise when warnings
        # are escalated to errors.
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            # DeprecationWarning raised with high == None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max)
            # DeprecationWarning raised with high != None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max, np.iinfo('l').max)
    def test_random_sample(self):
        # random_sample draws the same uniform stream as rand (see test_rand).
        np.random.seed(self.seed)
        actual = np.random.random_sample((3, 2))
        desired = np.array([[0.61879477158567997, 0.59162362775974664],
                            [0.88868358904449662, 0.89165480011560816],
                            [0.4575674820298663, 0.7781880808593471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_choice_uniform_replace(self):
        # Uniform choice with replacement: reference draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.choice(4, 4)
        desired = np.array([2, 3, 2, 3])
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_replace(self):
        # Weighted choice with replacement: reference draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([1, 1, 2, 2])
        assert_array_equal(actual, desired)
    def test_choice_uniform_noreplace(self):
        # Uniform choice without replacement: reference draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.choice(4, 3, replace=False)
        desired = np.array([0, 1, 3])
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_noreplace(self):
        # Weighted choice without replacement: reference draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.choice(4, 3, replace=False,
                                  p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([2, 3, 1])
        assert_array_equal(actual, desired)
    def test_choice_noninteger(self):
        # Choice over a non-numeric candidate list: reference draws.
        np.random.seed(self.seed)
        actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['c', 'd', 'c', 'd'])
        assert_array_equal(actual, desired)
    def test_choice_exceptions(self):
        # Invalid populations, shapes and probability vectors must raise.
        sample = np.random.choice
        assert_raises(ValueError, sample, -1, 3)
        assert_raises(ValueError, sample, 3., 3)
        assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
        assert_raises(ValueError, sample, [], 3)
        assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
                      p=[[0.25, 0.25], [0.25, 0.25]])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
        assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
        assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], 2,
                      replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
    def test_bytes(self):
        # np.random.bytes reproduces the reference byte string for the seed.
        np.random.seed(self.seed)
        actual = np.random.bytes(10)
        desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
        assert_equal(actual, desired)
    def test_shuffle(self):
        # Test lists, arrays (of various dtypes), and multidimensional versions
        # of both, c-contiguous or not:
        # Each ``conv`` maps the canonical input list into a container type;
        # shuffling the converted input must equal converting the reference
        # permutation. (The first conv ignores its input — empty-array case.)
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, 1),
                                           ("b", np.int32, 1)])]:
            np.random.seed(self.seed)
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            np.random.shuffle(alist)
            actual = alist
            desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
            assert_array_equal(actual, desired)
    def test_shuffle_masked(self):
        # gh-3263
        # Shuffling masked arrays must permute, not alter, the unmasked data.
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            np.random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            np.random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
    def test_beta(self):
        # Beta distribution: reference draws for the seed.
        np.random.seed(self.seed)
        actual = np.random.beta(.1, .9, size=(3, 2))
        desired = np.array(
            [[1.45341850513746058e-02, 5.31297615662868145e-04],
             [1.85366619058432324e-06, 4.19214516800110563e-03],
             [1.58405155108498093e-04, 1.26252891949397652e-04]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_(np.random.laplace(scale=0) in [0, 1])
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_lognormal(self):
    np.random.seed(self.seed)
    actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
    desired = np.array([[16.50698631688883822, 36.54846706092654784],
                        [22.67886599981281748, 0.71617561058995771],
                        [65.72798501792723869, 86.84341601437161273]])
    assert_array_almost_equal(actual, desired, decimal=13)

def test_lognormal_0(self):
    # sigma=0 degenerates to exp(mean) == exp(0) == 1.
    assert_equal(np.random.lognormal(sigma=0), 1)
    assert_raises(ValueError, np.random.lognormal, sigma=-0.)

def test_logseries(self):
    np.random.seed(self.seed)
    actual = np.random.logseries(p=.923456789, size=(3, 2))
    desired = np.array([[2, 2],
                        [6, 17],
                        [3, 6]])
    assert_array_equal(actual, desired)

def test_multinomial(self):
    np.random.seed(self.seed)
    # Each innermost row of 6 counts sums to n == 20.
    actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
    desired = np.array([[[4, 3, 5, 4, 2, 2],
                         [5, 2, 8, 2, 2, 1]],
                        [[3, 4, 3, 6, 0, 4],
                         [2, 1, 4, 3, 6, 4]],
                        [[4, 4, 2, 5, 2, 3],
                         [4, 3, 4, 2, 3, 4]]])
    assert_array_equal(actual, desired)

def test_multivariate_normal(self):
    np.random.seed(self.seed)
    mean = (.123456789, 10)
    # Hmm... not even symmetric.
    cov = [[1, 0], [1, 0]]
    size = (3, 2)
    actual = np.random.multivariate_normal(mean, cov, size)
    desired = np.array([[[-1.47027513018564449, 10.],
                         [-1.65915081534845532, 10.]],
                        [[-2.29186329304599745, 10.],
                         [-1.77505606019580053, 10.]],
                        [[-0.54970369430044119, 10.],
                         [0.29768848031692957, 10.]]])
    assert_array_almost_equal(actual, desired, decimal=15)

    # Check for default size, was raising deprecation warning
    actual = np.random.multivariate_normal(mean, cov)
    desired = np.array([-0.79441224511977482, 10.])
    assert_array_almost_equal(actual, desired, decimal=15)

    # Check that non positive-semidefinite covariance raises warning
    mean = [0, 0]
    cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
    assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)

def test_negative_binomial(self):
    np.random.seed(self.seed)
    actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
    desired = np.array([[848, 841],
                        [892, 611],
                        [779, 647]])
    assert_array_equal(actual, desired)

def test_noncentral_chisquare(self):
    np.random.seed(self.seed)
    actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
    desired = np.array([[23.91905354498517511, 13.35324692733826346],
                        [31.22452661329736401, 16.60047399466177254],
                        [5.03461598262724586, 17.94973089023519464]])
    assert_array_almost_equal(actual, desired, decimal=14)

    # df < 1 exercises the other sampling branch.
    actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
    desired = np.array([[1.47145377828516666, 0.15052899268012659],
                        [0.00943803056963588, 1.02647251615666169],
                        [0.332334982684171, 0.15451287602753125]])
    assert_array_almost_equal(actual, desired, decimal=14)

    # nonc=0 must reduce to the central chi-square stream.
    np.random.seed(self.seed)
    actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
    desired = np.array([[9.597154162763948, 11.725484450296079],
                        [10.413711048138335, 3.694475922923986],
                        [13.484222138963087, 14.377255424602957]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_noncentral_f(self):
    np.random.seed(self.seed)
    actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                    size=(3, 2))
    desired = np.array([[1.40598099674926669, 0.34207973179285761],
                        [3.57715069265772545, 7.92632662577829805],
                        [0.43741599463544162, 1.1774208752428319]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_normal(self):
    np.random.seed(self.seed)
    actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[2.80378370443726244, 3.59863924443872163],
                        [3.121433477601256, -0.33382987590723379],
                        [4.18552478636557357, 4.46410668111310471]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_normal_0(self):
    assert_equal(np.random.normal(scale=0), 0)
    assert_raises(ValueError, np.random.normal, scale=-0.)

def test_pareto(self):
    np.random.seed(self.seed)
    actual = np.random.pareto(a=.123456789, size=(3, 2))
    desired = np.array(
        [[2.46852460439034849e+03, 1.41286880810518346e+03],
         [5.28287797029485181e+07, 6.57720981047328785e+07],
         [1.40840323350391515e+02, 1.98390255135251704e+05]])
    # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
    # matrix differs by 24 nulps. Discussion:
    # http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
    # Consensus is that this is probably some gcc quirk that affects
    # rounding but not in any important way, so we just use a looser
    # tolerance on this test:
    np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)

def test_poisson(self):
    np.random.seed(self.seed)
    actual = np.random.poisson(lam=.123456789, size=(3, 2))
    desired = np.array([[0, 0],
                        [1, 0],
                        [0, 0]])
    assert_array_equal(actual, desired)

def test_poisson_exceptions(self):
    # Both negative and too-large lambda must raise, scalar or array.
    lambig = np.iinfo('l').max
    lamneg = -1
    assert_raises(ValueError, np.random.poisson, lamneg)
    assert_raises(ValueError, np.random.poisson, [lamneg]*10)
    assert_raises(ValueError, np.random.poisson, lambig)
    assert_raises(ValueError, np.random.poisson, [lambig]*10)

def test_power(self):
    np.random.seed(self.seed)
    actual = np.random.power(a=.123456789, size=(3, 2))
    desired = np.array([[0.02048932883240791, 0.01424192241128213],
                        [0.38446073748535298, 0.39499689943484395],
                        [0.00177699707563439, 0.13115505880863756]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_rayleigh(self):
    np.random.seed(self.seed)
    actual = np.random.rayleigh(scale=10, size=(3, 2))
    desired = np.array([[13.8882496494248393, 13.383318339044731],
                        [20.95413364294492098, 21.08285015800712614],
                        [11.06066537006854311, 17.35468505778271009]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_rayleigh_0(self):
    assert_equal(np.random.rayleigh(scale=0), 0)
    assert_raises(ValueError, np.random.rayleigh, scale=-0.)

def test_standard_cauchy(self):
    np.random.seed(self.seed)
    actual = np.random.standard_cauchy(size=(3, 2))
    desired = np.array([[0.77127660196445336, -6.55601161955910605],
                        [0.93582023391158309, -2.07479293013759447],
                        [-4.74601644297011926, 0.18338989290760804]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_standard_exponential(self):
    np.random.seed(self.seed)
    actual = np.random.standard_exponential(size=(3, 2))
    desired = np.array([[0.96441739162374596, 0.89556604882105506],
                        [2.1953785836319808, 2.22243285392490542],
                        [0.6116915921431676, 1.50592546727413201]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_standard_gamma(self):
    np.random.seed(self.seed)
    actual = np.random.standard_gamma(shape=3, size=(3, 2))
    desired = np.array([[5.50841531318455058, 6.62953470301903103],
                        [5.93988484943779227, 2.31044849402133989],
                        [7.54838614231317084, 8.012756093271868]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_standard_gamma_0(self):
    assert_equal(np.random.standard_gamma(shape=0), 0)
    assert_raises(ValueError, np.random.standard_gamma, shape=-0.)

def test_standard_normal(self):
    np.random.seed(self.seed)
    actual = np.random.standard_normal(size=(3, 2))
    desired = np.array([[1.34016345771863121, 1.73759122771936081],
                        [1.498988344300628, -0.2286433324536169],
                        [2.031033998682787, 2.17032494605655257]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_standard_t(self):
    np.random.seed(self.seed)
    actual = np.random.standard_t(df=10, size=(3, 2))
    desired = np.array([[0.97140611862659965, -0.08830486548450577],
                        [1.36311143689505321, -0.55317463909867071],
                        [-0.18473749069684214, 0.61181537341755321]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_triangular(self):
    np.random.seed(self.seed)
    actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
                                  size=(3, 2))
    desired = np.array([[12.68117178949215784, 12.4129206149193152],
                        [16.20131377335158263, 16.25692138747600524],
                        [11.20400690911820263, 14.4978144835829923]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_uniform(self):
    np.random.seed(self.seed)
    actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
    desired = np.array([[6.99097932346268003, 6.73801597444323974],
                        [9.50364421400426274, 9.53130618907631089],
                        [5.48995325769805476, 8.47493103280052118]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_uniform_range_bounds(self):
    # A range wider than the float64 span must raise instead of
    # overflowing silently.
    fmin = np.finfo('float').min
    fmax = np.finfo('float').max

    func = np.random.uniform
    assert_raises(OverflowError, func, -np.inf, 0)
    assert_raises(OverflowError, func, 0, np.inf)
    assert_raises(OverflowError, func, fmin, fmax)
    assert_raises(OverflowError, func, [-np.inf], [0])
    assert_raises(OverflowError, func, [0], [np.inf])

    # (fmax / 1e17) - fmin is within range, so this should not throw
    # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
    # DBL_MAX by increasing fmin a bit
    np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)

def test_vonmises(self):
    np.random.seed(self.seed)
    actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
    desired = np.array([[2.28567572673902042, 2.89163838442285037],
                        [0.38198375564286025, 2.57638023113890746],
                        [1.19153771588353052, 1.83509849681825354]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_vonmises_small(self):
    # check infinite loop, gh-4720: tiny kappa must still terminate and
    # produce finite samples.
    np.random.seed(self.seed)
    r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
    np.testing.assert_(np.isfinite(r).all())

def test_wald(self):
    np.random.seed(self.seed)
    actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
    desired = np.array([[3.82935265715889983, 5.13125249184285526],
                        [0.35045403618358717, 1.50832396872003538],
                        [0.24124319895843183, 0.22031101461955038]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_weibull(self):
    np.random.seed(self.seed)
    actual = np.random.weibull(a=1.23, size=(3, 2))
    desired = np.array([[0.97097342648766727, 0.91422896443565516],
                        [1.89517770034962929, 1.91414357960479564],
                        [0.67057783752390987, 1.39494046635066793]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_weibull_0(self):
    assert_equal(np.random.weibull(a=0), 0)
    assert_raises(ValueError, np.random.weibull, a=-0.)

def test_zipf(self):
    np.random.seed(self.seed)
    actual = np.random.zipf(a=1.23, size=(3, 2))
    desired = np.array([[66, 29],
                        [1, 1],
                        [3, 13]])
    assert_array_equal(actual, desired)
class TestBroadcast(TestCase):
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    #
    # Pattern per test: broadcast a length-3 list against a length-1 list
    # in each parameter slot, check the 3 outputs against a fixed-seed
    # reference, and check that broadcast invalid values raise ValueError.

    def setUp(self):
        self.seed = 123456789

    def setSeed(self):
        # NOTE(review): camelCase helper, not unittest's setUp hook; it is
        # called explicitly at the start of every sampling sequence so each
        # broadcast variant sees the same stream.
        np.random.seed(self.seed)

    # TODO: Include test for randint once it can broadcast
    # Can steal the test written in PR #6938

    def test_uniform(self):
        low = [0]
        high = [1]
        uniform = np.random.uniform
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)

        self.setSeed()
        actual = uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        normal = np.random.normal
        desired = np.array([2.2129019979039612,
                            2.1283977976520019,
                            1.8417114045748335])

        self.setSeed()
        actual = normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc * 3, bad_scale)

        self.setSeed()
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)

    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        beta = np.random.beta
        desired = np.array([0.19843558305989056,
                            0.075230336409423643,
                            0.24976865978980844])

        self.setSeed()
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)

        self.setSeed()
        actual = beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a, b * 3)
        assert_raises(ValueError, beta, a, bad_b * 3)

    def test_exponential(self):
        scale = [1]
        bad_scale = [-1]
        exponential = np.random.exponential
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = exponential(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, exponential, bad_scale * 3)

    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [-1]
        std_gamma = np.random.standard_gamma
        # shape=1 gamma is the standard exponential, hence the same
        # reference values as test_exponential above.
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = std_gamma(shape * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, bad_shape * 3)

    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        gamma = np.random.gamma
        desired = np.array([1.5221370731769048,
                            1.5277256455738331,
                            1.4248762625178359])

        self.setSeed()
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)

        self.setSeed()
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)

    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        f = np.random.f
        desired = np.array([0.80038951638264799,
                            0.86768719635363512,
                            2.7251095168386801])

        self.setSeed()
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)

        self.setSeed()
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)

    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        nonc_f = np.random.noncentral_f
        desired = np.array([9.1393943263705211,
                            13.025456344595602,
                            8.8018098359100545])

        self.setSeed()
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)

        self.setSeed()
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)

        self.setSeed()
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)

    def test_chisquare(self):
        df = [1]
        bad_df = [-1]
        chisquare = np.random.chisquare
        desired = np.array([0.57022801133088286,
                            0.51947702108840776,
                            0.1320969254923558])

        self.setSeed()
        actual = chisquare(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, chisquare, bad_df * 3)

    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        nonc_chi = np.random.noncentral_chisquare
        desired = np.array([9.0015599467913763,
                            4.5804135049718742,
                            6.0872302432834564])

        self.setSeed()
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)

        self.setSeed()
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)

    def test_standard_t(self):
        df = [1]
        bad_df = [-1]
        t = np.random.standard_t
        desired = np.array([3.0702872575217643,
                            5.8560725167361607,
                            1.0274791436474273])

        self.setSeed()
        actual = t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, t, bad_df * 3)

    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        vonmises = np.random.vonmises
        desired = np.array([2.9883443664201312,
                            -2.7064099483995943,
                            -1.8672476700665914])

        self.setSeed()
        actual = vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)

        self.setSeed()
        actual = vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)

    def test_pareto(self):
        a = [1]
        bad_a = [-1]
        pareto = np.random.pareto
        desired = np.array([1.1405622680198362,
                            1.1465519762044529,
                            1.0389564467453547])

        self.setSeed()
        actual = pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, pareto, bad_a * 3)

    def test_weibull(self):
        a = [1]
        bad_a = [-1]
        weibull = np.random.weibull
        # a=1 Weibull is the standard exponential, hence the same
        # reference values as test_exponential above.
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, weibull, bad_a * 3)

    def test_power(self):
        a = [1]
        bad_a = [-1]
        power = np.random.power
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, power, bad_a * 3)

    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        laplace = np.random.laplace
        desired = np.array([0.067921356028507157,
                            0.070715642226971326,
                            0.019290950698972624])

        self.setSeed()
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)

        self.setSeed()
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)

    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        gumbel = np.random.gumbel
        desired = np.array([0.2730318639556768,
                            0.26936705726291116,
                            0.33906220393037939])

        self.setSeed()
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)

        self.setSeed()
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)

    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        logistic = np.random.logistic
        desired = np.array([0.13152135837586171,
                            0.13675915696285773,
                            0.038216792802833396])

        self.setSeed()
        actual = logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc * 3, bad_scale)

        self.setSeed()
        actual = logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc, bad_scale * 3)

    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        lognormal = np.random.lognormal
        desired = np.array([9.1422086044848427,
                            8.4013952870126261,
                            6.3073234116578671])

        self.setSeed()
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)

        self.setSeed()
        actual = lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)

    def test_rayleigh(self):
        scale = [1]
        bad_scale = [-1]
        rayleigh = np.random.rayleigh
        desired = np.array([1.2337491937897689,
                            1.2360119924878694,
                            1.1936818095781789])

        self.setSeed()
        actual = rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, rayleigh, bad_scale * 3)

    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        wald = np.random.wald
        desired = np.array([0.11873681120271318,
                            0.12450084820795027,
                            0.9096122728408238])

        self.setSeed()
        actual = wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean * 3, scale)
        assert_raises(ValueError, wald, mean * 3, bad_scale)

        self.setSeed()
        actual = wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean, scale * 3)
        assert_raises(ValueError, wald, mean, bad_scale * 3)

    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # ``right * 2`` is [3, 3]; unpacking yields the scalars 3 and 3,
        # i.e. left == mode == right, which is an invalid triangle.
        bad_left_two, bad_mode_two = right * 2
        triangular = np.random.triangular
        desired = np.array([2.03339048710429,
                            2.0347400359389356,
                            2.0095991069536208])

        self.setSeed()
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)

        self.setSeed()
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)

        self.setSeed()
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)

    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        binom = np.random.binomial
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)

        self.setSeed()
        actual = binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)

    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        neg_binom = np.random.negative_binomial
        desired = np.array([1, 0, 1])

        self.setSeed()
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)

        self.setSeed()
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)

    def test_poisson(self):
        # lam above poisson_lam_max must raise just like a negative lam.
        max_lam = np.random.RandomState().poisson_lam_max

        lam = [1]
        bad_lam_one = [-1]
        bad_lam_two = [max_lam * 2]
        poisson = np.random.poisson
        desired = np.array([1, 1, 0])

        self.setSeed()
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)

    def test_zipf(self):
        a = [2]
        bad_a = [0]
        zipf = np.random.zipf
        desired = np.array([2, 2, 1])

        self.setSeed()
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)

    def test_geometric(self):
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        geom = np.random.geometric
        desired = np.array([2, 2, 2])

        self.setSeed()
        actual = geom(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geom, bad_p_one * 3)
        assert_raises(ValueError, geom, bad_p_two * 3)

    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [0]
        bad_nsample_two = [4]
        hypergeom = np.random.hypergeometric
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = hypergeom(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)

    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        logseries = np.random.logseries
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(TestCase):
    # make sure each state produces the same sequence even in threads
    def setUp(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        # Run ``function(RandomState(seed), out_row)`` once per seed, both
        # from worker threads and serially, and require identical results.
        from threading import Thread

        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation
        t = [Thread(target=function, args=(np.random.RandomState(s), o))
             for s, o in zip(self.seeds, out1)]
        [x.start() for x in t]
        [x.join() for x in t]

        # the same serial
        for s, o in zip(self.seeds, out2):
            function(np.random.RandomState(s), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1/6.]*6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(TestCase):
    # Single-element array arguments must yield shape-(1,) output, whether
    # all arguments are arrays or some are the equivalent scalars.
    def setUp(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        funcs = (np.random.exponential, np.random.standard_gamma,
                 np.random.chisquare, np.random.standard_t,
                 np.random.pareto, np.random.weibull,
                 np.random.power, np.random.rayleigh,
                 np.random.poisson, np.random.zipf,
                 np.random.geometric, np.random.logseries)

        probfuncs = (np.random.geometric, np.random.logseries)

        for func in funcs:
            if func in probfuncs:  # p < 1.0
                out = func(np.array([0.5]))

            else:
                out = func(self.argOne)

            self.assertEqual(out.shape, self.tgtShape)

    def test_two_arg_funcs(self):
        funcs = (np.random.uniform, np.random.normal,
                 np.random.beta, np.random.gamma,
                 np.random.f, np.random.noncentral_chisquare,
                 np.random.vonmises, np.random.laplace,
                 np.random.gumbel, np.random.logistic,
                 np.random.lognormal, np.random.wald,
                 np.random.binomial, np.random.negative_binomial)

        probfuncs = (np.random.binomial, np.random.negative_binomial)

        for func in funcs:
            if func in probfuncs:  # p <= 1
                argTwo = np.array([0.5])

            else:
                argTwo = self.argTwo

            # All mixes of array and scalar arguments keep the (1,) shape.
            out = func(self.argOne, argTwo)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne[0], argTwo)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne, argTwo[0])
            self.assertEqual(out.shape, self.tgtShape)

# TODO: Uncomment once randint can broadcast arguments
#    def test_randint(self):
#        itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
#                 np.int32, np.uint32, np.int64, np.uint64]
#        func = np.random.randint
#        high = np.array([1])
#        low = np.array([0])
#
#        for dt in itype:
#            out = func(low, high, dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)
#
#            out = func(low[0], high, dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)
#
#            out = func(low, high[0], dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)

    def test_three_arg_funcs(self):
        funcs = [np.random.noncentral_f, np.random.triangular,
                 np.random.hypergeometric]

        for func in funcs:
            out = func(self.argOne, self.argTwo, self.argThree)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne[0], self.argTwo, self.argThree)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne, self.argTwo[0], self.argThree)
            self.assertEqual(out.shape, self.tgtShape)
# Allow running this test module directly as a script; run_module_suite
# comes from numpy.testing (imported at the top of the file, outside
# this excerpt).
if __name__ == "__main__":
    run_module_suite()
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/numpy/random/tests/test_random.py | Python | mit | 61,831 | [
"Gaussian"
] | dead70b0a5a0cfb06d6b30f1e533b623b2a37bddd99bb36099ae1af3b0f57c6d |
from datetime import datetime
import unittest
import pandas as pd
from espresso import convert
class ConvertTest(unittest.TestCase):
def setUp(self):
self.df = pd.DataFrame([
{'datetime': datetime(2000,1,1,0,0), 'value': 0.0},
{'datetime': datetime(2000,1,1,2,0), 'value': 1.0},
{'datetime': datetime(2000,1,2,2,0), 'value': 1.0}
])
self.df['datetime'] = pd.to_datetime(self.df.datetime)
def test_aggregate_dataframe(self):
df = convert.aggregate_dataframe(self.df)
self.assertEqual(len(df), 2) | jsheedy/affogato | espresso/test/test_convert.py | Python | unlicense | 587 | [
"ESPResSo"
] | e87b3482e7290b4403802f9bbd43951ea9a80799f2efc9f654e51aff91c28e32 |
"""
========================
Decimating scalp surface
========================
This can be useful to reduce computation time when
using a cloud of digitization points for coordinate alignment
instead of e.g. EEG-cap positions.
"""
print(__doc__)
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.surface import decimate_surface
# Load the high-resolution head surface from the MNE sample dataset.
path = mne.datasets.sample.data_path()
surf = mne.read_bem_surfaces(path + '/subjects/sample/bem/sample-head.fif')[0]

points, triangles = surf['rr'], surf['tris']

# Reduce to 30000 triangles, matching the ${SUBJECT}-head-medium.fif
# output of mne_make_scalp_surfaces.py and mne_make_scalp_surfaces.
points_dec, triangles_dec = decimate_surface(points, triangles,
                                             n_triangles=30000)

# Prefer the legacy Enthought namespace, fall back to plain mayavi.
# Only ImportError is caught: the original bare `except:` would also
# have swallowed unrelated errors (including KeyboardInterrupt).
try:
    from enthought.mayavi import mlab
except ImportError:
    from mayavi import mlab

head_col = (0.95, 0.83, 0.83)  # light pink

# Render the decimated mesh.
p, t = points_dec, triangles_dec
mlab.triangular_mesh(p[:, 0], p[:, 1], p[:, 2], t, color=head_col)
| effigies/mne-python | examples/plot_decimate_head_surface.py | Python | bsd-3-clause | 1,105 | [
"Mayavi"
] | 3dcb18f09f4fb03935822cf8debc758b7cb45130278d7bb6f3f4dfcf27e8e38d |
from gaussfitter import gaussfit
import numpy as np
from util import utils
import matplotlib.pyplot as plt
import sys
def aperture(startpx, startpy, radius=3, shape=(46, 44)):
    """Return a 2-D mask that is 0 inside a circular aperture, 1 outside.

    Parameters
    ----------
    startpx, startpy : int
        Column (x) and row (y) of the aperture centre, in pixels.
    radius : int, optional
        Aperture radius in pixels (default 3).
    shape : tuple of int, optional
        (n_rows, n_cols) of the detector array.  Defaults to (46, 44),
        the array size the original script hard-coded.

    Returns
    -------
    numpy.ndarray
        Array of the requested shape with 0.0 at every in-bounds pixel
        satisfying (x - startpx)**2 + (y - startpy)**2 <= radius**2 and
        1.0 elsewhere.
    """
    n_rows, n_cols = shape
    r = radius
    length = 2 * r
    height = length
    # Candidate bounding-box columns/rows around the centre.  range() is
    # used instead of the Python-2-only xrange; iteration is identical.
    allx = range(startpx - int(np.ceil(length / 2.0)),
                 startpx + int(np.floor(length / 2.0)) + 1)
    ally = range(startpy - int(np.ceil(height / 2.0)),
                 startpy + int(np.floor(height / 2.0)) + 1)
    mask = np.ones((n_rows, n_cols))
    for x in allx:
        for y in ally:
            # Keep only in-bounds pixels that fall inside the circle.
            if ((np.abs(x - startpx)) ** 2 + (np.abs(y - startpy)) ** 2
                    <= r ** 2 and 0 <= y < n_rows and 0 <= x < n_cols):
                mask[y, x] = 0.
    return mask
def gaussian(height, center_x, center_y, width_x, width_y, offset):
    """Return a callable f(x, y) evaluating an offset 2-D Gaussian.

    The returned function computes
    height * exp(-((center_x - x)/width_x)**2/2
                 - ((center_y - y)/width_y)**2/2) + offset.
    """
    wx = float(width_x)
    wy = float(width_y)

    def _evaluate(x, y):
        dx = (center_x - x) / wx
        dy = (center_y - y) / wy
        return height * np.exp(-(dx * dx + dy * dy) / 2) + offset

    return _evaluate
# Load the pre-computed image stack; 'stack' is indexed [row, col, frame].
stackDict = np.load('nlttImageStackBlue15.npz')
stack = stackDict['stack']

# A frame index is required on the command line; print usage otherwise.
if len(sys.argv) == 1:
    print 'Useage: ',sys.argv[0],' iFrame'
    print """
set0 Frames 0-179
"""
    exit(1)
iFrame = int(sys.argv[1])

# Select the requested frame.
frame = stack[:,:,iFrame]

# plt.hist(np.ravel(frame),bins=100,range=(0,5000))
# plt.show()

# Zero out NaN pixels, then mask them so they are excluded from display.
nanMask = np.isnan(frame)
frame[nanMask] = 0
frame = np.ma.masked_array(frame,mask=nanMask)

utils.plotArray(frame,cbar=True)
| bmazin/ARCONS-pipeline | examples/Pal2012-nltt/view_frameBlue.py | Python | gpl-2.0 | 1,390 | [
"Gaussian"
] | c809e6d7503aa893d8987c8c3f1ceab334df04db5afc6c251b53cb4f13a69b0c |
import os
import time
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLU as glu
import OpenGL.GLUT as glut
import Image
import PIL.ImageOps as iops
from fos.core.utils import list_indices as lind
from os.path import join as pjoin
from dipy.core import track_metrics as tm
import fos.core.collision as cll
data_path = pjoin(os.path.dirname(__file__), 'data')
#=======================================================
class Tracks3D(object):
    """OpenGL renderer for a set of 3-D fiber tracks loaded from a
    TrackVis file.  Tracks are compiled into a display list, colored
    either per-orientation or with a single brain color, and individual
    tracks can be picked with a near/far ray and highlighted."""

    def __init__(self, fname, colormap=None, line_width=3.):
        # NOTE(review): `colormap` is accepted but never stored/used here.
        self.position = (0, 0, 0)
        self.fname = fname                  # TrackVis (.trk) file path
        self.manycolors = True              # orientation-based coloring
        self.bbox = None
        self.list_index = None              # GL display list id
        self.affine = None                  # RAS affine from the header
        self.data = None                    # list of (N, 3) track arrays
        self.list_index = None
        self.rot_angle = 0
        self.colormap = None
        # Material properties (used by material_color()).
        self.ambient = [0.0, 0.0, 0.2, .1]
        self.diffuse = [0.0, 0.0, 0.7, .1]
        self.specular = [0.2, 0.2, 0.7, .1]
        self.shininess = 50.
        self.emission = [0.2, 0.2, 0.2, 0]
        # Bounding-box statistics over all track points (set in init()).
        self.min = None
        self.max = None
        self.mean = None
        self.min_length = 20.               # skip tracks shorter than this
        self.angle = 0.                     # current spin angle (degrees)
        self.angular_speed = .5             # degrees per displayed frame
        self.line_width = line_width
        self.opacity = 1.
        # Picking ray endpoints (set externally); picking runs when they
        # change between frames.
        self.near_pick = None
        self.far_pick = None
        self.near_pick_prev = None
        self.far_pick_prev = None
        self.picked_track = None            # index of last picked track
        self.pick_color = [1, 1, 0]
        self.brain_color = [1, 0, 0]
        self.yellow_indices = None          # track indices to highlight
        self.dummy_data = False             # replace data with 2 test tracks
        self.data_subset = None             # (start, stop) slice of tracks
        self.picking_example = False        # disable spin while picking demo

    def init(self):
        """Load tracks from the TrackVis file and compile the display list."""
        import dipy.io.trackvis as tv
        lines, hdr = tv.read(self.fname)
        ras = tv.aff_from_hdr(hdr)
        self.affine = ras
        tracks = [l[0] for l in lines]
        # When highlighting is requested, keep only tracks longer than 20
        # (NOTE(review): hard-coded 20 here, not self.min_length).
        if self.yellow_indices != None:
            tracks = [t for t in tracks if tm.length(t) > 20]
        print 'tracks loaded'
        #self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]#tracks[:20000]
        if self.dummy_data:
            self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32), 100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]
        if self.data_subset != None:
            self.data = tracks[self.data_subset[0]:self.data_subset[1]]
        else:
            self.data = tracks
        # Global statistics over every point of every track.
        data_stats = np.concatenate(tracks)
        self.min = np.min(data_stats, axis=0)
        self.max = np.max(data_stats, axis=0)
        self.mean = np.mean(data_stats, axis=0)
        del data_stats
        del lines
        self.multiple_colors()
        #self.material_color()

    def display(self):
        """Per-frame draw: run picking if the pick ray changed, spin the
        scene, draw the compiled track list and any highlighted tracks."""
        if self.near_pick != None:
            #print self.near_pick
            # Only re-pick when the ray actually moved since last frame.
            if np.sum(np.equal(self.near_pick, self.near_pick_prev)) < 3:
                self.process_picking(self.near_pick, self.far_pick)
            self.near_pick_prev = self.near_pick
            self.far_pick_prev = self.far_pick
        gl.glPushMatrix()
        x, y, z = self.position
        # Spin rotations are skipped in the picking example so the pick
        # ray stays aligned with the data.
        if self.picking_example != True:
            gl.glRotatef(-90, 1, 0, 0)
            gl.glRotatef(self.angle, 0, 0, 1)
        gl.glTranslatef(x, y, z)
        # Advance the spin angle, wrapping at 360 degrees.
        if self.angle < 360.:
            self.angle += self.angular_speed
        else:
            self.angle = 0.
        #gl.glCullFace(gl.GL_FRONT)
        gl.glCallList(self.list_index)
        #gl.glCullFace(gl.GL_BACK)
        #gl.glCallList(self.list_index)
        # Highlight the picked track and any requested "yellow" tracks.
        if self.picked_track != None:
            self.display_one_track(self.picked_track)
        if self.yellow_indices != None:
            #print len(self.data)
            #print self.data[0].shape
            for i in self.yellow_indices:
                #print len(self.data[i])
                self.display_one_track(i)
        #print
        #print
        #print
        gl.glPopMatrix()
        gl.glFinish()

    def process_picking(self, near, far):
        """Pick the track closest to the ray through `near` and `far`."""
        print('process picking')
        min_dist = [cll.mindistance_segment2track(near, far, xyz) for xyz in self.data]
        min_dist = np.array(min_dist)
        #print min_dist
        self.picked_track = min_dist.argmin()
        print self.picked_track

    def display_one_track(self, track_index, color4=np.array([1,1,0,1],dtype=np.float32)):
        """Draw a single track as a thick smooth line in `color4`.
        NOTE(review): the array default argument is shared across calls;
        it is only read here, so this is safe as long as callers never
        mutate it."""
        gl.glPushMatrix()
        gl.glDisable(gl.GL_LIGHTING)
        gl.glEnable(gl.GL_LINE_SMOOTH)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_DONT_CARE)
        gl.glLineWidth(7.)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glColor4fv(color4)
        #gl.glColor3fv(color3)
        d = self.data[track_index].astype(np.float32)
        gl.glVertexPointerf(d)
        gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glEnable(gl.GL_LIGHTING)
        gl.glPopMatrix()

    def multiple_colors(self):
        """Compile the display list, coloring each sufficiently long track
        by its mid-segment orientation (Boy's surface colormap) or, when
        manycolors is off, with the single brain color."""
        from dipy.viz.colormaps import boys2rgb
        from dipy.core.track_metrics import mean_orientation, length, downsample
        colors = np.random.rand(1, 3).astype(np.float32)
        print colors
        self.list_index = gl.glGenLists(1)
        gl.glNewList(self.list_index, gl.GL_COMPILE)
        gl.glPushMatrix()
        gl.glDisable(gl.GL_LIGHTING)
        gl.glEnable(gl.GL_LINE_SMOOTH)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        #gl.glBlendFunc(gl.GL_SRC_ALPHA_SATURATE,gl.GL_ONE_MINUS_SRC_ALPHA)
        #gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE)
        gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_DONT_CARE)
        #gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_NICEST)
        gl.glLineWidth(self.line_width)
        #gl.glDepthMask(gl.GL_FALSE)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        for d in self.data:
            if length(d) > self.min_length:
                #mo=mean_orientation(d)
                if self.manycolors:
                    # Orientation of the middle segment of a 6-point
                    # downsampled track, normalized, mapped to RGB.
                    ds = downsample(d, 6)
                    mo = ds[3] - ds[2]
                    mo = mo / np.sqrt(np.sum(mo**2))
                    mo.shape = (1, 3)
                    color = boys2rgb(mo)
                    color4 = np.array([color[0][0], color[0][1], color[0][2], self.opacity], np.float32)
                    gl.glColor4fv(color4)
                else:
                    color4 = np.array([self.brain_color[0], self.brain_color[1],
                                       self.brain_color[2], self.opacity], np.float32)
                    gl.glColor4fv(color4)
                gl.glVertexPointerf(d)
                gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        #gl.glDisable(gl.GL_BLEND)
        gl.glEnable(gl.GL_LIGHTING)
        gl.glPopMatrix()
        gl.glEndList()

    def material_color(self):
        """Alternative list builder: draw all tracks with one lit material
        (unused by default; init() calls multiple_colors() instead)."""
        self.list_index = gl.glGenLists(1)
        gl.glNewList(self.list_index, gl.GL_COMPILE)
        gl.glPushMatrix()
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_AMBIENT, self.ambient)
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_DIFFUSE, self.diffuse)
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_SPECULAR, self.specular)
        gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, self.shininess)
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_EMISSION, self.emission)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        for d in self.data:
            gl.glVertexPointerd(d)
            gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glPopMatrix()
        gl.glEndList()
#=========================================================================
class Image2D(object):
    """Loads an image file and draws it as raw RGBA pixels at a fixed
    window position with glDrawPixels (RGB-inverted, optionally with a
    uniform alpha and with near-black pixels made transparent)."""

    def __init__(self, fname):
        self.position = [0, 0, 0]           # window position for the raster
        self.fname = fname
        #self.fname = pjoin(os.path.dirname(__file__), 'tests/data/small_latex1.png')
        print self.fname
        #'/home/eg01/Desktop/small_latex1.png'
        self.size = None                    # (width, height) of the image
        self.win_size = None                # current GL viewport size
        self.data = None                    # raw RGBA byte string
        self.alpha = 255 #None # 0 - 255
        self.rm_blackish = True             # make near-black pixels transparent
        pass

    def init(self):
        """Load the image, invert it, apply alpha/transparency and cache
        the RGBA bytes for display()."""
        gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
        #x,y,width,height = gl.glGetDoublev(gl.GL_VIEWPORT)
        #width,height = int(width),int(height)
        # Flip vertically: image rows are top-down, GL rasters bottom-up.
        img = Image.open(self.fname)
        img = img.transpose(Image.FLIP_TOP_BOTTOM)
        self.size = img.size
        rgbi = iops.invert(img.convert('RGB'))
        rgbai = rgbi.convert('RGBA')
        if self.alpha != None:
            rgbai.putalpha(self.alpha)
        if self.rm_blackish:
            # Zero out pixels whose R, G and B are all below 50 so the
            # (inverted) background becomes fully transparent.
            for x, y in np.ndindex(self.size[0], self.size[1]):
                r, g, b, a = rgbai.getpixel((x, y))
                if r < 50 and g < 50 and b < 50:
                    rgbai.putpixel((x, y), (0, 0, 0, 0))
        #for x,y in
        self.data = rgbai.tostring()
        x, y, width, height = gl.glGetDoublev(gl.GL_VIEWPORT)
        width, height = int(width), int(height)
        self.win_size = (width, height)
        print self.win_size

    def display(self):
        """Blend the cached RGBA pixels into the framebuffer at position."""
        #gl.glRasterPos2i(100,0)
        x, y, width, height = gl.glGetDoublev(gl.GL_VIEWPORT)
        width, height = int(width), int(height)
        self.win_size = (width, height)
        #print self.win_size
        gl.glWindowPos3iv(self.position)
        w, h = self.size
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        #gl.glBlendFunc(gl.GL_ONE,gl.GL_DST_COLOR)
        gl.glDrawPixels(w, h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, self.data)
        gl.glDisable(gl.GL_BLEND)
class BrainSurface(object):
def __init__(self):
self.fname='/home/eg01/Data_Backup/Data/Adam/multiple_transp_volumes/freesurfer_trich/rh.pial.vtk'
#self.fname='/home/eg309/Desktop/rh.pial.vtk'
self.position = [0.0, 0.0, 0.0]
self.scale = None #[100., 50., 20.]
'''
self.ambient = [0.0, 0.0, 0.2, 1.]
self.diffuse = [0.0, 0.0, 0.7, 1.]
self.specular = [0.2, 0.2, 0.7, 1.]
self.shininess = 50.
self.emission = [0.2, 0.2, 0.2, 0]
'''
'''
self.ambient = [0.0, 0.0, 0.2, 1.]
self.diffuse = [0.0, 0.0, 0.5, 1.]
self.specular = [0.2, 0.2, 0.2, 1.]
self.shininess = 10.
self.emission = [0., 0., 0.2, 0]
'''
self.ambient = [0.55, 0.44, 0.36, 1.]
self.diffuse = [0.55, 0.44, 0.36, 1.]
self.specular = [0.1, 0.1, 0.6, 1.]
self.shininess = 5.
self.emission = [0.1, 0.1, 0.1, 1.]
self.list_index = None
self.name_index = None
self.pts = None
self.polys = None
def load_polydata(self):
f=open(self.fname,'r')
lines=f.readlines()
taglines=[l.startswith('POINTS') or l.startswith('POLYGONS') for l in lines]
pts_polys_tags=[i for i in lind(taglines,True)]
if len(pts_polys_tags)<2:
NameError('This must be the wrong file no polydata in.')
#read points
pts_index = pts_polys_tags[0]
pts_tag = lines[pts_index].split()
pts_no = int(pts_tag[1])
pts=lines[pts_index+1:pts_index+pts_no+1]
self.pts=np.array([np.array(p.split(),dtype=np.float32) for p in pts])
#read triangles
polys_index = pts_polys_tags[1]
#print polys_index
polys_tag = lines[polys_index].split()
polys_no = int(polys_tag[1])
polys=lines[polys_index+1:polys_index+polys_no+1]
self.polys=np.array([np.array(pl.split(),dtype=np.int) for pl in polys])[:,1:]
def init(self):
self.load_polydata()
n=gl.glNormal3fv
v=gl.glVertex3fv
p=self.pts
print 'adding triangles'
time1=time.clock()
#print pts.shape, polys.shape
self.list_index = gl.glGenLists(1)
gl.glNewList( self.list_index,gl.GL_COMPILE)
gl.glPushMatrix()
gl.glMaterialfv( gl.GL_FRONT, gl.GL_AMBIENT, self.ambient )
gl.glMaterialfv( gl.GL_FRONT, gl.GL_DIFFUSE, self.diffuse )
gl.glMaterialfv( gl.GL_FRONT, gl.GL_SPECULAR, self.specular )
gl.glMaterialf( gl.GL_FRONT, gl.GL_SHININESS, self.shininess )
gl.glMaterialfv(gl.GL_FRONT, gl.GL_EMISSION, self.emission)
gl.glEnable(gl.GL_NORMALIZE)
gl.glBegin(gl.GL_TRIANGLES)
for l in self.polys[:50000]:
n(p[l[0]])
v(p[l[0]])
n(p[l[1]])
v(p[l[1]])
n(p[l[2]])
v(p[l[2]])
gl.glEnd()
gl.glPopMatrix()
gl.glEndList()
print 'triangles ready in', time.clock()-time1, 'secs'
def display(self,mode=gl.GL_RENDER):
gl.glPushMatrix()
#gl.glLoadIdentity()
#x,y,z=self.position
#gl.glTranslatef(x,y,z)
#gl.glRotatef(30*np.random.rand(1)[0],0.,1.,0.)
gl.glCallList(self.list_index)
gl.glPopMatrix()
def load_polydata_using_mayavi(self):
try:
import enthought.mayavi.tools.sources as sources
from enthought.mayavi import mlab
except:
ImportError('Sources module from enthought.mayavi is missing')
src=sources.open(self.rname)
surf=mlab.pipeline.surface(src)
pd=src.outputs[0]
pts=pd.points.to_array()
polys=pd.polys.to_array()
lpol=len(polys)/4
polys=polys.reshape(lpol,4)
return pts,polys
#===============================================================
class DummyPlane(object):
    """Draws a fixed red 200x200 quad in the z=0 plane, compiled into a
    display list; useful as a simple reference object in the scene."""

    def __init__(self):
        self.position = (0., 0., 0.)

    def init(self):
        """Compile the unlit red quad into a display list."""
        self.list_index = gl.glGenLists(1)
        gl.glNewList(self.list_index, gl.GL_COMPILE)
        gl.glPushMatrix()
        # Four corners of the quad, counter-clockwise.
        d = np.array([[-100,100,0],[100,100,0],[100,-100,0],[-100,-100,0]]).astype(np.float32)
        indices = np.array([0,1,2,3]).astype(np.ubyte)
        gl.glDisable(gl.GL_LIGHTING)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glColor3fv([1., 0., 0.])
        gl.glVertexPointerd(d)
        gl.glDrawElements(gl.GL_QUADS, 4, gl.GL_UNSIGNED_BYTE, indices)
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glEnable(gl.GL_LIGHTING)
        gl.glPopMatrix()
        gl.glEndList()

    def display(self):
        """Draw the quad translated to self.position."""
        gl.glPushMatrix()
        #gl.glLoadIdentity()
        x, y, z = self.position
        gl.glTranslatef(x, y, z)
        gl.glCallList(self.list_index)
        gl.glPopMatrix()
#===================================================================
class Collection(object):
    """Draws a grid (2-D or 3-D) of identical randomly-rotated cubes,
    assigning each a GL selection name so individual glyphs can be
    picked in GL_SELECT mode."""

    def __init__(self):
        self.position = [0.0, 0.0, -350.0]
        self.scale = None #[100., 50., 20.]
        # Material for the cube glyph.
        self.ambient = [0.0, 0.0, 0.2, 1.]
        self.diffuse = [0.0, 0.0, 0.7, 1.]
        self.specular = [0.2, 0.2, 0.7, 1.]
        self.shininess = 50.
        self.emission = [0.2, 0.2, 0.2, 0]
        self.list_index = None      # display list of one cube glyph
        self.name_index = None
        # Flattened grid coordinates (set in init()).
        self.gridx = None
        self.gridy = None
        self.gridz = None
        self.is3dgrid = True        # 3-D grid if True, else a 2-D grid at z=0

    def init(self):
        """Compile the cube glyph and build the 5x5(x5) grid coordinates."""
        self.list_index = gl.glGenLists(1)
        print self.list_index
        gl.glNewList(self.list_index, gl.GL_COMPILE)
        gl.glPushMatrix()
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_AMBIENT, self.ambient)
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_DIFFUSE, self.diffuse)
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_SPECULAR, self.specular)
        gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, self.shininess)
        gl.glMaterialfv(gl.GL_FRONT, gl.GL_EMISSION, self.emission)
        #x,y,z = self.scale
        #gl.glScalef(x,y,z)
        glut.glutSolidCube(50.0)
        #glut.glutSolidTeapot(20.0)
        gl.glPopMatrix()
        gl.glEndList()
        # Grid spans [-200, 200] with 5 samples per axis.
        if self.is3dgrid:
            x, y, z = np.mgrid[-200:200:5j, -200:200:5j, -200:200:5j]
            self.gridz = z.ravel()
        else:
            x, y = np.mgrid[-200:200:5j, -200:200:5j]
        self.gridx = x.ravel()
        self.gridy = y.ravel()
        print self.list_index

    def glyph(self, x, y, z):
        """Draw one cube at (x, y, z) with a random rotation about +y."""
        gl.glPushMatrix()
        #gl.glLoadIdentity()
        #x,y,z=self.position
        gl.glTranslatef(x, y, z)
        gl.glRotatef(30*np.random.rand(1)[0], 0., 1., 0.)
        gl.glCallList(self.list_index)
        gl.glPopMatrix()

    def display(self, mode=gl.GL_RENDER):
        """Draw every grid glyph; in GL_SELECT mode each glyph is tagged
        with name i+1 for the GL picking mechanism."""
        gl.glInitNames()
        gl.glPushName(0)
        for i in range(len(self.gridx)):
            x = self.gridx[i]
            y = self.gridy[i]
            if self.is3dgrid:
                z = self.gridz[i]
            else:
                z = 0
            if mode == gl.GL_SELECT:
                gl.glLoadName(i+1)
            self.glyph(x+self.position[0], y+self.position[1], z+self.position[2])
| fos/fos-legacy | scratch/very_scratch/primitives.py | Python | bsd-3-clause | 18,231 | [
"Mayavi",
"VTK"
] | 4fec180d9e3baab67ff891ac5bb3c2653dfe30eb5d2caf945ba505e368b11711 |
# .. coding: utf-8
# $Id: __init__.py 8676 2021-04-08 16:36:09Z milde $
# Author: Engelbert Gruber, Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # i.e. ##.
import os
import re
import string
import sys
if sys.version_info < (3, 0):
from io import open
from urllib import url2pathname
else:
from urllib.request import url2pathname
try:
import roman
except ImportError:
import docutils.utils.roman as roman
import docutils
from docutils import frontend, nodes, languages, writers, utils
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import pick_math_environment, unichar2tex
if sys.version_info >= (3, 0):
unicode = str # noqa
class Writer(writers.Writer):
    """Docutils writer that renders the document tree as LaTeX2e source.

    ``translate()`` runs a ``LaTeXTranslator`` over the tree, copies the
    collected fragments onto the writer, and substitutes them into the
    (user- or default-) template file.
    """

    supported = ('latex', 'latex2e')
    """Formats this writer supports."""

    default_template = 'default.tex'
    default_template_path = os.path.dirname(os.path.abspath(__file__))

    default_preamble = '\n'.join([r'% PDF Standard Fonts',
                                  r'\usepackage{mathptmx} % Times',
                                  r'\usepackage[scaled=.90]{helvet}',
                                  r'\usepackage{courier}'])

    table_style_values = ('standard', 'booktabs', 'nolines', 'borderless',
                          'colwidths-auto', 'colwidths-given')

    settings_spec = (
        'LaTeX-Specific Options',
        None,
        (('Specify LaTeX documentclass. Default: "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Specify document options. Multiple options can be given, '
          'separated by commas. Default: "a4paper".',
          ['--documentoptions'],
          {'default': 'a4paper', }),
         ('Format for footnote references: one of "superscript" or '
          '"brackets". Default: "superscript".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'superscript',
           'metavar': '<format>',
           'overrides': 'trim_footnote_reference_space'}),
         ('Use \\cite command for citations. (future default)',
          ['--use-latex-citations'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for citations '
          '(might get mixed with real figures). (current default)',
          ['--figure-citations'],
          {'dest': 'use_latex_citations', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none". Default: "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify LaTeX packages/stylesheets. '
          'A style is referenced with "\\usepackage" if extension is '
          '".sty" or omitted and with "\\input" else. '
          ' Overrides previous --stylesheet and --stylesheet-path settings.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file[,file,...]>',
           'overrides': 'stylesheet_path',
           'validator': frontend.validate_comma_separated_list}),
         ('Comma separated list of LaTeX packages/stylesheets. '
          'Relative paths are expanded if a matching file is found in '
          'the --stylesheet-dirs. With --link-stylesheet, '
          'the path is rewritten relative to the output *.tex file. ',
          ['--stylesheet-path'],
          {'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
           'validator': frontend.validate_comma_separated_list}),
         ('Link to the stylesheet(s) in the output file. (default)',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet(s) in the output file. '
          'Stylesheets must be accessible during processing. ',
          ['--embed-stylesheet'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Comma-separated list of directories where stylesheets are found. '
          'Used by --stylesheet-path when expanding relative path arguments. '
          'Default: ".".',
          ['--stylesheet-dirs'],
          {'metavar': '<dir[,dir,...]>',
           'validator': frontend.validate_comma_separated_list,
           'default': ['.']}),
         ('Customization by LaTeX code in the preamble. '
          'Default: select PDF standard fonts (Times, Helvetica, Courier).',
          ['--latex-preamble'],
          {'default': default_preamble}),
         ('Specify the template file. Default: "%s".' % default_template,
          ['--template'],
          {'default': default_template, 'metavar': '<file>'}),
         ('Table of contents by LaTeX. (default)',
          ['--use-latex-toc'],
          {'default': True, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table of contents by Docutils (without page numbers).',
          ['--use-docutils-toc'],
          {'dest': 'use_latex_toc', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Add parts on top of the section hierarchy.',
          ['--use-part-section'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document info table. (default)',
          ['--use-docutils-docinfo'],
          {'dest': 'use_latex_docinfo', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document title.',
          ['--use-latex-docinfo'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ("Typeset abstract as topic. (default)",
          ['--topic-abstract'],
          {'dest': 'use_latex_abstract', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ("Use LaTeX abstract environment for the document's abstract.",
          ['--use-latex-abstract'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Color of any hyperlinks embedded in text. '
          'Default: "blue" (use "false" to disable).',
          ['--hyperlink-color'], {'default': 'blue'}),
         ('Additional options to the "hyperref" package.',
          ['--hyperref-options'], {'default': ''}),
         ('Enable compound enumerators for nested enumerated lists '
          '(e.g. "1.2.a.ii").',
          ['--compound-enumerators'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compound enumerators for nested enumerated lists. '
          '(default)',
          ['--no-compound-enumerators'],
          {'action': 'store_false', 'dest': 'compound_enumerators'}),
         ('Enable section ("." subsection ...) prefixes for compound '
          'enumerators. This has no effect without --compound-enumerators.',
          ['--section-prefix-for-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable section prefixes for compound enumerators. (default)',
          ['--no-section-prefix-for-enumerators'],
          {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
         ('Set the separator between section number and enumerator '
          'for compound enumerated lists. Default: "-".',
          ['--section-enumerator-separator'],
          {'default': '-', 'metavar': '<char>'}),
         ('When possible, use the specified environment for literal-blocks. '
          'Default: "" (fall back to "alltt").',
          ['--literal-block-env'],
          {'default': ''}),
         ('When possible, use "verbatim" for literal-blocks. '
          'Compatibility alias for "--literal-block-env=verbatim".',
          ['--use-verbatim-when-possible'],
          {'default': False, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table style. "standard" with horizontal and vertical lines, '
          '"booktabs" (LaTeX booktabs style) only horizontal lines '
          'above and below the table and below the header, or "borderless". '
          'Default: "standard"',
          ['--table-style'],
          {'default': ['standard'],
           'metavar': '<format>',
           'action': 'append',
           'validator': frontend.validate_comma_separated_list,
           'choices': table_style_values}),
         ('LaTeX graphicx package option. '
          'Possible values are "dvipdfmx", "dvips", "dvisvgm", '
          '"luatex", "pdftex", and "xetex".'
          'Default: "".',
          ['--graphicx-option'],
          {'default': ''}),
         ('LaTeX font encoding. '
          'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
          'any other combination of options to the `fontenc` package. ',
          ['--font-encoding'],
          {'default': 'T1'}),
         ('Per default the latex-writer puts the reference title into '
          'hyperreferences. Specify "ref*" or "pageref*" to get the section '
          'number or the page number.',
          ['--reference-label'],
          {'default': ''}),
         ('Specify style and database for bibtex, for example '
          '"--use-bibtex=mystyle,mydb1,mydb2".',
          ['--use-bibtex'],
          {'default': ''}),
         ('Use legacy functions with class value list for '
          '\\DUtitle and \\DUadmonition (current default). ',
          ['--legacy-class-functions'],
          {'default': True,
           'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use \\DUrole and "DUclass" wrappers for class values. '
          'Place admonition content in an environment (future default).',
          ['--new-class-functions'],
          {'dest': 'legacy_class_functions',
           'action': 'store_false',
           'validator': frontend.validate_boolean}),
         # TODO: implement "latex footnotes" alternative
         ('Footnotes with numbers/symbols by Docutils. (default) '
          '(The alternative, --latex-footnotes, is not implemented yet.)',
          ['--docutils-footnotes'],
          {'default': True,
           'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ),)

    settings_defaults = {'sectnum_depth': 0  # updated by SectNum transform
                        }

    config_section = 'latex2e writer'
    config_section_dependencies = ('writers', 'latex writers')

    head_parts = ('head_prefix', 'requirements', 'latex_preamble',
                  'stylesheet', 'fallbacks', 'pdfsetup', 'titledata')
    visitor_attributes = head_parts + ('title', 'subtitle',
                                       'body_pre_docinfo', 'docinfo',
                                       'dedication', 'abstract', 'body')

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    # Override parent method to add latex-specific transforms
    def get_transforms(self):
        return writers.Writer.get_transforms(self) + [
            # Convert specific admonitions to generic one
            writer_aux.Admonitions,
            # TODO: footnote collection transform
            ]

    def translate(self):
        """Translate the document tree and fill the LaTeX template."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        # copy parts
        for part in self.visitor_attributes:
            setattr(self, part, getattr(visitor, part))
        # Get the template string: try the configured path first and fall
        # back to the template shipped next to this module.
        try:
            template_file = open(self.document.settings.template,
                                 encoding='utf8')
        except IOError:
            template_file = open(os.path.join(
                self.default_template_path,
                self.document.settings.template), encoding='utf8')
        # `with` guarantees the file is closed even if reading or template
        # construction raises (the original leaked the handle then).
        with template_file:
            template = string.Template(template_file.read())
        # fill template
        self.assemble_parts()  # create dictionary of parts
        self.output = template.substitute(self.parts)

    def assemble_parts(self):
        """Assemble the `self.parts` dictionary of output fragments."""
        writers.Writer.assemble_parts(self)
        for part in self.visitor_attributes:
            lines = getattr(self, part)
            if part in self.head_parts:
                if lines:
                    lines.append('')  # to get a trailing newline
                self.parts[part] = '\n'.join(lines)
            else:
                # body contains inline elements, so join without newline
                self.parts[part] = ''.join(lines)
class Babel(object):
"""Language specifics for LaTeX."""
# TeX (babel) language names:
# ! not all of these are supported by Docutils!
#
# based on LyX' languages file with adaptions to `BCP 47`_
# (http://www.rfc-editor.org/rfc/bcp/bcp47.txt) and
# http://www.tug.org/TUGboat/Articles/tb29-3/tb93miklavec.pdf
# * the key without subtags is the default
# * case is ignored
# cf. http://docutils.sourceforge.net/docs/howto/i18n.html
# http://www.w3.org/International/articles/language-tags/
# and http://www.iana.org/assignments/language-subtag-registry
language_codes = {
# code TeX/Babel-name comment
'af': 'afrikaans',
'ar': 'arabic',
# 'be': 'belarusian',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
# 'cop': 'coptic',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'ngerman', # new spelling (de_1996)
'de-1901': 'german', # old spelling
'de-AT': 'naustrian',
'de-AT-1901': 'austrian',
'dsb': 'lowersorbian',
'el': 'greek', # monotonic (el-monoton)
'el-polyton': 'polutonikogreek',
'en': 'english', # TeX' default language
'en-AU': 'australian',
'en-CA': 'canadian',
'en-GB': 'british',
'en-NZ': 'newzealand',
'en-US': 'american',
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
# 'fa': 'farsi',
'fi': 'finnish',
'fr': 'french',
'fr-CA': 'canadien',
'ga': 'irish', # Irish Gaelic
# 'grc': # Ancient Greek
'grc-ibycus': 'ibycus', # Ibycus encoding
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hsb': 'uppersorbian',
'hu': 'magyar',
'ia': 'interlingua',
'id': 'bahasai', # Bahasa (Indonesian)
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'kk': 'kazakh',
'la': 'latin',
'lt': 'lithuanian',
'lv': 'latvian',
'mn': 'mongolian', # Mongolian, Cyrillic script (mn-cyrl)
'ms': 'bahasam', # Bahasa (Malay)
'nb': 'norsk', # Norwegian Bokmal
'nl': 'dutch',
'nn': 'nynorsk', # Norwegian Nynorsk
'no': 'norsk', # Norwegian (Bokmal)
'pl': 'polish',
'pt': 'portuges',
'pt-BR': 'brazil',
'ro': 'romanian',
'ru': 'russian',
'se': 'samin', # North Sami
'sh-Cyrl': 'serbianc', # Serbo-Croatian, Cyrillic script
'sh-Latn': 'serbian', # Serbo-Croatian, Latin script see also 'hr'
'sk': 'slovak',
'sl': 'slovene',
'sq': 'albanian',
'sr': 'serbianc', # Serbian, Cyrillic script (contributed)
'sr-Latn': 'serbian', # Serbian, Latin script
'sv': 'swedish',
# 'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnam',
# zh-Latn: Chinese Pinyin
}
# normalize (downcase) keys
language_codes = dict([(k.lower(), v) for (k, v) in language_codes.items()])
warn_msg = 'Language "%s" not supported by LaTeX (babel)'
# "Active characters" are shortcuts that start a LaTeX macro and may need
# escaping for literals use. Characters that prevent literal use (e.g.
# starting accent macros like "a -> ä) will be deactivated if one of the
# defining languages is used in the document.
# Special cases:
# ~ (tilde) -- used in estonian, basque, galician, and old versions of
# spanish -- cannot be deactivated as it denotes a no-break space macro,
# " (straight quote) -- used in albanian, austrian, basque
# brazil, bulgarian, catalan, czech, danish, dutch, estonian,
# finnish, galician, german, icelandic, italian, latin, naustrian,
# ngerman, norsk, nynorsk, polish, portuges, russian, serbian, slovak,
# slovene, spanish, swedish, ukrainian, and uppersorbian --
# is escaped as ``\textquotedbl``.
active_chars = {# TeX/Babel-name: active characters to deactivate
# 'breton': ':;!?' # ensure whitespace
# 'esperanto': '^',
# 'estonian': '~"`',
# 'french': ':;!?' # ensure whitespace
'galician': '.<>', # also '~"'
# 'magyar': '`', # for special hyphenation cases
'spanish': '.<>', # old versions also '~'
# 'turkish': ':!=' # ensure whitespace
}
def __init__(self, language_code, reporter=None):
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
def __call__(self):
"""Return the babel call with correct options and settings"""
languages = sorted(self.otherlanguages.keys())
languages.append(self.language or 'english')
self.setup = [r'\usepackage[%s]{babel}' % ','.join(languages)]
# Deactivate "active characters"
shorthands = []
for c in ''.join([self.active_chars.get(l, '') for l in languages]):
if c not in shorthands:
shorthands.append(c)
if shorthands:
self.setup.append(r'\AtBeginDocument{\shorthandoff{%s}}'
% ''.join(shorthands))
# Including '~' in shorthandoff prevents its use as no-break space
if 'galician' in languages:
self.setup.append(r'\deactivatetilden % restore ~ in Galician')
if 'estonian' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasestonian{\bbl@deactivate{~}}',
r'\makeatother'])
if 'basque' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasbasque{\bbl@deactivate{~}}',
r'\makeatother'])
if (languages[-1] == 'english' and
'french' in self.otherlanguages.keys()):
self.setup += ['% Prevent side-effects if French hyphenation '
'patterns are not loaded:',
r'\frenchbsetup{StandardLayout}',
r'\AtBeginDocument{\selectlanguage{%s}'
r'\noextrasfrench}' % self.language]
return '\n'.join(self.setup)
def language_name(self, language_code):
"""Return TeX language name for `language_code`"""
for tag in utils.normalize_language_tag(language_code):
try:
return self.language_codes[tag]
except KeyError:
pass
if self.reporter is not None:
self.reporter.warning(self.warn_msg % language_code)
return ''
    def get_language(self):
        """Return the babel name of the document language."""
        # Obsolete, kept for backwards compatibility with Sphinx
        return self.language
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
    """Dictionary with additional sorting methods

    Tip: use keys starting with '_' for sorting before small letters
    and with '~' for sorting after small letters.
    """
    def sortedkeys(self):
        """Return sorted list of keys"""
        # sorted() accepts the dict directly (iterates its keys);
        # no need for an intermediate variable or .keys() call
        return sorted(self)

    def sortedvalues(self):
        """Return list of values sorted by keys"""
        return [self[key] for key in self.sortedkeys()]
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
    """Building blocks for the latex preamble.

    Attributes are attached after the class definition: a few
    hard-coded requirement snippets plus the named definition blocks
    read from ``docutils.sty`` (see the module-level loader below).
    """
# Requirements
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.float = r"""\usepackage{float} % extended float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.linking = r"""%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[%s]{hyperref}
\usepackage{bookmark}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.table = r"""\usepackage{longtable,ltcaption,array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
PreambleCmds.textcomp = r"""\usepackage{textcomp} % text symbol macros"""
# TODO? Options [force,almostfull] prevent spurious error messages,
# see de.comp.text.tex/2005-12/msg01855
# backwards compatibility definitions
PreambleCmds.abstract_legacy = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centerline{\textbf{#1}}}"""
# see https://sourceforge.net/p/docutils/bugs/339/
PreambleCmds.admonition_legacy = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\linewidth}{#2}}
\end{center}
\fi
}"""
PreambleCmds.error_legacy = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
PreambleCmds.title_legacy = r"""
% title for topics, admonitions, unsupported section levels, and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
# Definitions from docutils.sty::
def _read_block(fp):
block = [next(fp)] # first line (empty)
for line in fp:
if not line.strip():
break
block.append(line)
return ''.join(block).rstrip()
# Load the named definition blocks from "docutils.sty" (which ships next
# to this module) and attach each one as a `PreambleCmds` attribute.
# A block is announced by a comment line ending in '::'; its body is the
# following paragraph (read by `_read_block`).
_du_sty = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                       'docutils.sty')
with open(_du_sty, encoding='utf8') as fp:
    for line in fp:
        line = line.strip('% \n')
        if not line.endswith('::'):
            continue
        block_name = line.rstrip(':')
        if not block_name:
            continue
        definitions = _read_block(fp)
        # these four snippets are used without surrounding whitespace:
        if block_name in ('color', 'float', 'table', 'textcomp'):
            definitions = definitions.strip()
        # print('Block: `%s`'% block_name)
        # print(definitions)
        setattr(PreambleCmds, block_name, definitions)
# LaTeX encoding maps
# -------------------
# ::
class CharMaps(object):
    """LaTeX representations for active and Unicode characters.

    Translation tables (codepoint -> replacement string) used by
    `LaTeXTranslator.encode` via `str.translate`.
    """
    # characters that need escaping even in `alltt` environments:
    alltt = {
        ord('\\'): u'\\textbackslash{}',
        ord('{'): u'\\{',
        ord('}'): u'\\}',
    }
    # characters that normally need escaping:
    special = {
        ord('#'): u'\\#',
        ord('$'): u'\\$',
        ord('%'): u'\\%',
        ord('&'): u'\\&',
        ord('~'): u'\\textasciitilde{}',
        ord('_'): u'\\_',
        ord('^'): u'\\textasciicircum{}',
        # straight double quotes are 'active' in many languages
        ord('"'): u'\\textquotedbl{}',
        # Square brackets are ordinary chars and cannot be escaped with '\',
        # so we put them in a group '{[}'. (Alternative: ensure that all
        # macros with optional arguments are terminated with {} and text
        # inside any optional argument is put in a group ``[{text}]``).
        # Commands with optional args inside an optional arg must be put in a
        # group, e.g. ``\item[{\hyperref[label]{text}}]``.
        ord('['): u'{[}',
        ord(']'): u'{]}',
        # the soft hyphen is unknown in 8-bit text
        # and not properly handled by XeTeX
        0x00AD: u'\\-', # SOFT HYPHEN
    }
    # Unicode chars that are not recognized by LaTeX's utf8 encoding
    unsupported_unicode = {
        # TODO: ensure white space also at the beginning of a line?
        # 0x00A0: u'\\leavevmode\\nobreak\\vadjust{}~'
        0x2000: u'\\enskip', # EN QUAD
        0x2001: u'\\quad', # EM QUAD
        0x2002: u'\\enskip', # EN SPACE
        0x2003: u'\\quad', # EM SPACE
        0x2008: u'\\,', # PUNCTUATION SPACE
        0x200b: u'\\hspace{0pt}', # ZERO WIDTH SPACE
        0x202F: u'\\,', # NARROW NO-BREAK SPACE
        # 0x02d8: u'\\\u{ }', # BREVE
        0x2011: u'\\hbox{-}', # NON-BREAKING HYPHEN
        0x212b: u'\\AA', # ANGSTROM SIGN
        0x21d4: u'\\ensuremath{\\Leftrightarrow}',
        # Docutils footnote symbols:
        0x2660: u'\\ensuremath{\\spadesuit}',
        0x2663: u'\\ensuremath{\\clubsuit}',
        0xfb00: u'ff', # LATIN SMALL LIGATURE FF
        0xfb01: u'fi', # LATIN SMALL LIGATURE FI
        0xfb02: u'fl', # LATIN SMALL LIGATURE FL
        0xfb03: u'ffi', # LATIN SMALL LIGATURE FFI
        0xfb04: u'ffl', # LATIN SMALL LIGATURE FFL
    }
    # Unicode chars that are recognized by LaTeX's utf8 encoding
    utf8_supported_unicode = {
        0x00A0: u'~', # NO-BREAK SPACE
        0x00AB: u'\\guillemotleft{}', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
        0x00bb: u'\\guillemotright{}', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
        0x200C: u'\\textcompwordmark{}', # ZERO WIDTH NON-JOINER
        0x2013: u'\\textendash{}',
        0x2014: u'\\textemdash{}',
        0x2018: u'\\textquoteleft{}',
        0x2019: u'\\textquoteright{}',
        0x201A: u'\\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
        0x201C: u'\\textquotedblleft{}',
        0x201D: u'\\textquotedblright{}',
        0x201E: u'\\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
        0x2030: u'\\textperthousand{}', # PER MILLE SIGN
        0x2031: u'\\textpertenthousand{}', # PER TEN THOUSAND SIGN
        0x2039: u'\\guilsinglleft{}',
        0x203A: u'\\guilsinglright{}',
        0x2423: u'\\textvisiblespace{}', # OPEN BOX
        0x2020: u'\\dag{}',
        0x2021: u'\\ddag{}',
        0x2026: u'\\dots{}',
        0x2122: u'\\texttrademark{}',
    }
    # recognized with 'utf8', if textcomp is loaded
    textcomp = {
        # Latin-1 Supplement
        0x00a2: u'\\textcent{}', # ¢ CENT SIGN
        0x00a4: u'\\textcurrency{}', # ¤ CURRENCY SYMBOL
        0x00a5: u'\\textyen{}', # ¥ YEN SIGN
        0x00a6: u'\\textbrokenbar{}', # ¦ BROKEN BAR
        0x00a7: u'\\textsection{}', # § SECTION SIGN
        0x00a8: u'\\textasciidieresis{}', # ¨ DIAERESIS
        0x00a9: u'\\textcopyright{}', # © COPYRIGHT SIGN
        0x00aa: u'\\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
        0x00ac: u'\\textlnot{}', # ¬ NOT SIGN
        0x00ae: u'\\textregistered{}', # ® REGISTERED SIGN
        0x00af: u'\\textasciimacron{}', # ¯ MACRON
        0x00b0: u'\\textdegree{}', # ° DEGREE SIGN
        0x00b1: u'\\textpm{}', # ± PLUS-MINUS SIGN
        0x00b2: u'\\texttwosuperior{}', # ² SUPERSCRIPT TWO
        0x00b3: u'\\textthreesuperior{}', # ³ SUPERSCRIPT THREE
        0x00b4: u'\\textasciiacute{}', # ´ ACUTE ACCENT
        0x00b5: u'\\textmu{}', # µ MICRO SIGN
        0x00b6: u'\\textparagraph{}', # ¶ PILCROW SIGN # != \textpilcrow
        0x00b9: u'\\textonesuperior{}', # ¹ SUPERSCRIPT ONE
        0x00ba: u'\\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
        0x00bc: u'\\textonequarter{}', # 1/4 FRACTION
        0x00bd: u'\\textonehalf{}', # 1/2 FRACTION
        0x00be: u'\\textthreequarters{}', # 3/4 FRACTION
        0x00d7: u'\\texttimes{}', # × MULTIPLICATION SIGN
        0x00f7: u'\\textdiv{}', # ÷ DIVISION SIGN
        # others
        0x0192: u'\\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
        0x02b9: u'\\textasciiacute{}', # MODIFIER LETTER PRIME
        0x02ba: u'\\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
        0x2016: u'\\textbardbl{}', # DOUBLE VERTICAL LINE
        0x2022: u'\\textbullet{}', # BULLET
        0x2032: u'\\textasciiacute{}', # PRIME
        0x2033: u'\\textacutedbl{}', # DOUBLE PRIME
        0x2035: u'\\textasciigrave{}', # REVERSED PRIME
        0x2036: u'\\textgravedbl{}', # REVERSED DOUBLE PRIME
        0x203b: u'\\textreferencemark{}', # REFERENCE MARK
        0x203d: u'\\textinterrobang{}', # INTERROBANG
        0x2044: u'\\textfractionsolidus{}', # FRACTION SLASH
        0x2045: u'\\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
        0x2046: u'\\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
        0x2052: u'\\textdiscount{}', # COMMERCIAL MINUS SIGN
        0x20a1: u'\\textcolonmonetary{}', # COLON SIGN
        0x20a3: u'\\textfrenchfranc{}', # FRENCH FRANC SIGN
        0x20a4: u'\\textlira{}', # LIRA SIGN
        0x20a6: u'\\textnaira{}', # NAIRA SIGN
        0x20a9: u'\\textwon{}', # WON SIGN
        0x20ab: u'\\textdong{}', # DONG SIGN
        0x20ac: u'\\texteuro{}', # EURO SIGN
        0x20b1: u'\\textpeso{}', # PESO SIGN
        0x20b2: u'\\textguarani{}', # GUARANI SIGN
        0x2103: u'\\textcelsius{}', # DEGREE CELSIUS
        0x2116: u'\\textnumero{}', # NUMERO SIGN
        0x2117: u'\\textcircledP{}', # SOUND RECORDING COPYRIGHT
        0x211e: u'\\textrecipe{}', # PRESCRIPTION TAKE
        0x2120: u'\\textservicemark{}', # SERVICE MARK
        0x2122: u'\\texttrademark{}', # TRADE MARK SIGN
        0x2126: u'\\textohm{}', # OHM SIGN
        0x2127: u'\\textmho{}', # INVERTED OHM SIGN
        0x212e: u'\\textestimated{}', # ESTIMATED SYMBOL
        0x2190: u'\\textleftarrow{}', # LEFTWARDS ARROW
        0x2191: u'\\textuparrow{}', # UPWARDS ARROW
        0x2192: u'\\textrightarrow{}', # RIGHTWARDS ARROW
        0x2193: u'\\textdownarrow{}', # DOWNWARDS ARROW
        0x2212: u'\\textminus{}', # MINUS SIGN
        0x2217: u'\\textasteriskcentered{}', # ASTERISK OPERATOR
        0x221a: u'\\textsurd{}', # SQUARE ROOT
        0x2422: u'\\textblank{}', # BLANK SYMBOL
        0x25e6: u'\\textopenbullet{}', # WHITE BULLET
        0x25ef: u'\\textbigcircle{}', # LARGE CIRCLE
        0x266a: u'\\textmusicalnote{}', # EIGHTH NOTE
        0x26ad: u'\\textmarried{}', # MARRIAGE SYMBOL
        0x26ae: u'\\textdivorced{}', # DIVORCE SYMBOL
        0x27e8: u'\\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
        0x27e9: u'\\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
    }
    # Unicode chars that require a feature/package to render
    pifont = {
        0x2665: u'\\ding{170}', # black heartsuit
        0x2666: u'\\ding{169}', # black diamondsuit
        0x2713: u'\\ding{51}', # check mark
        0x2717: u'\\ding{55}', # check mark
    }
# TODO: greek alphabet ... ?
# see also LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# and unimap.py from TeXML
class DocumentClass(object):
    """Details of a LaTeX document class."""

    def __init__(self, document_class, with_part=False):
        self.document_class = document_class
        self._with_part = with_part
        # Sectioning command names, outermost first.  'chapter' exists
        # only in book-like classes; 'part' is prepended on request.
        prefix = []
        if with_part:
            prefix.append('part')
        if document_class in ('book', 'memoir', 'report',
                              'scrbook', 'scrreprt'):
            prefix.append('chapter')
        self.sections = prefix + ['section', 'subsection', 'subsubsection',
                                  'paragraph', 'subparagraph']

    def section(self, level):
        """Return the LaTeX section name for section `level`.

        The name depends on the specific document class.
        Level is 1,2,3..., as level 0 is the title.
        """
        if level > len(self.sections):
            # level deeper than this class supports
            return 'DUtitle'
        return self.sections[level-1]
class Table(object):
    """Manage a table while traversing.

    Maybe change to a mixin defining the visit/departs, but then
    class Table internal variables are in the Translator.

    Table style might be

    :standard:   horizontal and vertical lines
    :booktabs:   only horizontal lines (requires "booktabs" LaTeX package)
    :borderless: no borders around table cells
    :nolines:    alias for borderless
    :colwidths-auto:  column widths determined by LaTeX
    :colwidths-given: use column widths from rST source
    """

    def __init__(self, translator, latex_type):
        self._translator = translator
        self._latex_type = latex_type  # e.g. 'longtable' or 'tabular'
        self._open = False
        # miscellaneous attributes
        self._attrs = {}
        self._col_width = []
        self._rowspan = []
        self.stubs = []
        self.colwidths_auto = False
        self._in_thead = 0

    def open(self):
        """Initialize per-table state (called when a table is entered)."""
        self._open = True
        self._col_specs = []
        self.caption = []
        self._attrs = {}
        self._in_head = False # maybe context with search

    def close(self):
        """Reset per-table state (called when a table is left)."""
        self._open = False
        self._col_specs = None
        self.caption = []
        self._attrs = {}
        self.stubs = []
        self.colwidths_auto = False

    def is_open(self):
        return self._open

    def set_table_style(self, table_style, classes):
        """Set border style and column-width mode from setting + classes."""
        borders = [cls.replace('nolines', 'borderless')
                   for cls in table_style+classes
                   if cls in ('standard', 'booktabs', 'borderless', 'nolines')]
        try:
            self.borders = borders[-1]  # last specified value wins
        except IndexError:
            self.borders = 'standard'
        self.colwidths_auto = (('colwidths-auto' in classes
                                and 'colwidths-given' not in table_style)
                               or ('colwidths-auto' in table_style
                                   and ('colwidths-given' not in classes)))

    def get_latex_type(self):
        if self._latex_type == 'longtable' and not self.caption:
            # do not advance the "table" counter (requires "ltcaption" package)
            return('longtable*')
        return self._latex_type

    def set(self, attr, value):
        self._attrs[attr] = value

    def get(self, attr):
        """Return stored attribute `attr` (None if never set)."""
        return self._attrs.get(attr)

    def get_vertical_bar(self):
        if self.borders == 'standard':
            return '|'
        return ''

    # horizontal lines are drawn below a row,
    def get_opening(self, width=r'\linewidth'):
        """Return LaTeX code opening the table environment."""
        align_map = {'left': '[l]',
                     'center': '[c]',
                     'right': '[r]',
                     None: ''}
        align = align_map.get(self.get('align'))
        latex_type = self.get_latex_type()
        if align and latex_type not in ("longtable", "longtable*"):
            opening = [r'\noindent\makebox[\linewidth]%s{%%' % (align,),
                       r'\begin{%s}' % (latex_type,),
                      ]
        else:
            opening = [r'\begin{%s}%s' % (latex_type, align)]
        if not self.colwidths_auto:
            # \DUtablewidth is scaled by get_colspecs() below
            opening.insert(-1, r'\setlength{\DUtablewidth}{%s}%%'%width)
        return '\n'.join(opening)

    def get_closing(self):
        """Return LaTeX code closing the table environment."""
        closing = []
        if self.borders == 'booktabs':
            closing.append(r'\bottomrule')
        # elif self.borders == 'standard':
        #     closing.append(r'\hline')
        closing.append(r'\end{%s}' % self.get_latex_type())
        if self.get('align') and self.get_latex_type() not in ("longtable", "longtable*"):
            closing.append('}')  # close the \makebox from get_opening()
        return '\n'.join(closing)

    def visit_colspec(self, node):
        self._col_specs.append(node)
        # "stubs" list is an attribute of the tgroup element:
        self.stubs.append(node.attributes.get('stub'))

    def get_colspecs(self, node):
        """Return column specification for longtable.

        Assumes reST line length being 80 characters.
        Table width is hairy.

        === ===
        ABC DEF
        === ===

        usually gets too narrow, therefore we add 1 (fiddlefactor).
        """
        bar = self.get_vertical_bar()
        self._rowspan = [0] * len(self._col_specs)
        self._col_width = []
        if self.colwidths_auto:
            latex_table_spec = (bar+'l')*len(self._col_specs)
            return latex_table_spec+bar
        width = 80
        total_width = 0.0
        # first see if we get too wide.
        for node in self._col_specs:
            colwidth = float(node['colwidth']+1) / width
            total_width += colwidth
        # do not make it full linewidth
        factor = 0.93
        if total_width > 1.0:
            factor /= total_width
        latex_table_spec = ''
        for node in self._col_specs:
            colwidth = factor * float(node['colwidth']+1) / width
            self._col_width.append(colwidth+0.005)
            latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
        return latex_table_spec+bar

    def get_column_width(self):
        """Return columnwidth for current cell (not multicell)."""
        try:
            return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row]
        except IndexError:
            return '*'

    def get_multicolumn_width(self, start, len_):
        """Return sum of columnwidths for multicell."""
        try:
            mc_width = sum([width
                            for width in ([self._col_width[start + co]
                                           for co in range (len_)])])
            return 'p{%.2f\\DUtablewidth}' % mc_width
        except IndexError:
            return 'l'

    def get_caption(self):
        """Return the \\caption line(s) (empty string if no caption)."""
        if not self.caption:
            return ''
        caption = ''.join(self.caption)
        if 1 == self._translator.thead_depth():
            return r'\caption{%s}\\' '\n' % caption
        # continuation page of a longtable: unnumbered caption
        return r'\caption[]{%s (... continued)}\\' '\n' % caption

    def need_recurse(self):
        if self._latex_type == 'longtable':
            return 1 == self._translator.thead_depth()
        return 0

    def visit_thead(self):
        self._in_thead += 1
        if self.borders == 'standard':
            return ['\\hline\n']
        elif self.borders == 'booktabs':
            return ['\\toprule\n']
        return []

    def depart_thead(self):
        a = []
        #if self.borders == 'standard':
        #    a.append('\\hline\n')
        if self.borders == 'booktabs':
            a.append('\\midrule\n')
        if self._latex_type == 'longtable':
            if 1 == self._translator.thead_depth():
                a.append('\\endfirsthead\n')
            else:
                a.append('\\endhead\n')
                a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
                         r'{\hfill ... continued on next page} \\')
                a.append('\n\\endfoot\n\\endlastfoot\n')
        # for longtable one could add firsthead, foot and lastfoot
        self._in_thead -= 1
        return a

    def visit_row(self):
        self._cell_in_row = 0

    def depart_row(self):
        res = [' \\\\\n']
        self._cell_in_row = None # remove cell counter
        for i in range(len(self._rowspan)):
            if (self._rowspan[i] > 0):
                self._rowspan[i] -= 1
        if self.borders == 'standard':
            rowspans = [i+1 for i in range(len(self._rowspan))
                        if (self._rowspan[i] <= 0)]
            if len(rowspans) == len(self._rowspan):
                res.append('\\hline\n')
            else:
                cline = ''
                rowspans.reverse()
                # TODO merge clines
                # (was: try/pop with a bare ``except`` -- pop() from an
                # empty list raises IndexError, so loop until empty instead)
                while rowspans:
                    c_start = rowspans.pop()
                    cline += '\\cline{%d-%d}\n' % (c_start, c_start)
                res.append(cline)
        return res

    def set_rowspan(self, cell, value):
        try:
            self._rowspan[cell] = value
        except (IndexError, TypeError):
            # out-of-range index, or cell counter already reset to None
            # (was a bare ``except`` that also swallowed KeyboardInterrupt)
            pass

    def get_rowspan(self, cell):
        try:
            return self._rowspan[cell]
        except (IndexError, TypeError):
            # see set_rowspan()
            return 0

    def get_entry_number(self):
        return self._cell_in_row

    def visit_entry(self):
        self._cell_in_row += 1

    def is_stub_column(self):
        # NOTE(review): with ``>=`` an uncaught IndexError escapes when
        # len(self.stubs) == self._cell_in_row; looks like it should be
        # ``>`` -- confirm against the callers before changing.
        if len(self.stubs) >= self._cell_in_row:
            return self.stubs[self._cell_in_row]
        return False
class LaTeXTranslator(nodes.NodeVisitor):
"""
Generate code for 8-bit LaTeX from a Docutils document tree.
See the docstring of docutils.writers._html_base.HTMLTranslator for
notes on and examples of safe subclassing.
"""
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Generate code for typesetting with 8-bit latex/pdflatex vs.
# xelatex/lualatex engine. Overwritten by the XeTeX writer
is_xetex = False
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = False
# If using compound enumerations, include section information.
section_prefix_for_enumerators = False
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels underscores dont need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
alltt = False # inside `alltt` environment
    def __init__(self, document, babel_class=Babel):
        """Collect settings from `document.settings` and initialize the
        output collection stacks and LaTeX preamble requirements.

        `babel_class` is the language-setup helper class (overridden by
        the XeTeX writer).
        """
        nodes.NodeVisitor.__init__(self, document)
        # Reporter
        # ~~~~~~~~
        self.warn = self.document.reporter.warning
        self.error = self.document.reporter.error
        # Settings
        # ~~~~~~~~
        self.settings = settings = document.settings
        self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
        self.use_latex_toc = settings.use_latex_toc
        self.use_latex_docinfo = settings.use_latex_docinfo
        self._use_latex_citations = settings.use_latex_citations
        self._reference_label = settings.reference_label
        self.hyperlink_color = settings.hyperlink_color
        self.compound_enumerators = settings.compound_enumerators
        self.font_encoding = getattr(settings, 'font_encoding', '')
        self.section_prefix_for_enumerators = (
            settings.section_prefix_for_enumerators)
        self.section_enumerator_separator = (
            settings.section_enumerator_separator.replace('_', r'\_'))
        # literal blocks:
        self.literal_block_env = ''
        self.literal_block_options = ''
        if settings.literal_block_env:
            # split "envname[options]" into name and options part
            (none,
             self.literal_block_env,
             self.literal_block_options,
             none) = re.split(r'(\w+)(.*)', settings.literal_block_env)
        elif settings.use_verbatim_when_possible:
            self.literal_block_env = 'verbatim'
        #
        if self.settings.use_bibtex:
            self.bibtex = self.settings.use_bibtex.split(',', 1)
            # TODO avoid errors on not declared citations.
        else:
            self.bibtex = None
        # language module for Docutils-generated text
        # (labels, bibliographic_fields, and author_separators)
        self.language_module = languages.get_language(settings.language_code,
                                                      document.reporter)
        self.babel = babel_class(settings.language_code, document.reporter)
        self.author_separator = self.language_module.author_separators[0]
        d_options = [self.settings.documentoptions]
        if self.babel.language not in ('english', ''):
            d_options.append(self.babel.language)
        self.documentoptions = ','.join(filter(None, d_options))
        self.d_class = DocumentClass(settings.documentclass,
                                     settings.use_part_section)
        # graphic package options:
        if self.settings.graphicx_option == '':
            self.graphicx_package = r'\usepackage{graphicx}'
        else:
            self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
                                     self.settings.graphicx_option)
        # footnotes: TODO: implement LaTeX footnotes
        self.docutils_footnotes = settings.docutils_footnotes
        # @@ table_style: list of values from fixed set: warn?
        # for s in self.settings.table_style:
        #     if s not in Writer.table_style_values:
        #         self.warn('Ignoring value "%s" in "table-style" setting.' %s)
        # Output collection stacks
        # ~~~~~~~~~~~~~~~~~~~~~~~~
        # Document parts
        self.head_prefix = [r'\documentclass[%s]{%s}' %
                            (self.documentoptions, self.settings.documentclass)]
        self.requirements = SortableDict() # made a list in depart_document()
        self.requirements['__static'] = r'\usepackage{ifthen}'
        self.latex_preamble = [settings.latex_preamble]
        self.fallbacks = SortableDict() # made a list in depart_document()
        self.pdfsetup = [] # PDF properties (hyperref package)
        self.title = []
        self.subtitle = []
        self.titledata = [] # \title, \author, \date
        ## self.body_prefix = ['\\begin{document}\n']
        self.body_pre_docinfo = [] # \maketitle
        self.docinfo = []
        self.dedication = []
        self.abstract = []
        self.body = []
        ## self.body_suffix = ['\\end{document}\n']
        self.context = []
        """Heterogeneous stack.

        Used by visit_* and depart_* functions in conjunction with the tree
        traversal. Make sure that the pops correspond to the pushes."""
        # Title metadata:
        self.title_labels = []
        self.subtitle_labels = []
        # (if use_latex_docinfo: collects lists of
        # author/organization/contact/address lines)
        self.author_stack = []
        self.date = []
        # PDF properties: pdftitle, pdfauthor
        self.pdfauthor = []
        self.pdfinfo = []
        if settings.language_code != 'en':
            self.pdfinfo.append(' pdflang={%s},'%settings.language_code)
        # Stack of section counters so that we don't have to use_latex_toc.
        # This will grow and shrink as processing occurs.
        # Initialized for potential first-level sections.
        self._section_number = [0]
        # The current stack of enumerations so that we can expand
        # them into a compound enumeration.
        self._enumeration_counters = []
        # The maximum number of enumeration counters we've used.
        # If we go beyond this number, we need to create a new
        # counter; otherwise, just reuse an old one.
        self._max_enumeration_counters = 0
        self._bibitems = []
        # object for a table while processing.
        self.table_stack = []
        self.active_table = Table(self, 'longtable')
        # Where to collect the output of visitor methods (default: body)
        self.out = self.body
        self.out_stack = [] # stack of output collectors
        # Process settings
        # ~~~~~~~~~~~~~~~~
        # Encodings:
        # Docutils' output-encoding => TeX input encoding
        if self.latex_encoding != 'ascii':
            self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}'
                                              % self.latex_encoding)
        # TeX font encoding
        if not self.is_xetex:
            if self.font_encoding:
                self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
                                                 self.font_encoding)
            # ensure \textquotedbl is defined:
            for enc in self.font_encoding.split(','):
                enc = enc.strip()
                if enc == 'OT1':
                    self.requirements['_textquotedblOT1'] = (
                        r'\DeclareTextSymbol{\textquotedbl}{OT1}{`\"}')
                elif enc not in ('T1', 'T2A', 'T2B', 'T2C', 'T4', 'T5'):
                    self.requirements['_textquotedbl'] = (
                        r'\DeclareTextSymbolDefault{\textquotedbl}{T1}')
        # page layout with typearea (if there are relevant document options)
        if (settings.documentclass.find('scr') == -1 and
            (self.documentoptions.find('DIV') != -1 or
             self.documentoptions.find('BCOR') != -1)):
            self.requirements['typearea'] = r'\usepackage{typearea}'
        # Stylesheets
        # (the name `self.stylesheet` is singular because only one
        # stylesheet was supported before Docutils 0.6).
        stylesheet_list = utils.get_stylesheet_list(settings)
        self.fallback_stylesheet = 'docutils' in stylesheet_list
        if self.fallback_stylesheet:
            stylesheet_list = [sheet for sheet in stylesheet_list
                               if sheet != 'docutils']
            if self.settings.legacy_class_functions:
                # docutils.sty is incompatible with legacy functions
                self.fallback_stylesheet = False
            else:
                # require a minimal version:
                self.fallbacks['docutils.sty'
                               ] = r'\usepackage{docutils}[2020/08/28]'
        self.stylesheet = [self.stylesheet_call(path)
                           for path in stylesheet_list]
        # PDF setup
        if self.hyperlink_color in ('0', 'false', 'False', ''):
            self.hyperref_options = ''
        else:
            self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
                self.hyperlink_color, self.hyperlink_color)
        if settings.hyperref_options:
            self.hyperref_options += ',' + settings.hyperref_options
        # LaTeX Toc
        # include all supported sections in toc and PDF bookmarks
        # (or use documentclass-default (as currently))?
        ## if self.use_latex_toc:
        ##    self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
        ##                                     len(self.d_class.sections))
        # Section numbering
        if settings.sectnum_xform: # section numbering by Docutils
            PreambleCmds.secnumdepth = r'\setcounter{secnumdepth}{0}'
        else: # section numbering by LaTeX:
            secnumdepth = settings.sectnum_depth
            # Possible values of settings.sectnum_depth:
            # None  "sectnum" directive without depth arg -> LaTeX default
            #  0    no "sectnum" directive -> no section numbers
            # >0    value of "depth" argument -> translate to LaTeX levels:
            #       -1  part    (0 with "article" document class)
            #        0  chapter (missing in "article" document class)
            #        1  section
            #        2  subsection
            #        3  subsubsection
            #        4  paragraph
            #        5  subparagraph
            if secnumdepth is not None:
                # limit to supported levels
                secnumdepth = min(secnumdepth, len(self.d_class.sections))
                # adjust to document class and use_part_section settings
                if 'chapter' in self.d_class.sections:
                    secnumdepth -= 1
                if self.d_class.sections[0] == 'part':
                    secnumdepth -= 1
                PreambleCmds.secnumdepth = \
                    r'\setcounter{secnumdepth}{%d}' % secnumdepth
            # start with specified number:
            if (hasattr(settings, 'sectnum_start') and
                settings.sectnum_start != 1):
                self.requirements['sectnum_start'] = (
                    r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
                                              settings.sectnum_start-1))
            # TODO: currently ignored (configure in a stylesheet):
            ## settings.sectnum_prefix
            ## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
# is it a package (no extension or *.sty) or "normal" tex code:
(base, ext) = os.path.splitext(path)
is_package = ext in ['.sty', '']
# Embed content of style file:
if self.settings.embed_stylesheet:
if is_package:
path = base + '.sty' # ensure extension
try:
content = docutils.io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError as err:
msg = u'Cannot embed stylesheet %r:\n %s.' % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '% ' + msg.replace('\n', '\n% ')
if is_package:
content = '\n'.join([r'\makeatletter',
content,
r'\makeatother'])
return '%% embedded stylesheet: %s\n%s' % (path, content)
# Link to style file:
if is_package:
path = base # drop extension
cmd = r'\usepackage{%s}'
else:
cmd = r'\input{%s}'
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return cmd % path
def to_latex_encoding(self, docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
Default method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# drop hyphen or low-line from "latin-1", "latin_1", "utf-8" and similar
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
    def language_label(self, docutil_label):
        """Return the localized text for the Docutils label `docutil_label`."""
        return self.language_module.labels[docutil_label]
def encode(self, text):
"""Return text with 'problematic' characters escaped.
* Escape the special printing characters ``# $ % & ~ _ ^ \\ { }``,
square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
* Translate non-supported Unicode characters.
* Separate ``-`` (and more in literal text) to prevent input ligatures.
"""
if self.verbatim:
return text
# Set up the translation table:
table = CharMaps.alltt.copy()
if not self.alltt:
table.update(CharMaps.special)
# keep the underscore in citation references
if self.inside_citation_reference_label and not self.alltt:
del(table[ord('_')])
# Workarounds for OT1 font-encoding
if self.font_encoding in ['OT1', ''] and not self.is_xetex:
# * out-of-order characters in cmtt
if self.literal:
# replace underscore by underlined blank,
# because this has correct width.
table[ord('_')] = u'\\underline{~}'
# the backslash doesn't work, so we use a mirrored slash.
# \reflectbox is provided by graphicx:
self.requirements['graphicx'] = self.graphicx_package
table[ord('\\')] = u'\\reflectbox{/}'
# * ``< | >`` come out as different chars (except for cmtt):
else:
table[ord('|')] = u'\\textbar{}'
table[ord('<')] = u'\\textless{}'
table[ord('>')] = u'\\textgreater{}'
if self.insert_non_breaking_blanks:
table[ord(' ')] = u'~'
# tab chars may occur in included files (literal or code)
# quick-and-dirty replacement with spaces
# (for better results use `--literal-block-env=lstlisting`)
table[ord('\t')] = u'~' * self.settings.tab_width
# Unicode replacements for 8-bit tex engines (not required with XeTeX/LuaTeX):
if not self.is_xetex:
if not self.latex_encoding.startswith('utf8'):
table.update(CharMaps.unsupported_unicode)
table.update(CharMaps.utf8_supported_unicode)
table.update(CharMaps.textcomp)
table.update(CharMaps.pifont)
# Characters that require a feature/package to render
for ch in text:
cp = ord(ch)
if cp in CharMaps.textcomp and not self.fallback_stylesheet:
self.requirements['textcomp'] = PreambleCmds.textcomp
elif cp in CharMaps.pifont:
self.requirements['pifont'] = '\\usepackage{pifont}'
# preamble-definitions for unsupported Unicode characters
elif (self.latex_encoding == 'utf8'
and cp in CharMaps.unsupported_unicode):
self.requirements['_inputenc'+str(cp)] = (
'\\DeclareUnicodeCharacter{%04X}{%s}'
% (cp, CharMaps.unsupported_unicode[cp]))
text = text.translate(table)
# Break up input ligatures e.g. '--' to '-{}-'.
if not self.is_xetex: # Not required with xetex/luatex
separate_chars = '-'
# In monospace-font, we also separate ',,', '``' and "''" and some
# other characters which can't occur in non-literal text.
if self.literal:
separate_chars += ',`\'"<>'
for char in separate_chars * 2:
# Do it twice ("* 2") because otherwise we would replace
# '---' by '-{}--'.
text = text.replace(char + char, char + '{}' + char)
# Literal line breaks (in address or literal blocks):
if self.insert_newline:
lines = text.split('\n')
# Add a protected space to blank lines (except the last)
# to avoid ``! LaTeX Error: There's no line here to end.``
for i, line in enumerate(lines[:-1]):
if not line.lstrip():
lines[i] += '~'
text = (r'\\' + '\n').join(lines)
if self.literal and not self.insert_non_breaking_blanks:
# preserve runs of spaces but allow wrapping
text = text.replace(' ', ' ~')
return text
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? -> update (use template) or delete
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
    def is_inline(self, node):
        """Check whether a node represents an inline or block-level element"""
        # inline context == the parent can contain text directly
        return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
# so we raise it explicitely
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True, protect=False):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \\phantomsection.
If `protect` is True, the \\label cmd is made robust.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if protect:
labels = ['\\protect'+label for label in labels]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
def set_align_from_classes(self, node):
"""Convert ``align-*`` class arguments into alignment args."""
# separate:
align = [cls for cls in node['classes'] if cls.startswith('align-')]
if align:
node['align'] = align[-1].replace('align-', '')
node['classes'] = [cls for cls in node['classes']
if not cls.startswith('align-')]
def insert_align_declaration(self, node, default=None):
align = node.get('align', default)
if align == 'left':
self.out.append('\\raggedright\n')
elif align == 'center':
self.out.append('\\centering\n')
elif align == 'right':
self.out.append('\\raggedleft\n')
    def duclass_open(self, node):
        """Open a group and insert declarations for class values."""
        # start on a fresh line unless the parent (a "compound") already did
        if not isinstance(node.parent, nodes.compound):
            self.out.append('\n')
        for cls in node['classes']:
            if cls.startswith('language-'):
                # 'language-xy' classes switch the Babel language instead
                language = self.babel.language_name(cls[9:])
                if language:
                    self.babel.otherlanguages[language] = True
                    self.out.append('\\begin{selectlanguage}{%s}\n' % language)
            elif isinstance(node, nodes.table) and cls in Writer.table_style_values:
                # table-style classes are handled by the table writer
                pass
            else:
                if not self.fallback_stylesheet:
                    self.fallbacks['DUclass'] = PreambleCmds.duclass
                self.out.append('\\begin{DUclass}{%s}\n' % cls)
    def duclass_close(self, node):
        """Close a group of class declarations."""
        # close in reverse order of opening (nesting must match)
        for cls in reversed(node['classes']):
            if cls.startswith('language-'):
                language = self.babel.language_name(cls[9:])
                if language:
                    self.out.append('\\end{selectlanguage}\n')
            elif isinstance(node, nodes.table) and cls in Writer.table_style_values:
                pass
            else:
                if not self.fallback_stylesheet:
                    self.fallbacks['DUclass'] = PreambleCmds.duclass
                self.out.append('\\end{DUclass}\n')
    def push_output_collector(self, new_out):
        """Redirect output to `new_out`, remembering the current collector."""
        self.out_stack.append(self.out)
        self.out = new_out
    def pop_output_collector(self):
        """Restore the collector saved by `push_output_collector`."""
        self.out = self.out_stack.pop()
    def term_postfix(self, node):
        """
        Return LaTeX code required between term or field name and content.
        In a LaTeX "description" environment (used for definition
        lists and non-docinfo field lists), a ``\\leavevmode``
        between an item's label and content ensures the correct
        placement of certain block constructs.
        """
        # find the first "visible" child; the for-else fires when there
        # is none (all children invisible) -> no postfix needed
        for child in node:
            if not isinstance(child, (nodes.Invisible, nodes.footnote,
                                      nodes.citation)):
                break
        else:
            return ''
        if isinstance(child, (nodes.image)):
            return '\\leavevmode\n' # Images get an additional newline.
        if isinstance(child, (nodes.container, nodes.compound)):
            # containers are transparent: recurse into them
            return self.term_postfix(child)
        if not isinstance(child,
                          (nodes.paragraph, nodes.math_block)):
            return r'\leavevmode'
        return ''
    # Visitor methods
    # ---------------
    def visit_Text(self, node):
        """Append the LaTeX-escaped text content of a Text node."""
        self.out.append(self.encode(node.astext()))
    def depart_Text(self, node):
        pass
    def visit_abbreviation(self, node):
        """Render an abbreviation as an inline with class 'abbreviation'."""
        node['classes'].insert(0, 'abbreviation')
        self.visit_inline(node)
    def depart_abbreviation(self, node):
        self.depart_inline(node)
    def visit_acronym(self, node):
        """Render an acronym as an inline with class 'acronym'."""
        node['classes'].insert(0, 'acronym')
        self.visit_inline(node)
    def depart_acronym(self, node):
        self.depart_inline(node)
    def visit_address(self, node):
        """Handle the 'address' bibliographic field."""
        self.visit_docinfo_item(node, 'address')
    def depart_address(self, node):
        self.depart_docinfo_item(node)
    def visit_admonition(self, node):
        """Open a DUadmonition environment (or legacy \\DUadmonition cmd)."""
        # strip the generic 'admonition' from the list of classes
        node['classes'] = [cls for cls in node['classes']
                           if cls != 'admonition']
        if self.settings.legacy_class_functions:
            self.fallbacks['admonition'] = PreambleCmds.admonition_legacy
            if 'error' in node['classes']:
                self.fallbacks['error'] = PreambleCmds.error_legacy
            self.out.append('\n\\DUadmonition[%s]{' % ','.join(node['classes']))
            return
        if not self.fallback_stylesheet:
            self.fallbacks['admonition'] = PreambleCmds.admonition
        if 'error' in node['classes'] and not self.fallback_stylesheet:
            self.fallbacks['error'] = PreambleCmds.error
        self.duclass_open(node)
        self.out.append('\\begin{DUadmonition}')
    def depart_admonition(self, node):
        """Close the admonition opened by `visit_admonition`."""
        if self.settings.legacy_class_functions:
            self.out.append('}\n')
            return
        self.out.append('\\end{DUadmonition}\n')
        self.duclass_close(node)
    def visit_author(self, node):
        """Record the author for PDF metadata, then treat as docinfo item."""
        self.pdfauthor.append(self.attval(node.astext()))
        self.visit_docinfo_item(node, 'author')
    def depart_author(self, node):
        self.depart_docinfo_item(node)
    def visit_authors(self, node):
        # not used: visit_author is called anyway for each author.
        pass
    def depart_authors(self, node):
        pass
    def visit_block_quote(self, node):
        """Open a LaTeX "quote" environment."""
        self.duclass_open(node)
        self.out.append('\\begin{quote}')
    def depart_block_quote(self, node):
        self.out.append('\\end{quote}\n')
        self.duclass_close(node)
    def visit_bullet_list(self, node):
        """Open an "itemize" environment (bare "list" inside a ToC)."""
        self.duclass_open(node)
        if self.is_toc_list:
            self.out.append('\\begin{list}{}{}')
        else:
            self.out.append('\\begin{itemize}')
    def depart_bullet_list(self, node):
        if self.is_toc_list:
            self.out.append('\\end{list}\n')
        else:
            self.out.append('\\end{itemize}\n')
        self.duclass_close(node)
    def visit_superscript(self, node):
        """Typeset with \\textsuperscript, honouring class arguments."""
        self.out.append(r'\textsuperscript{')
        if node['classes']:
            self.visit_inline(node)
    def depart_superscript(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    def visit_subscript(self, node):
        """Typeset with \\textsubscript, honouring class arguments."""
        self.out.append(r'\textsubscript{')
        if node['classes']:
            self.visit_inline(node)
    def depart_subscript(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    def visit_caption(self, node):
        """Open a \\caption command (inside a figure/table float)."""
        self.out.append('\n\\caption{')
    def depart_caption(self, node):
        self.out.append('}\n')
    def visit_title_reference(self, node):
        """Typeset a title reference via the \\DUroletitlereference macro."""
        if not self.fallback_stylesheet:
            self.fallbacks['titlereference'] = PreambleCmds.titlereference
        self.out.append(r'\DUroletitlereference{')
        if node['classes']:
            self.visit_inline(node)
    def depart_title_reference(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    def visit_citation(self, node):
        """Collect a citation for \\bibitem output, or open a figure float."""
        if self._use_latex_citations:
            # gather label + text into a private collector; flushed in depart
            self.push_output_collector([])
        else:
            ## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
            self.out.append(r'\begin{figure}[b]')
            self.append_hypertargets(node)
    def depart_citation(self, node):
        if self._use_latex_citations:
            # TODO: normalize label
            # collector layout: first entry is the label, rest is the text
            label = self.out[0]
            text = ''.join(self.out[1:])
            self._bibitems.append([label, text])
            self.pop_output_collector()
        else:
            self.out.append('\\end{figure}\n')
    def visit_citation_reference(self, node):
        """Open a \\cite command or a hyperlinked bracket reference."""
        if self._use_latex_citations:
            if not self.inside_citation_reference_label:
                self.out.append(r'\cite{')
                self.inside_citation_reference_label = 1
            else:
                # a follow-up citation: drop the separating whitespace that
                # depart_citation_reference left in place of the closing '}'
                assert self.out[-1] in (' ', '\n'),\
                       'unexpected non-whitespace while in reference label'
                del self.out[-1]
        else:
            href = ''
            if 'refid' in node:
                href = node['refid']
            elif 'refname' in node:
                href = self.document.nameids[node['refname']]
            self.out.append('\\hyperlink{%s}{[' % href)
    def depart_citation_reference(self, node):
        # TODO: normalize labels
        if self._use_latex_citations:
            followup_citation = False
            # check for a following citation separated by a space or newline
            sibling = node.next_node(descend=False, siblings=True)
            if (isinstance(sibling, nodes.Text)
                and sibling.astext() in (' ', '\n')):
                sibling2 = sibling.next_node(descend=False, siblings=True)
                if isinstance(sibling2, nodes.citation_reference):
                    followup_citation = True
            if followup_citation:
                # merge adjacent citations into one \cite{a,b}
                self.out.append(',')
            else:
                self.out.append('}')
                self.inside_citation_reference_label = False
        else:
            self.out.append(']}')
    def visit_classifier(self, node):
        """Open the '(bold)' wrapper for a definition-list classifier."""
        self.out.append('(\\textbf{')
    def depart_classifier(self, node):
        self.out.append('})')
    def visit_colspec(self, node):
        # column geometry is handled by the active table helper
        self.active_table.visit_colspec(node)
    def depart_colspec(self, node):
        pass
    def visit_comment(self, node):
        """Write the comment into the LaTeX source as '%'-comment lines."""
        if not isinstance(node.parent, nodes.compound):
            self.out.append('\n')
        # Precede every line with a comment sign, wrap in newlines
        self.out.append('%% %s\n' % node.astext().replace('\n', '\n% '))
        raise nodes.SkipNode
    def depart_comment(self, node):
        pass
    def visit_compound(self, node):
        """Open a 'compound' container (a DUclass group)."""
        if isinstance(node.parent, nodes.compound):
            self.out.append('\n')
        node['classes'].insert(0, 'compound')
        self.duclass_open(node)
    def depart_compound(self, node):
        self.duclass_close(node)
    def visit_contact(self, node):
        """Handle the 'contact' bibliographic field."""
        self.visit_docinfo_item(node, 'contact')
    def depart_contact(self, node):
        self.depart_docinfo_item(node)
    def visit_container(self, node):
        """Open a generic container (a DUclass group)."""
        self.duclass_open(node)
    def depart_container(self, node):
        self.duclass_close(node)
    def visit_copyright(self, node):
        """Handle the 'copyright' bibliographic field."""
        self.visit_docinfo_item(node, 'copyright')
    def depart_copyright(self, node):
        self.depart_docinfo_item(node)
    def visit_date(self, node):
        """Handle the 'date' bibliographic field."""
        self.visit_docinfo_item(node, 'date')
    def depart_date(self, node):
        self.depart_docinfo_item(node)
    def visit_decoration(self, node):
        # header and footer
        pass
    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        pass
    def depart_definition(self, node):
        self.out.append('\n') # TODO: just pass?
    def visit_definition_list(self, node):
        """Open a LaTeX "description" environment."""
        self.duclass_open(node)
        self.out.append('\\begin{description}\n')
    def depart_definition_list(self, node):
        self.out.append('\\end{description}\n')
        self.duclass_close(node)
    def visit_definition_list_item(self, node):
        pass
    def depart_definition_list_item(self, node):
        pass
    def visit_description(self, node):
        # separate option from description with a space
        self.out.append(' ')
    def depart_description(self, node):
        pass
    def visit_docinfo(self, node):
        """Collect docinfo fields in a separate collector list."""
        self.push_output_collector(self.docinfo)
    def depart_docinfo(self, node):
        """Wrap collected docinfo fields in a centered tabularx table."""
        self.pop_output_collector()
        # Some items (e.g. author) end up at other places
        if self.docinfo:
            # tabularx: automatic width of columns, no page breaks allowed.
            self.requirements['tabularx'] = r'\usepackage{tabularx}'
            if not self.fallback_stylesheet:
                self.fallbacks['_providelength'] = PreambleCmds.providelength
                self.fallbacks['docinfo'] = PreambleCmds.docinfo
            #
            self.docinfo.insert(0, '\n% Docinfo\n'
                                '\\begin{center}\n'
                                '\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
            self.docinfo.append('\\end{tabularx}\n'
                                '\\end{center}\n')
    def visit_docinfo_item(self, node, name):
        """Render one docinfo field, either via \\maketitle or as a table row."""
        if self.use_latex_docinfo:
            if name in ('author', 'organization', 'contact', 'address'):
                # We attach these to the last author. If any of them precedes
                # the first author, put them in a separate "author" group
                # (in lack of better semantics).
                if name == 'author' or not self.author_stack:
                    self.author_stack.append([])
                if name == 'address': # newlines are meaningful
                    self.insert_newline = True
                    text = self.encode(node.astext())
                    self.insert_newline = False
                else:
                    text = self.attval(node.astext())
                self.author_stack[-1].append(text)
                raise nodes.SkipNode
            elif name == 'date':
                self.date.append(self.attval(node.astext()))
                raise nodes.SkipNode
        # table-row rendering: "label: & value \\"
        self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
        if name == 'address':
            self.insert_newline = True
            self.out.append('{\\raggedright\n')
            self.context.append(' } \\\\\n')
        else:
            self.context.append(' \\\\\n')
    def depart_docinfo_item(self, node):
        self.out.append(self.context.pop())
        # for address we did set insert_newline
        self.insert_newline = False
    def visit_doctest_block(self, node):
        """Doctest blocks are rendered like literal blocks."""
        self.visit_literal_block(node)
    def depart_doctest_block(self, node):
        self.depart_literal_block(node)
    def visit_document(self, node):
        """Collect title labels for a titled document."""
        # titled document?
        if (self.use_latex_docinfo or len(node) and
            isinstance(node[0], nodes.title)):
            protect = (self.settings.documentclass == 'memoir')
            self.title_labels += self.ids_to_labels(node, set_anchor=False,
                                                    protect=protect)
    def depart_document(self, node):
        """Finish header/preamble setup and append title and bibliography."""
        # Complete header with information gained from walkabout
        # * language setup
        if (self.babel.otherlanguages or
            self.babel.language not in ('', 'english')):
            self.requirements['babel'] = self.babel()
        # * conditional requirements (before style sheet)
        self.requirements = self.requirements.sortedvalues()
        # * conditional fallback definitions (after style sheet)
        self.fallbacks = self.fallbacks.sortedvalues()
        # * PDF properties
        self.pdfsetup.append(PreambleCmds.linking % self.hyperref_options)
        if self.pdfauthor:
            authors = self.author_separator.join(self.pdfauthor)
            self.pdfinfo.append(' pdfauthor={%s}' % authors)
        if self.pdfinfo:
            self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
        # Complete body
        # * document title (with "use_latex_docinfo" also
        #   'author', 'organization', 'contact', 'address' and 'date')
        if self.title or (
           self.use_latex_docinfo and (self.author_stack or self.date)):
            # \title (empty \title prevents error with \maketitle)
            title = [''.join(self.title)]
            if self.title:
                title += self.title_labels
            if self.subtitle:
                title += [r'\\',
                          r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle)
                         ] + self.subtitle_labels
            self.titledata.append(r'\title{%s}' % '%\n '.join(title))
            # \author (empty \author prevents warning with \maketitle)
            authors = ['\\\\\n'.join(author_entry)
                       for author_entry in self.author_stack]
            self.titledata.append(r'\author{%s}' %
                                  ' \\and\n'.join(authors))
            # \date (empty \date prevents defaulting to \today)
            self.titledata.append(r'\date{%s}' % ', '.join(self.date))
            # \maketitle in the body formats title with LaTeX
            self.body_pre_docinfo.append('\\maketitle\n')
        # * bibliography
        #   TODO insertion point of bibliography should be configurable.
        if self._use_latex_citations and len(self._bibitems)>0:
            if not self.bibtex:
                # hand-written thebibliography: widest label sets the indent
                widest_label = ''
                for bi in self._bibitems:
                    if len(widest_label)<len(bi[0]):
                        widest_label = bi[0]
                self.out.append('\n\\begin{thebibliography}{%s}\n' %
                                widest_label)
                for bi in self._bibitems:
                    # cite_key: underscores must not be escaped
                    cite_key = bi[0].replace(r'\_', '_')
                    self.out.append('\\bibitem[%s]{%s}{%s}\n' %
                                    (bi[0], cite_key, bi[1]))
                self.out.append('\\end{thebibliography}\n')
            else:
                self.out.append('\n\\bibliographystyle{%s}\n' %
                                self.bibtex[0])
                self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
        # * make sure to generate a toc file if needed for local contents:
        if 'minitoc' in self.requirements and not self.has_latex_toc:
            self.out.append('\n\\faketableofcontents % for local ToCs\n')
    def visit_emphasis(self, node):
        """Typeset emphasized text with \\emph, honouring class arguments."""
        self.out.append('\\emph{')
        if node['classes']:
            self.visit_inline(node)
    def depart_emphasis(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    # Append column delimiters and advance the column counter
    # while the current cell is a multi-row continuation.
    def insert_additional_table_colum_delimiters(self):
        """Skip over columns spanned by multirow cells from rows above."""
        while self.active_table.get_rowspan(
                self.active_table.get_entry_number()):
            self.out.append(' & ')
            self.active_table.visit_entry() # increment cell count
    def visit_entry(self, node):
        """Open a table cell: '&' separator, multirow/multicolumn, bolding."""
        # cell separation
        if self.active_table.get_entry_number() == 0:
            # first cell of a row: only skip columns spanned from above
            self.insert_additional_table_colum_delimiters()
        else:
            self.out.append(' & ')
        # multirow, multicolumn
        if 'morerows' in node and 'morecols' in node:
            raise NotImplementedError('Cells that '
                'span multiple rows *and* columns currently not supported, sorry.')
            # TODO: should be possible with LaTeX, see e.g.
            # http://texblog.org/2012/12/21/multi-column-and-multi-row-cells-in-latex-tables/
        # multirow in LaTeX simply will enlarge the cell over several rows
        # (the following n if n is positive, the former if negative).
        if 'morerows' in node:
            self.requirements['multirow'] = r'\usepackage{multirow}'
            mrows = node['morerows'] + 1
            # remember the rowspan so later rows insert extra '&'s
            self.active_table.set_rowspan(
                self.active_table.get_entry_number(), mrows)
            self.out.append('\\multirow{%d}{%s}{' %
                            (mrows, self.active_table.get_column_width()))
            self.context.append('}')
        elif 'morecols' in node:
            # the vertical bar before column is missing if it is the first
            # column. the one after always.
            if self.active_table.get_entry_number() == 0:
                bar1 = self.active_table.get_vertical_bar()
            else:
                bar1 = ''
            mcols = node['morecols'] + 1
            self.out.append('\\multicolumn{%d}{%s%s%s}{' %
                            (mcols, bar1,
                             self.active_table.get_multicolumn_width(
                                 self.active_table.get_entry_number(),
                                 mcols),
                             self.active_table.get_vertical_bar()))
            self.context.append('}')
        else:
            self.context.append('')
        # bold header/stub-column
        if len(node) and (isinstance(node.parent.parent, nodes.thead)
                          or self.active_table.is_stub_column()):
            self.out.append('\\textbf{')
            self.context.append('}')
        else:
            self.context.append('')
        # if line ends with '{', mask line break to prevent spurious whitespace
        if (not self.active_table.colwidths_auto
            and self.out[-1].endswith("{")
            and node.astext()):
            self.out.append("%")
        self.active_table.visit_entry() # increment cell count
    def depart_entry(self, node):
        """Close the cell; pop markers pushed by `visit_entry` (LIFO)."""
        self.out.append(self.context.pop()) # header / not header
        self.out.append(self.context.pop()) # multirow/column
        # insert extra "&"s, if following rows are spanned from above:
        self.insert_additional_table_colum_delimiters()
    def visit_row(self, node):
        self.active_table.visit_row()
    def depart_row(self, node):
        self.out.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# enumeration styles:
types = {'': '',
'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman'}
# the 4 default LaTeX enumeration labels: präfix, enumtype, suffix,
labels = [('', 'arabic', '.'), # 1.
('(', 'alph', ')'), # (a)
('', 'roman', '.'), # i.
('', 'Alph', '.')] # A.
prefix = ''
if self.compound_enumerators:
if (self.section_prefix_for_enumerators and self.section_level
and not self._enumeration_counters):
prefix = '.'.join([str(n) for n in
self._section_number[:self.section_level]]
) + self.section_enumerator_separator
if self._enumeration_counters:
prefix += self._enumeration_counters[-1]
# TODO: use LaTeX default for unspecified label-type?
# (needs change of parser)
prefix += node.get('prefix', '')
enumtype = types[node.get('enumtype' '')]
suffix = node.get('suffix', '')
enumeration_level = len(self._enumeration_counters)+1
counter_name = 'enum' + roman.toRoman(enumeration_level).lower()
label = r'%s\%s{%s}%s' % (prefix, enumtype, counter_name, suffix)
self._enumeration_counters.append(label)
self.duclass_open(node)
if enumeration_level <= 4:
self.out.append('\\begin{enumerate}')
if (prefix, enumtype, suffix
) != labels[enumeration_level-1]:
self.out.append('\n\\renewcommand{\\label%s}{%s}' %
(counter_name, label))
else:
self.fallbacks[counter_name] = '\\newcounter{%s}' % counter_name
self.out.append('\\begin{list}')
self.out.append('{%s}' % label)
self.out.append('{\\usecounter{%s}}' % counter_name)
if 'start' in node:
self.out.append('\n\\setcounter{%s}{%d}' %
(counter_name, node['start']-1))
    def depart_enumerated_list(self, node):
        """Close the environment opened by `visit_enumerated_list`."""
        if len(self._enumeration_counters) <= 4:
            self.out.append('\\end{enumerate}\n')
        else:
            self.out.append('\\end{list}\n')
        self.duclass_close(node)
        self._enumeration_counters.pop()
    def visit_field(self, node):
        # output is done in field_body, field_name
        pass
    def depart_field(self, node):
        pass
    def visit_field_body(self, node):
        """Separate field name and body (\\leavevmode where required)."""
        if not isinstance(node.parent.parent, nodes.docinfo):
            self.out.append(self.term_postfix(node))
    def depart_field_body(self, node):
        # in the docinfo table, every field ends its table row
        if self.out is self.docinfo:
            self.out.append(r'\\'+'\n')
    def visit_field_list(self, node):
        """Open a DUfieldlist environment (unless inside docinfo)."""
        self.duclass_open(node)
        if self.out is not self.docinfo:
            if not self.fallback_stylesheet:
                self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
            self.out.append('\\begin{DUfieldlist}')
    def depart_field_list(self, node):
        if self.out is not self.docinfo:
            self.out.append('\\end{DUfieldlist}\n')
        self.duclass_close(node)
    def visit_field_name(self, node):
        """Render the field name as docinfo table label or list item label."""
        if self.out is self.docinfo:
            self.out.append('\\textbf{')
        else:
            # Commands with optional args inside an optional arg must be put
            # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
            self.out.append('\n\\item[{')
    def depart_field_name(self, node):
        if self.out is self.docinfo:
            self.out.append('}: &')
        else:
            self.out.append(':}]')
    def visit_figure(self, node):
        """Open a LaTeX "figure" float."""
        self.requirements['float'] = PreambleCmds.float
        self.duclass_open(node)
        # The 'align' attribute sets the "outer alignment",
        # for "inner alignment" use LaTeX default alignment (similar to HTML)
        alignment = node.attributes.get('align', 'center')
        if alignment != 'center':
            # The LaTeX "figure" environment always uses the full linewidth,
            # so "outer alignment" is ignored. Just write a comment.
            # TODO: use the wrapfigure environment?
            self.out.append('\\begin{figure} %% align = "%s"\n' % alignment)
        else:
            self.out.append('\\begin{figure}\n')
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def depart_figure(self, node):
        self.out.append('\\end{figure}\n')
        self.duclass_close(node)
    def visit_footer(self, node):
        """Collect footer content into a \\DUfooter macro definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUfooter}{')
    def depart_footer(self, node):
        self.out.append('}')
        # stash the complete definition as a preamble requirement
        self.requirements['~footer'] = ''.join(self.out)
        self.pop_output_collector()
    def visit_footnote(self, node):
        """Write footnote text with the \\DUfootnotetext fallback macro."""
        try:
            backref = node['backrefs'][0]
        except IndexError:
            backref = node['ids'][0] # no backref, use self-ref instead
        if self.docutils_footnotes:
            if not self.fallback_stylesheet:
                self.fallbacks['footnotes'] = PreambleCmds.footnotes
            num = node[0].astext()
            if self.settings.footnote_references == 'brackets':
                num = '[%s]' % num
            self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
                            (node['ids'][0], backref, self.encode(num)))
            if node['ids'] == node['names']:
                self.out += self.ids_to_labels(node)
            # mask newline to prevent spurious whitespace if paragraph follows:
            if node[1:] and isinstance(node[1], nodes.paragraph):
                self.out.append('%')
        # TODO: "real" LaTeX \footnote{}s (see visit_footnotes_reference())
    def depart_footnote(self, node):
        self.out.append('}\n')
    def visit_footnote_reference(self, node):
        """Write a footnote mark (brackets or \\DUfootnotemark)."""
        href = ''
        if 'refid' in node:
            href = node['refid']
        elif 'refname' in node:
            href = self.document.nameids[node['refname']]
        # if not self.docutils_footnotes:
        #     TODO: insert footnote content at (or near) this place
        #     print("footnote-ref to", node['refid'])
        #     footnotes = (self.document.footnotes +
        #                  self.document.autofootnotes +
        #                  self.document.symbol_footnotes)
        #     for footnote in footnotes:
        #         # print(footnote['ids'])
        #         if node.get('refid', '') in footnote['ids']:
        #             print('matches', footnote['ids'])
        format = self.settings.footnote_references
        if format == 'brackets':
            self.append_hypertargets(node)
            self.out.append('\\hyperlink{%s}{[' % href)
            self.context.append(']}')
        else:
            if not self.fallback_stylesheet:
                self.fallbacks['footnotes'] = PreambleCmds.footnotes
            self.out.append(r'\DUfootnotemark{%s}{%s}{' %
                            (node['ids'][0], href))
            self.context.append('}')
    def depart_footnote_reference(self, node):
        self.out.append(self.context.pop())
    # footnote/citation label
    def label_delim(self, node, bracket, superscript):
        """Write the delimiter around a footnote or citation label."""
        if isinstance(node.parent, nodes.footnote):
            # footnote labels are emitted by visit_footnote
            raise nodes.SkipNode
        else:
            assert isinstance(node.parent, nodes.citation)
            if not self._use_latex_citations:
                self.out.append(bracket)
    def visit_label(self, node):
        """footnote or citation label: in brackets or as superscript"""
        self.label_delim(node, '[', '\\textsuperscript{')
    def depart_label(self, node):
        self.label_delim(node, ']', '}')
    # elements generated by the framework e.g. section numbers.
    def visit_generated(self, node):
        pass
    def depart_generated(self, node):
        pass
    def visit_header(self, node):
        """Collect header content into a \\DUheader macro definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUheader}{')
    def depart_header(self, node):
        self.out.append('}')
        self.requirements['~header'] = ''.join(self.out)
        self.pop_output_collector()
def to_latex_length(self, length_str, pxunit=None):
"""Convert `length_str` with rst lenght to LaTeX length
"""
if pxunit is not None:
sys.stderr.write('deprecation warning: LaTeXTranslator.to_latex_length()'
' option `pxunit` will be removed.')
match = re.match(r'(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
elif self.is_xetex and unit == 'px':
# XeTeX does not know the length unit px.
# Use \pdfpxdimen, the macro to set the value of 1 px in pdftex.
# This way, configuring works the same for pdftex and xetex.
if not self.fallback_stylesheet:
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['px'] = '\n\\DUprovidelength{\\pdfpxdimen}{1bp}\n'
length_str = r'%s\pdfpxdimen' % value
return length_str
    def visit_image(self, node):
        """Write an \\includegraphics command with size/alignment options."""
        self.requirements['graphicx'] = self.graphicx_package
        attrs = node.attributes
        # Convert image URI to a local file path
        imagepath = url2pathname(attrs['uri']).replace('\\', '/')
        # alignment defaults:
        if not 'align' in attrs:
            # Set default align of image in a figure to 'center'
            if isinstance(node.parent, nodes.figure):
                attrs['align'] = 'center'
            self.set_align_from_classes(node)
        # pre- and postfix (prefix inserted in reverse order)
        pre = []
        post = []
        include_graphics_options = []
        align_codes = {
            # inline images: by default latex aligns the bottom.
            'bottom': ('', ''),
            'middle': (r'\raisebox{-0.5\height}{', '}'),
            'top': (r'\raisebox{-\height}{', '}'),
            # block level images:
            'center': (r'\noindent\makebox[\linewidth][c]{', '}'),
            'left': (r'\noindent{', r'\hfill}'),
            'right': (r'\noindent{\hfill', '}'),}
        if 'align' in attrs:
            # TODO: warn or ignore non-applicable alignment settings?
            try:
                align_code = align_codes[attrs['align']]
                pre.append(align_code[0])
                post.append(align_code[1])
            except KeyError:
                pass # TODO: warn?
        if 'height' in attrs:
            include_graphics_options.append('height=%s' %
                self.to_latex_length(attrs['height']))
        if 'scale' in attrs:
            include_graphics_options.append('scale=%f' %
                (attrs['scale'] / 100.0))
        if 'width' in attrs:
            include_graphics_options.append('width=%s' %
                self.to_latex_length(attrs['width']))
        # surround block-level images with newlines
        if not (self.is_inline(node) or
                isinstance(node.parent, (nodes.figure, nodes.compound))):
            pre.append('\n')
        if not (self.is_inline(node) or
                isinstance(node.parent, nodes.figure)):
            post.append('\n')
        pre.reverse()
        self.out.extend(pre)
        options = ''
        if include_graphics_options:
            options = '[%s]' % (','.join(include_graphics_options))
        self.out.append('\\includegraphics%s{%s}' % (options, imagepath))
        self.out.extend(post)
    def depart_image(self, node):
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def visit_inline(self, node): # <span>, i.e. custom roles
        """Open one \\DUrole (or \\foreignlanguage) group per class value."""
        for cls in node['classes']:
            if cls.startswith('language-'):
                language = self.babel.language_name(cls[9:])
                if language:
                    self.babel.otherlanguages[language] = True
                    self.out.append(r'\foreignlanguage{%s}{' % language)
            else:
                if not self.fallback_stylesheet:
                    self.fallbacks['inline'] = PreambleCmds.inline
                self.out.append(r'\DUrole{%s}{' % cls)
    def depart_inline(self, node):
        # close one group per class opened in visit_inline
        self.out.append('}' * len(node['classes']))
    def visit_legend(self, node):
        """Open a DUlegend environment (figure legend)."""
        if not self.fallback_stylesheet:
            self.fallbacks['legend'] = PreambleCmds.legend
        self.out.append('\\begin{DUlegend}')
    def depart_legend(self, node):
        self.out.append('\\end{DUlegend}\n')
    def visit_line(self, node):
        self.out.append(r'\item[] ')
    def depart_line(self, node):
        self.out.append('\n')
    def visit_line_block(self, node):
        """Open a DUlineblock environment (indented when nested)."""
        if not self.fallback_stylesheet:
            self.fallbacks['_providelength'] = PreambleCmds.providelength
            self.fallbacks['lineblock'] = PreambleCmds.lineblock
        self.set_align_from_classes(node)
        if isinstance(node.parent, nodes.line_block):
            self.out.append('\\item[]\n'
                            '\\begin{DUlineblock}{\\DUlineblockindent}\n')
            # nested line-blocks cannot be given class arguments
        else:
            self.duclass_open(node)
            self.out.append('\\begin{DUlineblock}{0em}\n')
            self.insert_align_declaration(node)
    def depart_line_block(self, node):
        self.out.append('\\end{DUlineblock}\n')
        self.duclass_close(node)
    def visit_list_item(self, node):
        self.out.append('\n\\item ')
    def depart_list_item(self, node):
        pass
    def visit_literal(self, node):
        """Typeset inline literals with \\texttt (highlighted for code)."""
        self.literal = True
        if ('code' in node['classes'] and
            self.settings.syntax_highlight != 'none'):
            self.requirements['color'] = PreambleCmds.color
            if not self.fallback_stylesheet:
                self.fallbacks['code'] = PreambleCmds.highlight_rules
        self.out.append('\\texttt{')
        if node['classes']:
            self.visit_inline(node)
    def depart_literal(self, node):
        self.literal = False
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    # Literal blocks are used for '::'-prefixed literal-indented
    # blocks of text, where the inline markup is not recognized,
    # but are also the product of the "parsed-literal" directive,
    # where the markup is respected.
    #
    # In both cases, we want to use a typewriter/monospaced typeface.
    # For "real" literal-blocks, we can use \verbatim, while for all
    # the others we must use \ttfamily and \raggedright.
    #
    # We can distinguish between the two kinds by the number of
    # siblings that compose this node: if it is composed by a
    # single element, it's either
    # * a real one,
    # * a parsed-literal that does not contain any markup, or
    # * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
    def visit_literal_block(self, node):
        """Render a literal block.

        Corresponding rST elements: literal block, parsed-literal, code.

        Chooses between a verbatim-like environment (only safe for plain
        text), the alltt environment, and a plain ``\\ttfamily`` fallback,
        and pushes the matching closers onto self.context for
        depart_literal_block().
        """
        # Preamble requirements for each supported literal environment:
        packages = {'lstlisting': r'\usepackage{listings}' '\n'
                    r'\lstset{xleftmargin=\leftmargin}',
                    'listing': r'\usepackage{moreverb}',
                    'Verbatim': r'\usepackage{fancyvrb}',
                    'verbatimtab': r'\usepackage{moreverb}'}
        literal_env = self.literal_block_env
        # Check, if it is possible to use a literal-block environment
        _plaintext = self.is_plaintext(node)
        _in_table = self.active_table.is_open()
        # TODO: fails if normal text precedes the literal block.
        # Check parent node instead?
        _autowidth_table = _in_table and self.active_table.colwidths_auto
        _use_env = _plaintext and not isinstance(node.parent,
                    (nodes.footnote, nodes.admonition, nodes.system_message))
        _use_listings = (literal_env == 'lstlisting') and _use_env
        # Labels and classes:
        if node.get('ids'):
            self.out += ['\n'] + self.ids_to_labels(node)
        self.duclass_open(node)
        # Highlight code?
        if (not _plaintext
            and 'code' in node['classes']
            and self.settings.syntax_highlight != 'none'):
            self.requirements['color'] = PreambleCmds.color
            if not self.fallback_stylesheet:
                self.fallbacks['code'] = PreambleCmds.highlight_rules
        # Wrap?
        if _in_table and _use_env and not _autowidth_table:
            # Wrap in minipage to prevent extra vertical space
            # with alltt and verbatim-like environments:
            self.fallbacks['ttem'] = '\n'.join(['',
                r'% character width in monospaced font',
                r'\newlength{\ttemwidth}',
                r'\settowidth{\ttemwidth}{\ttfamily M}'])
            # minipage width = length of the longest source line in "em":
            self.out.append('\\begin{minipage}{%d\\ttemwidth}\n' %
                            (max(len(line) for line in node.astext().split('\n'))))
            self.context.append('\n\\end{minipage}\n')
        elif not _in_table and not _use_listings:
            # Wrap in quote to set off vertically and indent
            self.out.append('\\begin{quote}\n')
            self.context.append('\n\\end{quote}\n')
        else:
            self.context.append('\n')
        # Use verbatim-like environment, if defined and possible
        # (in an auto-width table, only listings works):
        if literal_env and _use_env and (not _autowidth_table
                                         or _use_listings):
            try:
                self.requirements['literal_block'] = packages[literal_env]
            except KeyError:
                pass
            self.verbatim = True
            if _in_table and _use_listings:
                self.out.append('\\lstset{xleftmargin=0pt}\n')
            self.out.append('\\begin{%s}%s\n' %
                            (literal_env, self.literal_block_options))
            self.context.append('\n\\end{%s}' % literal_env)
        elif _use_env and not _autowidth_table:
            self.alltt = True
            self.requirements['alltt'] = r'\usepackage{alltt}'
            self.out.append('\\begin{alltt}\n')
            self.context.append('\n\\end{alltt}')
        else:
            # Fallback: no environment fits, encode the content by hand.
            self.literal = True
            self.insert_newline = True
            self.insert_non_breaking_blanks = True
            # \raggedright ensures leading blanks are respected but
            # leads to additional leading vspace if the first line
            # of the block is overfull :-(
            self.out.append('\\ttfamily\\raggedright\n')
            self.context.append('')
def depart_literal_block(self, node):
self.insert_non_breaking_blanks = False
self.insert_newline = False
self.literal = False
self.verbatim = False
self.alltt = False
self.out.append(self.context.pop())
self.out.append(self.context.pop())
self.duclass_close(node)
def visit_meta(self, node):
name = node.attributes.get('name')
content = node.attributes.get('content')
if not name or not content:
return
if name in ('author', 'creator', 'keywords', 'subject', 'title'):
# fields with dedicated hyperref options:
self.pdfinfo.append(' pdf%s={%s},'%(name, content))
elif name == 'producer':
self.pdfinfo.append(' addtopdfproducer={%s},'%content)
else:
# generic interface (case sensitive!)
self.pdfinfo.append(' pdfinfo={%s={%s}},'%(name, content))
def depart_meta(self, node):
pass
def visit_math(self, node, math_env='$'):
"""math role"""
if node['classes']:
self.visit_inline(node)
self.requirements['amsmath'] = r'\usepackage{amsmath}'
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if node.get('ids'):
math_code = '\n'.join([math_code] + self.ids_to_labels(node))
if math_env == '$':
if self.alltt:
wrapper = u'\\(%s\\)'
else:
wrapper = u'$%s$'
else:
wrapper = u'\n'.join(['%%',
r'\begin{%s}' % math_env,
'%s',
r'\end{%s}' % math_env])
self.out.append(wrapper % math_code)
if node['classes']:
self.depart_inline(node)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.out.append('\n\\item[')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.out.append('] ')
def visit_option_list(self, node):
if not self.fallback_stylesheet:
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['optionlist'] = PreambleCmds.optionlist
self.duclass_open(node)
self.out.append('\\begin{DUoptionlist}')
def depart_option_list(self, node):
self.out.append('\\end{DUoptionlist}\n')
self.duclass_close(node)
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.out.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
    def visit_paragraph(self, node):
        """Start a paragraph: emit a separating blank line where needed.

        No blank line is inserted when the paragraph
        * is first in a list item, description, compound, or container,
        * follows a non-paragraph node in a compound, or
        * sits in a table with auto-width columns (LaTeX merges there).
        """
        index = node.parent.index(node)
        if index == 0 and isinstance(node.parent,
                                     (nodes.list_item, nodes.description,
                                      nodes.compound, nodes.container)):
            pass
        elif (index > 0
              and isinstance(node.parent, nodes.compound)
              and not isinstance(node.parent[index - 1],
                                 (nodes.paragraph, nodes.compound))):
            pass
        elif self.active_table.colwidths_auto:
            if index == 1:  # second paragraph
                self.warn('LaTeX merges paragraphs in tables '
                          'with auto-sized columns!', base_node=node)
            if index > 0:
                self.out.append('\n')
        else:
            self.out.append('\n')
        # hyperlink labels for explicit ids:
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
        if node['classes']:
            self.visit_inline(node)
def depart_paragraph(self, node):
if node['classes']:
self.depart_inline(node)
if not self.active_table.colwidths_auto:
self.out.append('\n')
def visit_problematic(self, node):
self.requirements['color'] = PreambleCmds.color
self.out.append('%\n')
self.append_hypertargets(node)
self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
def depart_problematic(self, node):
self.out.append('}}')
def visit_raw(self, node):
if not 'latex' in node.get('format', '').split():
raise nodes.SkipNode
if not self.is_inline(node):
self.out.append('\n')
if node['classes']:
self.visit_inline(node)
# append "as-is" skipping any LaTeX-encoding
self.verbatim = True
def depart_raw(self, node):
self.verbatim = False
if node['classes']:
self.depart_inline(node)
if not self.is_inline(node):
self.out.append('\n')
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
def visit_reference(self, node):
# We need to escape #, \, and % if we use the URL in a command.
special_chars = {ord('#'): u'\\#',
ord('%'): u'\\%',
ord('\\'): u'\\\\',
}
# external reference (URL)
if 'refuri' in node:
href = unicode(node['refuri']).translate(special_chars)
# problematic chars double caret and unbalanced braces:
if href.find('^^') != -1 or self.has_unbalanced_braces(href):
self.error(
'External link "%s" not supported by LaTeX.\n'
' (Must not contain "^^" or unbalanced braces.)' % href)
if node['refuri'] == node.astext():
self.out.append(r'\url{%s}' % href)
raise nodes.SkipNode
self.out.append(r'\href{%s}{' % href)
return
# internal reference
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
if not self.is_inline(node):
self.out.append('\n')
self.out.append('\\hyperref[%s]{' % href)
if self._reference_label:
self.out.append('\\%s{%s}}' %
(self._reference_label, href.replace('#', '')))
raise nodes.SkipNode
def depart_reference(self, node):
self.out.append('}')
if not self.is_inline(node):
self.out.append('\n')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_rubric(self, node):
if not self.fallback_stylesheet:
self.fallbacks['rubric'] = PreambleCmds.rubric
# class wrapper would interfere with ``\section*"`` type commands
# (spacing/indent of first paragraph)
self.out.append('\n\\DUrubric{')
def depart_rubric(self, node):
self.out.append('}\n')
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
self.duclass_open(node)
self.requirements['color'] = PreambleCmds.color
if not self.fallback_stylesheet:
self.fallbacks['sidebar'] = PreambleCmds.sidebar
self.out.append('\\DUsidebar{')
def depart_sidebar(self, node):
self.out.append('}\n')
self.duclass_close(node)
attribution_formats = {'dash': (u'—', ''), # EM DASH
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.out.append('\\nopagebreak\n\n\\raggedleft ')
self.out.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.out.append(self.context.pop() + '\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
def depart_strong(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.subtitle)
if not self.fallback_stylesheet:
self.fallbacks['documentsubtitle'] = PreambleCmds.documentsubtitle
protect = (self.settings.documentclass == 'memoir')
self.subtitle_labels += self.ids_to_labels(node, set_anchor=False,
protect=protect)
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
self.d_class.section(self.section_level + 1))
else:
if not self.fallback_stylesheet:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
self.out.append('\n\\DUsubtitle{')
def depart_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.pop_output_collector()
else:
self.out.append('}\n')
def visit_system_message(self, node):
self.requirements['color'] = PreambleCmds.color
if not self.fallback_stylesheet:
self.fallbacks['title'] = PreambleCmds.title
if self.settings.legacy_class_functions:
self.fallbacks['title'] = PreambleCmds.title_legacy
node['classes'] = ['system-message']
self.visit_admonition(node)
if self.settings.legacy_class_functions:
self.out.append('\n\\DUtitle[system-message]{system-message\n')
else:
self.out.append('\n\\DUtitle{system-message\n')
self.append_hypertargets(node)
try:
line = ', line~%s' % node['line']
except KeyError:
line = ''
self.out.append('}\n\n{\\color{red}%s/%s} in \\texttt{%s}%s\n' %
(node['type'], node['level'],
self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
else:
backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
for (i, href) in enumerate(node['backrefs'])]
self.context.append('backrefs: ' + ' '.join(backrefs))
def depart_system_message(self, node):
self.out.append(self.context.pop())
self.depart_admonition(node)
def visit_table(self, node):
self.duclass_open(node)
self.requirements['table'] = PreambleCmds.table
if self.active_table.is_open():
self.table_stack.append(self.active_table)
# nesting longtable does not work (e.g. 2007-04-18)
# TODO: don't use a longtable or add \noindent before
# the next paragraph, when in a "compound paragraph".
self.active_table = Table(self, 'tabular')
# A longtable moves before \paragraph and \subparagraph
# section titles if it immediately follows them:
if (self.active_table._latex_type == 'longtable' and
isinstance(node.parent, nodes.section) and
node.parent.index(node) == 1 and
self.d_class.section(self.section_level).find('paragraph') != -1):
self.out.append('\\leavevmode')
self.active_table.open()
self.active_table.set_table_style(self.settings.table_style,
node['classes'])
if 'align' in node:
self.active_table.set('align', node['align'])
if self.active_table.borders == 'booktabs':
self.requirements['booktabs'] = r'\usepackage{booktabs}'
self.push_output_collector([])
def depart_table(self, node):
# wrap content in the right environment:
content = self.out
self.pop_output_collector()
try:
width = self.to_latex_length(node.attributes['width'])
except KeyError:
width = r'\linewidth'
if isinstance(node.parent, nodes.compound):
self.out.append('\n')
self.out.append(self.active_table.get_opening(width))
self.out += content
self.out.append(self.active_table.get_closing() + '\n')
self.active_table.close()
if len(self.table_stack)>0:
self.active_table = self.table_stack.pop()
# Insert hyperlabel after (long)table, as
# other places (beginning, caption) result in LaTeX errors.
if node.get('ids'):
self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
self.duclass_close(node)
    def visit_target(self, node):
        """Emit hyperlink labels for an explicit target node.

        Indirect targets (external hyperlink, resolved or unresolved
        internal link) produce no output of their own.
        """
        # Skip indirect targets:
        if ('refuri' in node       # external hyperlink
            or 'refid' in node     # resolved internal link
            or 'refname' in node): # unresolved internal link
            ## self.out.append('%% %s\n' % node) # for debugging
            return
        self.out.append('%\n')
        # do we need an anchor (\phantomsection)?
        set_anchor = not isinstance(node.parent, (nodes.caption, nodes.title))
        # TODO: where else can/must we omit the \phantomsection?
        self.out += self.ids_to_labels(node, set_anchor)
def depart_target(self, node):
pass
    def visit_tbody(self, node):
        """Start a table body.

        BUG workaround: for tables without a head, the column preamble
        has not been written yet (colspecs not []), so reuse the thead
        machinery to emit it exactly once.
        """
        if not self.active_table.get('preamble written'):
            self.visit_thead(node)
            self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
"""definition list term"""
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_term(self, node):
self.out.append('}] ')
# Do we need a \leavevmode (line break if the field body begins
# with a list or environment)?
next_node = node.next_node(descend=False, siblings=True)
if not isinstance(next_node, nodes.classifier):
self.out.append(self.term_postfix(next_node))
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
_thead_depth = 0
def thead_depth (self):
return self._thead_depth
def visit_thead(self, node):
self._thead_depth += 1
if 1 == self.thead_depth():
self.out.append('{%s}\n' % self.active_table.get_colspecs(node))
self.active_table.set('preamble written', 1)
self.out.append(self.active_table.get_caption())
self.out.extend(self.active_table.visit_thead())
def depart_thead(self, node):
if node is not None:
self.out.extend(self.active_table.depart_thead())
if self.active_table.need_recurse():
node.walkabout(self)
self._thead_depth -= 1
def visit_title(self, node):
"""Append section and other titles."""
# Document title
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.title)
self.context.append('')
self.pdfinfo.append(' pdftitle={%s},' %
self.encode(node.astext()))
# Topic titles (topic, admonition, sidebar)
elif (isinstance(node.parent, nodes.topic) or
isinstance(node.parent, nodes.admonition) or
isinstance(node.parent, nodes.sidebar)):
classes = node.parent['classes'] or [node.parent.tagname]
if self.settings.legacy_class_functions:
self.fallbacks['title'] = PreambleCmds.title_legacy
self.out.append('\n\\DUtitle[%s]{' % ','.join(classes))
else:
if not self.fallback_stylesheet:
self.fallbacks['title'] = PreambleCmds.title
self.out.append('\n\\DUtitle{')
self.context.append('}\n')
# Table caption
elif isinstance(node.parent, nodes.table):
self.push_output_collector(self.active_table.caption)
self.context.append('')
# Section title
else:
if hasattr(PreambleCmds, 'secnumdepth'):
self.requirements['secnumdepth'] = PreambleCmds.secnumdepth
level = self.section_level
section_name = self.d_class.section(level)
self.out.append('\n\n')
if level > len(self.d_class.sections):
# section level not supported by LaTeX
if self.settings.legacy_class_functions:
self.fallbacks['title'] = PreambleCmds.title_legacy
section_name += '[section%s]' % roman.toRoman(level)
else:
if not self.fallback_stylesheet:
self.fallbacks['title'] = PreambleCmds.title
self.fallbacks['DUclass'] = PreambleCmds.duclass
self.out.append('\\begin{DUclass}{section%s}\n'
% roman.toRoman(level))
# System messages heading in red:
if ('system-messages' in node.parent['classes']):
self.requirements['color'] = PreambleCmds.color
section_title = self.encode(node.astext())
self.out.append(r'\%s[%s]{\color{red}' % (
section_name, section_title))
else:
self.out.append(r'\%s{' % section_name)
# label and ToC entry:
bookmark = ['']
# add sections with unsupported level to toc and pdfbookmarks?
## if level > len(self.d_class.sections):
## section_title = self.encode(node.astext())
## bookmark.append(r'\addcontentsline{toc}{%s}{%s}' %
## (section_name, section_title))
bookmark += self.ids_to_labels(node.parent, set_anchor=False)
self.context.append('%\n '.join(bookmark) + '%\n}\n')
if (level > len(self.d_class.sections)
and not self.settings.legacy_class_functions):
self.context[-1] += '\\end{DUclass}\n'
# MAYBE postfix paragraph and subparagraph with \leavemode to
# ensure floats stay in the section and text starts on a new line.
def depart_title(self, node):
self.out.append(self.context.pop())
if isinstance(node.parent, (nodes.table, nodes.document)):
self.pop_output_collector()
def minitoc(self, node, title, depth):
"""Generate a local table of contents with LaTeX package minitoc"""
section_name = self.d_class.section(self.section_level)
# name-prefix for current section level
minitoc_names = {'part': 'part', 'chapter': 'mini'}
if 'chapter' not in self.d_class.sections:
minitoc_names['section'] = 'sect'
try:
minitoc_name = minitoc_names[section_name]
except KeyError: # minitoc only supports part- and toplevel
self.warn('Skipping local ToC at %s level.\n' % section_name +
' Feature not supported with option "use-latex-toc"',
base_node=node)
return
# Requirements/Setup
self.requirements['minitoc'] = PreambleCmds.minitoc
self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
minitoc_name)
# depth: (Docutils defaults to unlimited depth)
maxdepth = len(self.d_class.sections)
self.requirements['minitoc-%s-depth' % minitoc_name] = (
r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
# Process 'depth' argument (!Docutils stores a relative depth while
# minitoc expects an absolute depth!):
offset = {'sect': 1, 'mini': 0, 'part': 0}
if 'chapter' in self.d_class.sections:
offset['part'] = -1
if depth:
self.out.append('\\setcounter{%stocdepth}{%d}' %
(minitoc_name, depth + offset[minitoc_name]))
# title:
self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
# the toc-generating command:
self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if (isinstance(node.next_node(), nodes.title)
and self.settings.documentclass != 'memoir'):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(node, title, depth)
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\n\\renewcommand{\\contentsname}{%s}' %
title)
self.out.append('\n\\tableofcontents\n')
self.has_latex_toc = True
# ignore rest of node content
raise nodes.SkipNode
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
# special topics:
if 'abstract' in node['classes']:
if not self.fallback_stylesheet:
self.fallbacks['abstract'] = PreambleCmds.abstract
if self.settings.legacy_class_functions:
self.fallbacks['abstract'] = PreambleCmds.abstract_legacy
self.push_output_collector(self.abstract)
elif 'dedication' in node['classes']:
if not self.fallback_stylesheet:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
else:
node['classes'].insert(0, 'topic')
self.visit_block_quote(node)
def depart_topic(self, node):
self.is_toc_list = False
if ('abstract' in node['classes']
and self.settings.use_latex_abstract):
self.out.append('\\end{abstract}\n')
elif not 'contents' in node['classes']:
self.depart_block_quote(node)
if ('abstract' in node['classes'] or
'dedication' in node['classes']):
self.pop_output_collector()
def visit_transition(self, node):
if not self.fallback_stylesheet:
self.fallbacks['transition'] = PreambleCmds.transition
self.out.append('\n%' + '_' * 75 + '\n')
self.out.append('\\DUtransition\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/docutils/writers/latex2e/__init__.py | Python | apache-2.0 | 132,799 | [
"VisIt"
] | e35de26b047ea0338c8d71ed739868fe0b9dd52b58c85d0adf650709c1cf1cdf |
"""Generate Java code from an ASDL description."""
# TO DO
# handle fields that have a type but no name
import os
import sys
import traceback
import asdl
TABSIZE = 4
MAX_COL = 76
def reflow_lines(s, depth):
"""Reflow the line s indented depth tabs.
Return a sequence of lines where no line extends beyond MAX_COL
when properly indented. The first line is properly indented based
exclusively on depth * TABSIZE. All following lines -- these are
the reflowed lines generated by this function -- start at the same
column as the first character beyond the opening { in the first
line.
"""
size = MAX_COL - depth * TABSIZE
if len(s) < size:
return [s]
lines = []
cur = s
padding = ""
while len(cur) > size:
i = cur.rfind(' ', 0, size)
assert i != -1, "Impossible line to reflow: %s" % s.repr()
lines.append(padding + cur[:i])
if len(lines) == 1:
# find new size based on brace
j = cur.find('{', 0, i)
if j >= 0:
j += 2 # account for the brace and the space after it
size -= j
padding = " " * j
cur = cur[i+1:]
else:
lines.append(padding + cur)
return lines
class EmitVisitor(asdl.VisitorBase):
    """Visit that emits lines"""
    # NOTE: this module is Python 2 code (``print >> file`` syntax).

    def __init__(self, direction):
        # ``direction`` is the output directory prefix for generated files.
        self.direction = direction
        super(EmitVisitor, self).__init__()

    def open(self, name, refersToPythonNode=1, useDataOutput=0):
        """Open <direction><name>.java and write the common file header."""
        self.file = open(self.direction + "%s.java" % name, "w")
        print >> self.file, "// Autogenerated AST node"
        print >> self.file, 'package pers.xia.jpython.ast;'
        print >> self.file, 'import pers.xia.jpython.object.PyObject;'
        # if refersToPythonNode:
        #    print >> self.file, 'import pers.xia.jpython.parser.Node;'
        if useDataOutput:
            # only files that emit a pickle() method need the io imports
            print >> self.file, 'import java.io.DataOutputStream;'
            print >> self.file, 'import java.io.IOException;'
        print >> self.file

    def close(self):
        """Close the currently open output file."""
        self.file.close()

    def emit(self, s, depth):
        """Write ``s`` indented ``depth`` tabs, reflowing long lines."""
        # XXX reflow long lines?
        lines = reflow_lines(s, depth)
        for line in lines:
            line = (" " * TABSIZE * depth) + line + "\n"
            self.file.write(line)
# This step will add a 'simple' boolean attribute to all Sum and Product
# nodes and add a 'typedef' link to each Field node that points to the
# Sum or Product node that defines the field.
class AnalyzeVisitor(EmitVisitor):
    """First pass: decorate the parsed ASDL tree in place.

    Adds a ``simple`` flag to Sum and Product nodes (a sum is simple when
    none of its constructors has fields), a unique ``index`` to every
    non-simple constructor and product, and a ``typedef`` back-link on
    each Field pointing at the Sum/Product that defines its type.
    """
    index = 0  # shared seed for makeIndex()

    def makeIndex(self):
        """Return the next unique node index (1-based)."""
        self.index += 1
        return self.index

    def visitModule(self, mod):
        # Map type names to their definitions before walking the tree,
        # so visitField can resolve forward references.
        self.types = {}
        for dfn in mod.dfns:
            self.types[str(dfn.name)] = dfn.value
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type, depth=0):
        self.visit(type.value, type.name, depth)

    def visitSum(self, sum, name, depth):
        # A sum is "simple" iff no constructor carries fields.
        sum.simple = 1
        for t in sum.types:
            if t.fields:
                sum.simple = 0
                break
        for t in sum.types:
            if not sum.simple:
                t.index = self.makeIndex()
            self.visit(t, name, depth)

    def visitProduct(self, product, name, depth):
        product.simple = 0
        product.index = self.makeIndex()
        for f in product.fields:
            self.visit(f, depth + 1)

    def visitConstructor(self, cons, name, depth):
        for f in cons.fields:
            self.visit(f, depth + 1)

    def visitField(self, field, depth):
        # Link the field to the definition of its type (None for builtins).
        field.typedef = self.types.get(str(field.type))
# The code generator itself.
class JavaVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if sum.simple:
self.simple_sum(sum, name, depth)
else:
self.sum_with_constructor(sum, name, depth)
def simple_sum(self, sum, name, depth):
self.open("%sType" % name, refersToPythonNode=0)
self.emit("public enum %(name)sType {" % locals(), depth)
self.emit('UNDEFINED,', depth+1)
for type in sum.types:
self.emit('%s,' % type.name, depth+1)
self.emit("}", depth)
self.close()
def sum_with_constructor(self, sum, name, depth):
self.open("%sType" % name)
# self.emit("public abstract class %(name)sType extends Node {" %
self.emit("public abstract class %(name)sType{" %
locals(), depth)
for attribute in sum.attributes:
self.emit('public %s %s;' % (attribute.type, attribute.name), depth + 1)
self.emit("}", depth)
self.close()
for t in sum.types:
self.visit(t, name, depth, sum.attributes)
def visitProduct(self, product, name, depth):
self.open("%sType" % name, useDataOutput=1)
# self.emit("public class %(name)sType extends Node {" % locals(),
self.emit("public class %(name)sType{" % locals(),
depth)
for f in product.fields:
self.visit(f, depth + 1)
for attr in product.attributes:
self.visit(attr, depth + 1)
self.emit("", depth)
self.javaMethods(product, name, "%sType" % name, product.fields,
depth+1, product.attributes)
self.emit("}", depth)
self.close()
def visitConstructor(self, cons, name, depth, attributes):
self.open(cons.name, useDataOutput=1)
enums = []
for f in cons.fields:
if f.typedef and f.typedef.simple:
enums.append("%sType" % f.type)
# if enums:
if False:
s = "implements %s " % ", ".join(enums)
else:
s = ""
self.emit("public class %s extends %sType %s{" %
(cons.name, name, s), depth)
for f in cons.fields:
self.visit(f, depth + 1)
self.emit("", depth)
self.javaMethods(cons, cons.name, cons.name, cons.fields, depth+1, attributes)
self.emit("}", depth)
self.close()
def javaMethods(self, type, clsname, ctorname, fields, depth, attributes):
# The java ctors
fpargs = ", ".join([self.fieldDef(f) for f in fields])
if attributes:
if fpargs:
fpargs += ","
else:
fpargs = ""
fpargs += ", ".join([self.fieldDef(a) for a in attributes])
self.emit("public %s(%s) {" % (ctorname, fpargs), depth)
for f in fields:
self.emit("this.%s = %s;" % (f.name, f.name), depth+1)
if attributes:
for attr in attributes:
self.emit("this.%s = %s;" % (attr.name, attr.name), depth+1)
self.emit("}", depth)
self.emit("", 0)
# if fpargs:
# fpargs += ", "
# self.emit("public %s(%sNode parent) {" %
# (ctorname, fpargs), depth)
# self.emit("this(%s);" %
# ", ".join([str(f.name) for f in fields]), depth+1)
# self.emit("this.lineno = parent.lineNo;", depth+1)
# self.emit("this.col_offset = parent.colOffset;", depth+1)
# self.emit("}", depth)
# self.emit("", 0)
# The toString() method
self.emit("public String toString() {", depth)
self.emit("return \"%s\";" % ctorname, depth+1)
self.emit("}", depth)
self.emit("", 0)
# The pickle() method
self.emit("public void pickle(DataOutputStream ostream) " +
"throws IOException {", depth)
# self.emit("pickleThis(%s, ostream);" % type.index, depth+1)
# for f in fields:
# self.emit("pickleThis(this.%s, ostream);" % f.name, depth+1)
self.emit("}", depth)
self.emit("", 0)
# The accept() method
self.emit("public Object accept(VisitorIF visitor) throws Exception {",
depth)
if clsname == ctorname:
self.emit('return visitor.visit%s(this);' % clsname, depth+1)
else:
self.emit('traverse(visitor);', depth+1)
self.emit('return null;', depth+1)
self.emit("}", depth)
self.emit("", 0)
# The visitChildren() method
self.emit("public void traverse(VisitorIF visitor) throws Exception {",
depth)
# for f in fields:
# if self.bltinnames.has_key(str(f.type)):
# continue
# if f.typedef and f.typedef.simple:
# continue
# if f.seq:
# self.emit('if (%s != null) {' % f.name, depth+1)
# self.emit('for (int i = 0; i < %s.length; i++) {' % f.name,
# depth+2)
# self.emit('if (%s[i] != null)' % f.name, depth+3)
# self.emit('%s[i].accept(visitor);' % f.name, depth+4)
# self.emit('}', depth+2)
# self.emit('}', depth+1)
# else:
# self.emit('if (%s != null)' % f.name, depth+1)
# self.emit('%s.accept(visitor);' % f.name, depth+2)
self.emit('}', depth)
self.emit("", 0)
def visitField(self, field, depth):
self.emit("public %s;" % self.fieldDef(field), depth)
bltinnames = {
'int': 'int',
'identifier': 'String',
'string': 'PyObject',
'object': 'PyObject', # was PyObject
'bytes': 'PyObject',
'singleton': 'PyObject',
}
def fieldDef(self, field):
    """Return the Java declaration ("type name") for an ASDL field.

    Builtin ASDL types are mapped through ``bltinnames``; any other type
    is assumed to be a generated class named ``<type>Type``.  Sequence
    fields are declared as ``java.util.List<type>``.
    """
    jtype = str(field.type)
    jtype = self.bltinnames.get(jtype, jtype + 'Type')
    if field.seq:
        return "java.util.List<%s> %s" % (jtype, field.name)
    # The original built this via locals() with a dead, always-empty
    # "seq" placeholder; a plain format is equivalent and clearer.
    return "%s %s" % (jtype, field.name)
class VisitorVisitor(EmitVisitor):
    """Emits the Java ``VisitorIF`` interface: one ``visitXxx`` method per
    concrete AST constructor collected while walking the ASDL module."""

    def __init__(self, direction):
        EmitVisitor.__init__(self, direction)
        self.ctors = []  # constructor names gathered during the walk

    def visitModule(self, mod):
        """Walk all definitions, then write the VisitorIF interface file."""
        for dfn in mod.dfns:
            self.visit(dfn)
        self.open("VisitorIF", refersToPythonNode=0)
        self.emit('public interface VisitorIF {', 0)
        for ctor in self.ctors:
            self.emit("public Object visit%s(%s node) throws Exception;" %
                      (ctor, ctor), 1)
        self.emit('}', 0)
        self.close()
        # NOTE(review): the VisitorBase emitter below is dead code -- a bare
        # string literal left over from an earlier version of the generator.
        '''
        self.open("VisitorBase")
        self.emit('public abstract class VisitorBase implements VisitorIF {', 0)
        for ctor in self.ctors:
            self.emit("public Object visit%s(%s node) throws Exception {" %
                      (ctor, ctor), 1)
            self.emit("Object ret = unhandled_node(node);", 2)
            self.emit("traverse(node);", 2)
            self.emit("return ret;", 2)
            self.emit('}', 1)
            self.emit('', 0)
        self.emit("abstract protected Object unhandled_node(Node node) throws Exception;", 1)
        self.emit("abstract public void traverse(Node node) throws Exception;", 1)
        self.emit('}', 0)
        self.close()
        '''

    def visitType(self, type, depth=1):
        # Delegate to the sum/product handler for this type definition.
        self.visit(type.value, type.name, depth)

    def visitSum(self, sum, name, depth):
        # Only non-simple sums produce concrete constructor classes.
        if not sum.simple:
            for t in sum.types:
                self.visit(t, name, depth)

    def visitProduct(self, product, name, depth):
        # Products contribute no visitor methods.
        pass

    def visitConstructor(self, cons, name, depth):
        # Remember the constructor name for VisitorIF emission.
        self.ctors.append(cons.name)
class ChainOfVisitors:
    """Runs several visitors over the same object, one after another."""

    def __init__(self, *visitors):
        # Visitors are applied in the order they were supplied.
        self.visitors = visitors

    def visit(self, object):
        """Forward *object* to each visitor's visit() in registration order."""
        for member in self.visitors:
            member.visit(object)
if __name__ == "__main__":
    # Usage: asdl_java.py <asdl-spec-file> [output-directory]
    mod = asdl.parse(sys.argv[1])
    direction = "../src/pers/xia/jpython/ast/"  # default output directory
    if len(sys.argv) > 2:
        direction = sys.argv[2]
    # Abort before emitting anything if the ASDL spec is inconsistent.
    if not asdl.check(mod):
        sys.exit(1)
    # Analysis pass first, then Java class emission, then the visitor
    # interface -- order matters because later visitors rely on analysis.
    c = ChainOfVisitors(AnalyzeVisitor(direction),
                        JavaVisitor(direction),
                        VisitorVisitor(direction))
    c.visit(mod)
| xia-st/JPython | ast/asdl_java.py | Python | gpl-2.0 | 12,721 | [
"VisIt"
] | a29b91f9e3c3fc38b05c99c6e6c5cd58da1132242f6ad88a23b0437adae04b83 |
"""
Utility functions for the HapMix workflow
"""
def which(program):
    """Locate *program* on disk or on $PATH; return its path or None."""
    import os

    def runnable(candidate):
        # A usable binary must both exist as a regular file and be executable.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _basename = os.path.split(program)
    if directory:
        # An explicit path was given -- accept it only if it is runnable;
        # do not fall back to a $PATH search.
        return program if runnable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if runnable(candidate):
            return candidate
    return None
def validate_tree_structure(tree_format, num_clones):
    """Verify that the requested clonal-tree structure is admissible.

    tree_format is either the string "random_binary"/"random_single_level"
    or a (possibly nested) list/tuple of clone names describing a fixed
    tree: one name (monoclonal), more than two names (single-level), or a
    two-element nesting (binary).  Exits the process with a message on any
    violation; returns None on success.

    Fixes a latent bug: a monoclonal fixed tree (length-1 list) previously
    left ``clones`` unbound and crashed with NameError, even though
    get_clones_names explicitly supports the monoclonal case.
    """
    import sys
    # Accept unicode clone names on Python 2 without breaking Python 3.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    # fixed structure
    if isinstance(tree_format, (list, tuple)):
        if len(tree_format) == 1:
            # monoclonal: the single entry is the only clone
            clones = tree_format
        elif len(tree_format) > 2:
            # single-level: every entry must be a plain clone name
            if not all([isinstance(tree_el, string_types) for tree_el in tree_format]):
                sys.exit("Invalid tree format!\nAllowed formats: binary tree, single-level tree\n")
            clones = tree_format
        else:
            # binary: every internal node must have exactly two children
            num_children = []
            set_num_children(tree_format, num_children)
            if not all([num == 2 for num in num_children]):
                sys.exit("Invalid tree format!\nAllowed formats: binary tree, single-level tree\n")
            clones = get_clone_names_binary(tree_format)
        # check correctness
        if len(clones) != num_clones:
            sys.exit("Number of evolutionary tree leaves does not match number of clones\n")
        if len(clones) != len(set(clones)):
            sys.exit("Duplicated clone name\n")
    elif tree_format not in ["random_binary", "random_single_level"]:
        sys.exit("Invalid tree format\n")
def get_clones_names(tree_format, num_clones):
    """Return the list of clone names present in the evolutionary tree."""
    import sys
    names = []
    if tree_format in ["random_binary", "random_single_level"]:
        # Random trees get default names: the major clone plus one letter
        # per subclone.  NOTE(review): the alphabet string below appears to
        # skip 'U' and 'V' -- preserved as-is; confirm whether intentional.
        alphabet = list("ABCDEFGHIJKLMNOPQRSTWXYZ")
        if num_clones == 2:
            names = ["MajCl", "A"]
        else:
            names = ["MajCl"] + alphabet[:num_clones - 1]
    elif isinstance(tree_format, (list, tuple)):
        if len(tree_format) == 1:
            # monoclonal sample: the single entry is the only clone
            names = tree_format
        elif len(tree_format) > 2:
            # single-level tree lists its leaves directly
            names = tree_format
        elif len(tree_format) == 2:
            # binary tree: collect the leaf names
            names = get_clone_names_binary(tree_format)
    return names
def get_clone_names_binary(binary_tree):
    """Flatten a binary evolutionary tree into its list of leaf clone names."""
    leaves = []
    pending = [binary_tree]
    while pending:
        node = pending.pop()
        if isinstance(node, list):
            # Push children in reverse so leaves come out left-to-right.
            pending.extend(reversed(node))
        else:
            leaves.append(node)
    return leaves
def set_num_children(binary_tree_node, num_children):
    """Append the child count of every internal node to *num_children*.

    Only the first two children of each internal node are descended into,
    which is sufficient for the caller's binary-tree check.  Leaves (plain
    strings) contribute nothing.
    """
    is_internal = (isinstance(binary_tree_node, (list, tuple))
                   and len(binary_tree_node) > 1)
    if is_internal:
        num_children.append(len(binary_tree_node))
        for child in binary_tree_node[:2]:
            set_num_children(child, num_children)
def add_BED_complement(truth_BED_file, hg_file, sort = True, out_dir = None, hap_split = True):
    """Add complement diploid regions to truth bed files for CNVs.

    Runs ``bedtools complement`` to find regions of the genome not covered
    by *truth_BED_file*, appends them as diploid records (1|1 per-haplotype
    when hap_split, else a single total CN of 2), writes the combined file
    as *_compl.bed, and optionally sorts it into *_sorted.bed.  Returns the
    path of the file written last.  Requires ``bedtools`` on $PATH.
    NOTE: Python 2 only (print statements below).
    """
    import subprocess, re, os
    if out_dir is None:
        out_dir = os.path.dirname(truth_BED_file)
    # Add complement regions to truth_BED_file, set to diploid.
    # HACK(review): shell=True with interpolated file paths -- paths
    # containing shell metacharacters would be unsafe; consider a list argv.
    cmd = "bedtools complement -i %s -g %s" % (truth_BED_file, hg_file)
    print cmd
    pc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    compl = pc.stdout.readlines()
    with open(truth_BED_file, "r") as rbed:
        lines = [line.strip() for line in rbed.readlines()]
    for line in compl:
        if hap_split:
            # Per-haplotype copy numbers: one copy on each haplotype.
            lines.append(line.rstrip() + "\t" + str(1) + "\t" + str(1))
        else:
            # Single total copy-number column: diploid.
            lines.append(line.rstrip() + "\t" + str(2))
    out_bed = os.path.join(out_dir, re.sub(".bed$","_compl.bed", os.path.basename(truth_BED_file)))
    with open(out_bed, "w") as wbed:
        # writelines on a single joined string writes it verbatim.
        wbed.writelines("\n".join(lines))
    # Sort output bed file and return the sorted path instead.
    if sort:
        cmd = "bedtools sort -i %s" % out_bed
        pc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        sortedbed = pc.stdout.readlines()
        out_bed = os.path.join(out_dir, re.sub(".bed$","_sorted.bed", os.path.basename(truth_BED_file)))
        with open(out_bed, "w") as wbed:
            wbed.writelines(sortedbed)
    return out_bed
def get_clone_ploidy(clone_truth_files, hg_file, chr_rm=[]):
    """Compute the overall ploidy of a sample from its truth BED file.

    Sums copy-number-weighted segment lengths over all chromosomes not in
    *chr_rm*, then divides by the total length of the kept chromosomes
    taken from the genome file *hg_file*.
    """
    weighted_length = 0
    with open(clone_truth_files, "r") as tfile:
        for var_line in tfile:
            chr_id, start, end, cnA, cnB = var_line.strip().split("\t")
            if chr_id in chr_rm:
                continue
            weighted_length += (int(end) - int(start)) * (float(cnA) + float(cnB))
    genome_length = 0
    with open(hg_file, "r") as gfile:
        for chrl in gfile:
            chr_id, len_chr = chrl.strip().split("\t")
            if chr_id in chr_rm:
                continue
            genome_length += int(len_chr)
    return weighted_length / genome_length
def get_chrs_len(hg_file):
    """Read a bedtools-style genome file and return {chromosome: length}.

    Each non-blank line of *hg_file* is expected to be "<chr>\t<length>".
    Blank lines are skipped.  The original implementation opened the file
    without ever closing it; the with-block fixes the handle leak.
    """
    chrs = {}
    with open(hg_file, "r") as gen_file:
        for line in gen_file:
            stripped = line.strip()
            if stripped:  # skip blank lines
                chr_id, chr_len = stripped.split("\t")
                chrs[chr_id] = int(chr_len)
    return chrs
class CNsegment:
    """One copy-number segment of a single clone.

    Mirrors one record of a clone truth BED file plus the clone's cell
    fraction and the truth file the record came from.  NOTE: Python 2 only
    (print statement in print_segment).
    """
    def __init__(self, id, start, end, CN_hapA, CN_hapB, perc, truth_file):
        # id is used as a hashable unique key; the caller passes a
        # (start, end, random tiebreaker) tuple.
        self.ID = id
        self.start = start            # segment start coordinate
        self.end = end                # segment end coordinate
        self.CN_hapA = CN_hapA        # copy number on haplotype A
        self.CN_hapB = CN_hapB        # copy number on haplotype B
        self.perc = perc              # clone percentage (cell fraction)
        self.truth_file = truth_file  # path of the source truth BED file
    def print_segment(self):
        # Debug helper: dump the segment fields to stdout.
        print "start: " + str(self.start) + "\tend: " + str(self.end) + "\tCN: " + str(self.CN_hapA) + "|" + str(self.CN_hapB) + "\tperc: " + str(self.perc)
def merge_clonal_CNs(clone_truth_files, clones_perc, purity, tmp_dir):
    """Merge per-clone copy-number truth files into one sample-level profile.

    Sweep-line over segment breakpoints per chromosome: every clone segment
    is registered at both its start and end coordinate; for each interval
    between consecutive breakpoints, the haplotype copy numbers of the
    currently open segments are summed, scaled by clone percentage and
    tumor purity, plus a diploid (1|1) normal contribution of (1 - purity).
    Writes tmp_dir/mergedSegments.bed and returns its path.

    clone_truth_files: dict clone-id -> truth BED path (chr start end cnA cnB)
    clones_perc: dict clone-id -> clone percentage
    purity: tumor purity in percent
    NOTE: Python 2 only (print statements, dict.iteritems below).
    """
    import os, collections, random
    truth_set_cn_calls = {}
    merged_file = os.path.join(tmp_dir, "mergedSegments.bed")
    # NOTE(review): output handle is never explicitly closed/flushed.
    mergedSegments = open(merged_file, "w")
    purity = float(purity)
    for clID, perc in clones_perc.items():
        truth_set = open(clone_truth_files[clID],'r')
        for line in truth_set:
            (chr, start, end, CN_hapA, CN_hapB) = line.strip().split("\t")
            (start, end, CN_hapA, CN_hapB) = (int(start), int(end), float(CN_hapA), float(CN_hapB))
            if not chr in truth_set_cn_calls:
                truth_set_cn_calls[chr] = collections.defaultdict(list)
            # ID tuple includes a random tiebreaker so identical coordinate
            # pairs from different clones stay distinct dictionary keys.
            segment = CNsegment((start, end, random.random()), start, end, CN_hapA, CN_hapB, perc, clone_truth_files[clID])
            segment.print_segment()
            # use start and end to accumulate all clones that have the segment
            truth_set_cn_calls[chr][start].append(segment)
            truth_set_cn_calls[chr][end].append(segment)
    chrs = truth_set_cn_calls.keys()
    for chr in chrs:
        # create OrderedDict to sort breakpoints by coordinate
        truth_set_cn_calls[chr] = collections.OrderedDict(sorted(truth_set_cn_calls[chr].items(), key = lambda t: t[0]))
    for chr in chrs:
        start = -1
        end = -1
        current_dict = {}  # segments currently "open" at the sweep position
        for key, value in truth_set_cn_calls[chr].iteritems():
            if start == -1:
                # First breakpoint: open its segments and move on.
                start = key
                for i in range(len(value)):
                    current_dict[value[i].ID] = value[i]
                continue
            elif end == -1:
                # Close the interval just before this breakpoint.
                end = key - 1
            # Sum purity- and fraction-weighted CN over all open segments.
            tmpCN_hapA = 0
            tmpCN_hapB = 0
            for key_cd, value_cd in current_dict.iteritems():
                #print value_cd.print_segment()
                tmpCN_hapA += value_cd.perc * purity/100 * value_cd.CN_hapA
                tmpCN_hapB += value_cd.perc * purity/100 * value_cd.CN_hapB
            # Toggle segments at this breakpoint: a segment seen while open
            # is ending (remove); one not yet open is starting (add).
            for i in range(len(value)):
                if value[i].ID in current_dict:
                    del value_cd_dict_entry_placeholder if False else current_dict[value[i].ID]
                else:
                    current_dict[value[i].ID] = value[i]
            if start != -1 and end != -1:
                # Emit the interval with the diploid normal contribution added.
                mergedSegments.write(chr + "\t" + str(start) + "\t" + str(end) + "\t" + str(tmpCN_hapA + 1*(1 - purity/100)) + "\t" + str(tmpCN_hapB + 1*(1 - purity/100)) + "\n" )
                print "Clonal CN for segment: start\tend\t" + chr + "\t" + str(start) + "\t" + str(end) + "\t" + str(tmpCN_hapA) + "\t" + str(tmpCN_hapB)
            start = end + 1  # next interval begins after the emitted one; NOTE(review): original author note was "overlapping? should be +1?"
            end = -1
    return merged_file
| Illumina/HapMix | bin/HapMixUtil.py | Python | gpl-3.0 | 9,343 | [
"VisIt"
] | ed7a1f29a30d8e21fcb2f1b072cb8f3ea48d7e1d0fb9102e12622281911dde01 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.